repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
pmb-nll | pmb-nll-main/src/probabilistic_inference/probabilistic_detr_predictor.py | import numpy as np
import torch
import torch.nn.functional as F
# DETR imports
from detr.util.box_ops import box_cxcywh_to_xyxy
# Detectron Imports
from detectron2.structures import Boxes
# Project Imports
from probabilistic_inference import inference_utils
from probabilistic_inference.inference_core import ProbabilisticPredictor
from probabilistic_modeling.modeling_utils import covariance_output_to_cholesky, clamp_log_variance
class DetrProbabilisticPredictor(ProbabilisticPredictor):
    """Probabilistic inference for DETR-based detectors.

    DETR emits a fixed set of query predictions per image, so no NMS is
    performed; the NMS-style attributes below exist only for compatibility
    with the probabilistic detectron ensemble utilities.
    """

    def __init__(self, cfg):
        super().__init__(cfg)
        # These are mock variables to be compatible with probabilistic detectron library. No NMS is performed for DETR.
        # Only needed for ensemble methods
        self.test_nms_thresh = 0.5
        self.test_topk_per_image = self.model.detr.num_queries

    def detr_probabilistic_inference(self,
                                     input_im):
        """Run one probabilistic forward pass and convert raw DETR outputs.

        Args:
            input_im (list): input list from the dataset handler; only the
                first element is used, with input_im[0]['image'] a (C, H, W)
                tensor.

        Returns:
            Tuple of (predicted_boxes, predicted_boxes_covariance,
            predicted_prob, classes_idxs, predicted_prob_vectors, ppp).
            predicted_boxes_covariance and ppp are empty lists when the
            model does not produce them.
        """
        outputs = self.model(input_im,
                             return_raw_results=True,
                             is_mc_dropout=self.mc_dropout_enabled)

        image_width = input_im[0]['image'].shape[2]
        image_height = input_im[0]['image'].shape[1]

        # Handle logits and classes
        predicted_logits = outputs['pred_logits'][0]
        if 'pred_logits_var' in outputs.keys():
            # Logit variances available: marginalize the softmax over logit
            # uncertainty via Monte-Carlo sampling from a Normal whose
            # variance is exp(pred_logits_var) (variance predicted in log space).
            predicted_logits_var = outputs['pred_logits_var'][0]
            box_cls_dists = torch.distributions.normal.Normal(
                predicted_logits, scale=torch.sqrt(
                    torch.exp(predicted_logits_var)))
            predicted_logits = box_cls_dists.rsample(
                (self.model.cls_var_num_samples,))
            predicted_prob_vectors = F.softmax(predicted_logits, dim=-1)
            predicted_prob_vectors = predicted_prob_vectors.mean(0)
        else:
            predicted_prob_vectors = F.softmax(predicted_logits, dim=-1)

        # The last softmax entry is the no-object/background class; drop it
        # before taking the most likely foreground class per query.
        predicted_prob, classes_idxs = predicted_prob_vectors[:, :-1].max(-1)

        # Handle boxes and covariance matrices
        predicted_boxes = outputs['pred_boxes'][0]

        # Rescale boxes to inference image size (not COCO original size)
        pred_boxes = Boxes(box_cxcywh_to_xyxy(predicted_boxes))
        pred_boxes.scale(scale_x=image_width, scale_y=image_height)
        predicted_boxes = pred_boxes.tensor

        # Rescale boxes to inference image size (not COCO original size)
        if 'pred_boxes_cov' in outputs.keys():
            # Cholesky factor L -> full covariance L @ L^T.
            predicted_boxes_covariance = covariance_output_to_cholesky(
                outputs['pred_boxes_cov'][0])
            predicted_boxes_covariance = torch.matmul(
                predicted_boxes_covariance, predicted_boxes_covariance.transpose(
                    1, 2))

            # Linear map from (cx, cy, w, h) to (x1, y1, x2, y2); applying it
            # as A @ Cov @ A^T propagates the covariance to corner coords.
            transform_mat = torch.tensor([[[1.0, 0.0, -0.5, 0.0],
                                           [0.0, 1.0, 0.0, -0.5],
                                           [1.0, 0.0, 0.5, 0.0],
                                           [0.0, 1.0, 0.0, 0.5]]]).to(self.model.device)
            predicted_boxes_covariance = torch.matmul(
                torch.matmul(
                    transform_mat,
                    predicted_boxes_covariance),
                transform_mat.transpose(
                    1,
                    2))

            # Scale the normalized-coordinate covariance to pixel units,
            # matching the box rescaling above.
            scale_mat = torch.diag_embed(
                torch.as_tensor(
                    (image_width,
                     image_height,
                     image_width,
                     image_height),
                    dtype=torch.float32)).to(
                self.model.device).unsqueeze(0)
            predicted_boxes_covariance = torch.matmul(
                torch.matmul(
                    scale_mat,
                    predicted_boxes_covariance),
                torch.transpose(scale_mat, 2, 1))
        else:
            predicted_boxes_covariance = []

        # Poisson point process intensity output, if the model provides one.
        if 'ppp' in outputs:
            ppp = outputs['ppp']
        else:
            ppp = []

        return predicted_boxes, predicted_boxes_covariance, predicted_prob, classes_idxs, predicted_prob_vectors, ppp

    def post_processing_standard_nms(self, input_im):
        """
        This function produces results using standard non-maximum suppression. The function takes into
        account any probabilistic modeling method when computing the results.

        Args:
            input_im (list): an input im list generated from dataset handler.

        Returns:
            result (instances): object instances
        """
        outputs = self.detr_probabilistic_inference(input_im)
        return inference_utils.general_standard_nms_postprocessing(
            input_im, outputs)

    def post_processing_topk_detections(self, input_im):
        """
        This function produces results using topk selection based on confidence scores.

        Args:
            input_im (list): an input im list generated from dataset handler.

        Returns:
            result (instances): object instances
        """
        outputs = self.detr_probabilistic_inference(input_im)
        return inference_utils.general_topk_detection_postprocessing(input_im, outputs)

    def post_processing_output_statistics(self, input_im):
        """
        Output statistics does not make much sense for DETR architecture. There is some redundancy due to forced 100
        detections per image, but cluster sizes would be too small for meaningful estimates. Might implement it later
        on.
        """
        raise NotImplementedError
        pass  # unreachable; kept from original

    def post_processing_mc_dropout_ensembles(self, input_im):
        """Run multiple MC-dropout forward passes and fuse the results."""
        if self.cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.BOX_MERGE_MODE == 'pre_nms':
            raise NotImplementedError
        else:
            # Merge results:
            results = [
                inference_utils.general_standard_nms_postprocessing(
                    input_im,
                    self.detr_probabilistic_inference(input_im),
                    self.test_nms_thresh,
                    self.test_topk_per_image) for _ in range(
                    self.num_mc_dropout_runs)]

            # Append per-ensemble outputs after NMS has been performed.
            ensemble_pred_box_list = [
                result.pred_boxes.tensor for result in results]
            ensemble_pred_prob_vectors_list = [
                result.pred_cls_probs for result in results]
            ensembles_class_idxs_list = [
                result.pred_classes for result in results]
            ensembles_pred_box_covariance_list = [
                result.pred_boxes_covariance for result in results]

            return inference_utils.general_black_box_ensembles_post_processing(
                input_im,
                ensemble_pred_box_list,
                ensembles_class_idxs_list,
                ensemble_pred_prob_vectors_list,
                ensembles_pred_box_covariance_list,
                self.test_nms_thresh,
                self.test_topk_per_image,
                self.cfg.PROBABILISTIC_INFERENCE.AFFINITY_THRESHOLD,
                is_generalized_rcnn=True,
                merging_method=self.cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.BOX_FUSION_MODE)

    def post_processing_ensembles(self, input_im, model_dict):
        """Run every model in ``model_dict`` and fuse their NMS-filtered outputs."""
        if self.cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.BOX_MERGE_MODE == 'pre_nms':
            raise NotImplementedError
        else:
            outputs_list = []
            for model in model_dict:
                # NOTE(review): self.model is rebound to each ensemble member
                # and not restored afterwards.
                self.model = model
                outputs_list.append(
                    self.post_processing_standard_nms(input_im))

            # Merge results:
            ensemble_pred_box_list = []
            ensemble_pred_prob_vectors_list = []
            ensembles_class_idxs_list = []
            ensembles_pred_box_covariance_list = []
            for results in outputs_list:
                # Append per-ensemble outputs after NMS has been performed.
                ensemble_pred_box_list.append(results.pred_boxes.tensor)
                ensemble_pred_prob_vectors_list.append(results.pred_cls_probs)
                ensembles_class_idxs_list.append(results.pred_classes)
                ensembles_pred_box_covariance_list.append(
                    results.pred_boxes_covariance)

            return inference_utils.general_black_box_ensembles_post_processing(
                input_im,
                ensemble_pred_box_list,
                ensembles_class_idxs_list,
                ensemble_pred_prob_vectors_list,
                ensembles_pred_box_covariance_list,
                self.test_nms_thresh,
                self.test_topk_per_image,
                self.cfg.PROBABILISTIC_INFERENCE.AFFINITY_THRESHOLD,
                is_generalized_rcnn=True,
                merging_method=self.cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.BOX_FUSION_MODE)

    def post_processing_bayes_od(self, input_im):
        """
        Since there is no NMS step in DETR, bayesod is not implemented. Although possible to add NMS
        and implement it later on.
        """
        raise NotImplementedError
        pass  # unreachable; kept from original
| 9,040 | 40.095455 | 119 | py |
pmb-nll | pmb-nll-main/src/probabilistic_modeling/losses.py | from collections import defaultdict
from math import comb
from math import factorial
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from core.fastmurty.mhtdaClink import (allocateWorkvarsforDA,
deallocateWorkvarsforDA, mhtda, sparse)
from core.fastmurty.mhtdaClink import sparsifyByRow as sparsify
from scipy.optimize import linear_sum_assignment
from torch.distributions.multivariate_normal import MultivariateNormal
from probabilistic_modeling.modeling_utils import (
clamp_log_variance, covariance_output_to_cholesky)
def reshape_box_preds(preds, num_classes):
"""
Tiny helper function to reshape box predictions from [numpreds,classes*boxdim] to [numpreds,classes,boxdim]
"""
num_preds, *_ = preds.shape
if num_preds == 0:
return preds
if len(preds.shape) == 2:
preds = preds.unsqueeze(1)
if preds.shape[-1] > num_classes: # if box predicted per class
preds = preds.reshape(num_preds, num_classes, -1)
else:
preds = preds.repeat(1, num_classes, 1)
return preds
def run_murtys(cost_matrix: torch.Tensor, nsolutions: int):
    """Find the ``nsolutions`` best assignments for ``cost_matrix``.

    Runs fastmurty's multi-hypothesis data association (Murty's algorithm)
    on the given cost matrix. Based on example_simplest.py in fastmurty.

    Args:
        cost_matrix: [nrows, ncolumns] assignment cost matrix.
        nsolutions: number of best associations to return.

    Returns:
        Tuple of (out_associations, out_costs):
        out_associations is [nsolutions, nrows + ncolumns, 2] int32
        row/column index pairs (unused pairs are set to (-2, -2)),
        out_costs is [nsolutions] with the cost of each association.
    """
    # make all costs negative for algo to work properly; a constant shift
    # does not change the ranking of assignments
    cost_matrix_max = cost_matrix.max()
    if cost_matrix_max >= 0:
        cost_matrix = cost_matrix - (cost_matrix_max + 1)
    cost_matrix = cost_matrix.detach().numpy()
    nrows, ncolumns = cost_matrix.shape
    # sparse cost matrices only include a certain number of elements
    # the rest are implicitly infinity
    # in this case, the sparse matrix includes all elements
    # The sparse and dense versions are compiled differently (see the Makefile).
    # The variable "sparse" in mhtdaClink needs to match the version compiled
    cost_matrix_to_use = sparsify(cost_matrix, ncolumns) if sparse else cost_matrix
    # mhtda is set up to potentially take multiple input hypotheses for both rows and columns
    # input hypotheses specify a subset of rows or columns.
    # In this case, we just want to use the whole matrix.
    # (np.bool8 was deprecated in NumPy 1.24 and removed in 2.0; np.bool_ is
    # the canonical boolean scalar type.)
    row_priors = np.ones((1, nrows), dtype=np.bool_)
    col_priors = np.ones((1, ncolumns), dtype=np.bool_)
    # Each hypothesis has a relative weight too.
    # These values don't matter if there is only one hypothesis...
    row_prior_weights = np.zeros(1)
    col_prior_weights = np.zeros(1)
    # The mhtda function modifies preallocated outputs rather than
    # allocating new ones. This is slightly more efficient for repeated use
    # within a tracker.
    # The cost of each returned association:
    out_costs = np.zeros(nsolutions)
    # The row-column pairs in each association:
    # Generally there will be less than nrows+ncolumns pairs in an association.
    # The unused pairs are currently set to (-2, -2)
    out_associations = np.zeros((nsolutions, nrows + ncolumns, 2), dtype=np.int32)
    # variables needed within the algorithm (a C function sets this up):
    workvars = allocateWorkvarsforDA(nrows, ncolumns, nsolutions)
    # run!
    mhtda(
        cost_matrix_to_use,
        row_priors,
        row_prior_weights,
        col_priors,
        col_prior_weights,
        out_associations,
        out_costs,
        workvars,
    )
    deallocateWorkvarsforDA(workvars)
    return out_associations, out_costs
def compute_negative_log_likelihood(
    box_scores: torch.tensor,
    box_regs: torch.tensor,
    box_covars: torch.tensor,
    gt_box: torch.tensor,
    gt_class: torch.tensor,
    image_size: List[int],
    reg_distribution: torch.distributions.distribution.Distribution,
    associations: np.ndarray,
    device: torch.device,
    intensity_func=lambda x: 0.00000001,
    scores_have_bg_cls=False,
    target_delta=None,
    pred_delta=None,
    pred_delta_chol=None,
):
    """Compute NLL for given associations.

    Evaluates the Poisson multi-Bernoulli (PMB) negative log-likelihood of
    the ground truth under the prediction, combining the supplied
    associations via logsumexp. Each association pairs predictions
    (Bernoulli components, indices < num_preds) and PPP rows
    (indices >= num_preds) with ground-truth columns.

    Args:
        box_scores (torch.tensor): [num_preds, C] class scores; when
            ``scores_have_bg_cls`` the last column is the background class.
        box_regs (torch.tensor): [num_preds, C, box_dim] regression means.
        box_covars (torch.tensor): per-class regression scale terms passed to
            ``reg_distribution`` (Cholesky factors per the call sites in this
            file — TODO confirm for custom distributions).
        gt_box (torch.tensor): [num_gt, box_dim] ground-truth boxes.
        gt_class (torch.tensor): [num_gt] ground-truth class indices.
        image_size (List[int]): image size, converted to a tensor for the
            intensity function.
        reg_distribution: distribution constructor called as
            ``reg_distribution(mean, scale)``.
        associations (np.ndarray[np.int32]): [n_assoc, num_preds+num_gt, 2]
            row/column pairs as produced by ``run_murtys``.
        device (torch.device): device for the accumulated loss tensors.
        intensity_func: PPP intensity; called as
            ``intensity_func(gt_vec, img_size)`` for matched PPP terms and
            with ``integrate=True`` for the normalizer. NOTE(review): the
            default one-argument lambda does not match these call
            signatures — callers must supply a proper intensity function.
        scores_have_bg_cls: whether ``box_scores`` has a background column.
        target_delta / pred_delta / pred_delta_chol: optional tensors; when
            given, the regression likelihood is evaluated in delta space.

    Returns:
        Tuple of (nll, out_dict): scalar NLL and a dict with per-association
        loss decompositions.
    """
    if type(image_size) is not torch.tensor:
        image_size = torch.tensor(image_size)
    img_size = image_size.unsqueeze(0).to(device)
    # Existence probability of each Bernoulli = 1 - background score.
    existance_prob = 1 - box_scores[:, -1]
    num_preds, num_classes = box_scores.shape
    if scores_have_bg_cls:
        num_classes -= 1  # do not count background class
    num_gt, _ = gt_box.shape
    out_dict = defaultdict(list)
    out_dict.update(
        {
            "matched_bernoulli": [],
            "unmatched_bernoulli": [],
            "matched_ppp": [],
            "matched_bernoulli_reg": [],
            "matched_bernoulli_cls": [],
            "num_matched_bernoulli": [],
            "num_unmatched_bernoulli": [],
            "num_matched_ppp": [],
            "ppp_integral": None,
        }
    )
    # One log-likelihood value per association hypothesis.
    nll = torch.zeros(len(associations), dtype=torch.float64, device=device)
    for a, association in enumerate(associations):
        log_matched_bernoulli = torch.tensor(0, dtype=torch.float64, device=device)
        log_unmatched_bernoulli = torch.tensor(0, dtype=torch.float64, device=device)
        log_poisson = torch.tensor(0, dtype=torch.float64, device=device)
        log_matched_regression = torch.tensor(0, dtype=torch.float64, device=device)
        log_matched_classification = torch.tensor(0, dtype=torch.float64, device=device)
        num_matched_bernoulli = 0
        num_unmatched_bernoulli = 0
        num_matched_ppp = 0
        log_matched_bernoulli_regs = []
        log_matched_bernoulli_cls = []
        log_unmatched_bernoullis = []
        log_matched_ppps = []
        for pair in association:
            pred = pair[0]
            gt = pair[1]
            if (
                0 <= pred < num_preds
            ) and gt >= 0:  # if bernoulli was assigned to a GT element
                num_matched_bernoulli += 1
                assigned_gt = gt
                k = pred
                gt_c = gt_class[assigned_gt]
                # NOTE(review): r is computed here but not used in this branch.
                if scores_have_bg_cls:
                    r = existance_prob[k]
                else:
                    r = box_scores[k, gt_c]
                covar = box_covars[k, gt_c]
                if target_delta is None:
                    # Evaluate regression likelihood directly in box space.
                    covar = box_covars[k, gt_c]
                    dist = reg_distribution(box_regs[k, gt_c, :], covar)
                    regression = dist.log_prob(gt_box[assigned_gt, :]).sum()
                    classification = torch.log(box_scores[k, gt_c])
                else:
                    # Evaluate regression likelihood in (anchor-)delta space.
                    covar = pred_delta_chol[k, gt_c]
                    dist = reg_distribution(pred_delta[k, gt_c, :], covar)
                    regression = dist.log_prob(target_delta[k, assigned_gt, :]).sum()
                    classification = torch.log(box_scores[k, gt_c])
                log_f = regression + classification
                # Save stats
                log_matched_bernoulli_regs.append(-regression.squeeze().item())
                log_matched_bernoulli_cls.append(-classification.squeeze().item())
                # Update total bernoulli component
                log_matched_bernoulli = log_matched_bernoulli + log_f.squeeze()
                log_matched_regression = log_matched_regression + regression.squeeze()
                log_matched_classification = (
                    log_matched_classification + classification.squeeze()
                )
            elif (
                0 <= pred < num_preds
            ) and gt == -1:  # if bernoulli was not assigned to a GT element
                num_unmatched_bernoulli += 1
                k = pred
                # Unmatched Bernoulli contributes its non-existence probability.
                if scores_have_bg_cls:
                    log_f = torch.log(1 - existance_prob[k])
                else:
                    log_f = torch.log(1 - box_scores[k].max())
                log_unmatched_bernoulli = log_unmatched_bernoulli + log_f.squeeze()
                # Save stats
                log_unmatched_bernoullis.append(-log_f.squeeze().item())
            elif (pred >= num_preds) and (
                gt >= 0
            ):  # if poisson was assigned to a GT element
                num_matched_ppp += 1
                assigned_gt = gt
                gt_c = gt_class[assigned_gt].unsqueeze(0)
                # GT vector is [box, class] for the intensity function.
                gt_vec = torch.cat([gt_box[assigned_gt, :], gt_c])
                log_f = intensity_func(gt_vec.unsqueeze(0), img_size).squeeze()
                log_poisson = log_poisson + log_f
                # Save stats
                log_matched_ppps.append(-log_f.item())
        association_sum = log_matched_bernoulli + log_unmatched_bernoulli + log_poisson
        out_dict["matched_bernoulli"].append(-log_matched_bernoulli.item())
        out_dict["matched_bernoulli_reg"].append(-log_matched_regression.item())
        out_dict["matched_bernoulli_cls"].append(-log_matched_classification.item())
        out_dict["num_matched_bernoulli"].append(num_matched_bernoulli)
        out_dict["unmatched_bernoulli"].append(-log_unmatched_bernoulli.item())
        out_dict["num_unmatched_bernoulli"].append(num_unmatched_bernoulli)
        out_dict["matched_ppp"].append(-log_poisson.item())
        out_dict["num_matched_ppp"].append(num_matched_ppp)
        out_dict["matched_bernoulli_regs"].append(log_matched_bernoulli_regs)
        out_dict["matched_bernoulli_clss"].append(log_matched_bernoulli_cls)
        out_dict["unmatched_bernoullis"].append(log_unmatched_bernoullis)
        out_dict["matched_ppps"].append(log_matched_ppps)
        nll[a] = association_sum
    # Combine hypotheses: log sum_a exp(log-likelihood_a).
    nll = torch.logsumexp(nll, -1)
    n_class = torch.tensor(num_classes).unsqueeze(0).to(device)
    # PPP normalizer: integral of the intensity over the image/class domain.
    ppp_regularizer = intensity_func(None, img_size, n_class, integrate=True).squeeze()
    nll = ppp_regularizer - nll
    out_dict["ppp_integral"] = ppp_regularizer.item()
    out_dict["total"] = [
        out_dict["matched_bernoulli"][i]
        + out_dict["unmatched_bernoulli"][i]
        + out_dict["matched_ppp"][i]
        + out_dict["ppp_integral"]
        for i in range(len(associations))
    ]
    return nll, out_dict
def negative_log_likelihood_matching(
    box_scores: torch.tensor,
    box_regs: torch.tensor,
    box_covars: torch.tensor,
    gt_box: torch.tensor,
    gt_class: torch.tensor,
    image_size: List[int],
    reg_distribution: torch.distributions.distribution.Distribution,
    device: torch.device,
    intensity_func=lambda x: 0.00000001,
    max_n_solutions: int = 5,
    scores_have_bg_cls=False,
    target_delta=None,
    distance_type="log_prob",
    covar_scaling = 1,
    use_target_delta_matching=True,
    pred_delta=None,
    pred_delta_chol=None,
):
    """Build the prediction-to-GT cost matrix and find the best associations.

    The cost matrix has num_preds Bernoulli rows plus num_gt PPP rows over
    num_gt ground-truth columns; Murty's algorithm (``run_murtys``) returns
    up to ``max_n_solutions`` best assignments. Runs under ``torch.no_grad``
    since only the discrete associations are needed.

    Returns:
        np.ndarray[np.int32] of shape [n_solutions, num_preds + num_gt, 2]
        with (row, column) pairs; column -1 marks an unassigned row.
    """
    img_size = torch.tensor(image_size).unsqueeze(0).to(device)
    num_preds, num_classes = box_scores.shape
    if scores_have_bg_cls:
        num_classes -= 1  # do not count background class
    num_gt = gt_box.shape[0]
    # Existence probability of each Bernoulli = 1 - background score.
    existance_prob = 1 - box_scores[:, -1]
    # Init potential covar scaling for matching
    covar_scaling = torch.eye(box_covars.shape[-1]).to(box_covars.device)*covar_scaling
    # save indices of inf cost
    infinite_costs = []
    with torch.no_grad():
        if not(num_gt > 0 and num_preds > 0):
            # Degenerate case: no matching possible. Every row is unassigned
            # except that, with GTs but no predictions, the trailing PPP rows
            # pick up the GTs one-to-one.
            associations = -np.ones((1, num_preds + num_gt, 2))
            if num_gt > 0:
                associations[0, -num_gt:, 1] = np.arange(num_gt)
            associations[0, :, 0] = np.arange(num_preds + num_gt)
            associations = associations.astype(np.int32)
            return associations
        # Assemble and fill cost matrix
        cost_matrix = torch.zeros((num_preds + num_gt, num_gt), dtype=torch.float64)
        if scores_have_bg_cls:
            r = existance_prob.unsqueeze(-1).repeat(1, num_gt)
        else:
            r = box_scores[:, gt_class]  # assume existance prob == class prob
        # Use delta-space mean/scale when provided and enabled, else box space.
        covar = box_covars[:, gt_class] if pred_delta_chol is None or not use_target_delta_matching else pred_delta_chol[:, gt_class]
        reg_means = box_regs if pred_delta is None or not use_target_delta_matching else pred_delta
        # Repeat gt to be [num_preds,num_gt,dim] if needed
        if len(gt_box.shape) < len(reg_means[:, gt_class].shape):
            gt_box = gt_box.unsqueeze(0).repeat(num_preds, 1, 1)
        if distance_type == "log_prob":
            # Covar is actually cholesky decomposed, hence only one multiplication with scaling
            scaled_covar = covar_scaling@covar
            dist = reg_distribution(reg_means[:, gt_class], scaled_covar)
            if target_delta is None or not use_target_delta_matching:
                log_p = dist.log_prob(gt_box)
            else:
                log_p = dist.log_prob(target_delta)
        elif distance_type == "euclidian_squared":
            # We use minus since its sign is reversed later (and cost should be minimized)
            if target_delta is None or not use_target_delta_matching:
                log_p = -(reg_means[:, gt_class] - gt_box).pow(2).sum(-1)
            else:
                log_p = -(reg_means[:, gt_class] - target_delta).pow(2).sum(-1)
        elif distance_type == "euclidian":
            # We use minus since its sign is reversed later (and cost should be minimized)
            if target_delta is None or not use_target_delta_matching:
                log_p = -(reg_means[:, gt_class] - gt_box).pow(2).sum(-1).sqrt()
            else:
                log_p = (
                    -(reg_means[:, gt_class] - target_delta).pow(2).sum(-1).sqrt()
                )
        else:
            raise NotImplementedError(
                f'Distance type for PMB-NLL matching "{distance_type}" not implemented.'
            )
        # Reduce any extra trailing dimension (e.g. per-dim log-probs).
        log_p = log_p.sum(-1) if len(log_p.shape) > 2 else log_p
        log_p = log_p + torch.log(
            box_scores[:, gt_class]
        )  # box regression + class scores conditioned on existance
        # Bernoulli match cost: -(log f - log(1 - r)).
        cost = -(log_p - torch.log(1 - r))
        cost_matrix[:num_preds] = cost
        # Replace non-finite entries with 0 for now; they are rewritten with a
        # large finite penalty below.
        if not torch.isfinite(cost).all():
            for k, l in torch.isfinite(cost).logical_not().nonzero():
                infinite_costs.append((k, l))
                cost_matrix[k, l] = 0
        # Build GT vector with [box, class]
        if target_delta is None or not use_target_delta_matching:
            gt_vec = torch.cat([gt_box[0, :, :], gt_class.unsqueeze(-1)], -1)
        else:
            gt_vec = torch.cat([target_delta[0, :, :], gt_class.unsqueeze(-1)], -1)
        # PPP cost
        cost = -intensity_func(gt_vec, img_size, dist_type=distance_type)
        # NOTE(review): both branches set the same diagonal; they differ only
        # in recording non-finite entries for later replacement.
        if torch.isfinite(cost).all():
            cost_matrix[num_preds:] = torch.diag(cost)
        else:
            cost_matrix[num_preds:] = torch.diag(cost)
            for l in torch.isfinite(cost).logical_not().nonzero():
                infinite_costs.append((num_preds + l, l))
                cost_matrix[num_preds + l, l] = 0
        # Fill in "inf"
        # Off-diagonal PPP entries are invalid (each PPP row serves only one
        # GT); give them a large finite cost, and recorded non-finite entries
        # a slightly smaller one.
        if cost_matrix.numel() > 0:
            largest_cost = cost_matrix.max()
            for k in range(num_preds, num_preds + num_gt):  # loop over predictions
                for l in range(num_gt):  # loop over ground truths
                    if k != (l + num_preds):
                        cost_matrix[k, l] = largest_cost * 3
            for coord in infinite_costs:
                k, l = coord
                cost_matrix[k, l] = largest_cost * 2
        # Find nsolutions best solutions
        # Count possible unique associations, capped by max_n_solutions.
        nsolutions = 0
        for i in range(num_gt+1):
            if i > num_preds or nsolutions > max_n_solutions:
                break
            nsolutions += (factorial(num_preds)//factorial(num_preds-i))*comb(num_gt, i)
        nsolutions = min(
            max_n_solutions, nsolutions
        )  # comb gives maximum number unique associations
        try:
            associations, _ = run_murtys(cost_matrix, nsolutions)
        except AssertionError:
            # Fall back to a single optimal assignment via scipy's Hungarian
            # algorithm when Murty's fails.
            print(
                "[NLLOD] Murtys could not find solution! Using linear sum assignment."
            )
            row_ind, col_ind = linear_sum_assignment(cost_matrix.cpu().numpy())
            associations = -np.ones((1, num_preds + num_gt, 2))
            associations[0, :, 0] = np.arange(num_preds + num_gt)
            associations[0, row_ind, 1] = col_ind
            associations = associations.astype(np.int32)
    return associations
def negative_log_likelihood(
    pred_box_scores: List[torch.tensor],
    pred_box_regs: List[torch.tensor],
    pred_box_covars: List[torch.tensor],
    gt_boxes: List[torch.tensor],
    gt_classes: List[torch.tensor],
    image_sizes: List[List[int]],
    reg_distribution: torch.distributions.distribution.Distribution,
    intensity_func=lambda x: 0.00000001,
    max_n_solutions: int = 5,
    training: bool = True,
    scores_have_bg_cls: bool = True,
    target_deltas: torch.tensor = None,
    matching_distance: str = "log_prob",
    covar_scaling: float = 1.0,
    use_target_delta_matching=False,
    pred_deltas=None,
    pred_delta_chols=None,
):
    """
    Calculate NLL for a PMB prediction.

    For each image in the batch: find the best prediction-to-GT associations
    (``negative_log_likelihood_matching``), evaluate their NLL
    (``compute_negative_log_likelihood``), and average over the batch.
    Non-finite per-image losses are skipped (with a warning) and excluded
    from the batch normalizer.

    Returns:
        Tuple of (mean NLL over the batch, per-image association arrays,
        per-image loss-decomposition dicts).
    """
    assert len(pred_box_scores) == len(pred_box_regs) == len(pred_box_covars)
    device = pred_box_scores[0].device
    nll_total_losses = torch.tensor(
        0, dtype=torch.float64, device=device, requires_grad=training
    )
    bs = len(pred_box_scores)
    total_associations = []
    total_decompositions = []
    for i in range(bs):  # loop over images
        # Normalize intensity_func into a {"matching": ..., "loss": ...}
        # dict, accepting a per-image list, a single dict, or a single
        # callable used for both roles.
        if type(intensity_func) == list:
            if type(intensity_func[i]) != dict:
                ppp = {"matching": intensity_func[i], "loss": intensity_func[i]}
            else:
                ppp = intensity_func[i]
        else:
            if type(intensity_func) != dict:
                ppp = {"matching": intensity_func, "loss": intensity_func}
            else:
                ppp = intensity_func
        # [N, num_classes] or [N, num_classes+1]
        box_scores = pred_box_scores[i]
        num_preds, num_classes = box_scores.shape
        if scores_have_bg_cls:
            num_classes -= 1  # do not count background class
        # [N, num_classes, boxdims]
        box_regs = pred_box_regs[i]
        # [N, num_classes, boxdims, boxdims]
        box_covars = pred_box_covars[i]
        # [M, boxdims]
        gt_box = gt_boxes[i]
        # [M, 1]
        gt_class = gt_classes[i]
        if target_deltas is None:
            target_delta = None
        else:
            # [N, M, boxdims]
            target_delta = target_deltas[i]
        if pred_deltas is None:
            pred_delta = None
        else:
            # [N, M, boxdims]
            pred_delta = pred_deltas[i]
        if pred_delta_chols is None:
            pred_delta_chol = None
        else:
            # [N, M, boxdims]
            pred_delta_chol = pred_delta_chols[i]
        image_size = image_sizes[i]
        # Discrete matching step (no gradients flow through it).
        associations = negative_log_likelihood_matching(
            box_scores,
            box_regs,
            box_covars,
            gt_box,
            gt_class,
            image_size,
            reg_distribution,
            device,
            ppp["matching"],
            max_n_solutions,
            scores_have_bg_cls,
            target_delta,
            matching_distance,
            covar_scaling,
            use_target_delta_matching,
            pred_delta,
            pred_delta_chol,
        )
        # Differentiable NLL evaluation for the found associations.
        nll, decomposition = compute_negative_log_likelihood(
            box_scores=box_scores,
            box_regs=box_regs,
            box_covars=box_covars,
            gt_box=gt_box,
            gt_class=gt_class,
            image_size=image_size,
            reg_distribution=reg_distribution,
            associations=associations,
            device=device,
            intensity_func=ppp["loss"],
            scores_have_bg_cls=scores_have_bg_cls,
            target_delta=target_delta,
            pred_delta=pred_delta,
            pred_delta_chol=pred_delta_chol,
        )
        if torch.isfinite(nll):
            # Normalize by num predictions if training
            if training:
                number_preds = decomposition["num_matched_ppp"][0]+decomposition["num_matched_bernoulli"][0]+decomposition["num_unmatched_bernoulli"][0]
                regularizer = max(1, number_preds)
                nll_total_losses = nll_total_losses + nll / regularizer
            else:
                nll_total_losses = nll_total_losses + nll
        else:
            # Skip this image and shrink the batch normalizer accordingly.
            bs = max(1, bs - 1)
            print("WARNING: Infinite loss in NLL!")
            print(f"box scores: {box_scores}")
            print(f"box_regs: {box_regs}")
            print(f"box_covars: {box_covars}")
            print(f"gt_box: {gt_box}")
            print(f"gt_class: {gt_class}")
            print(f"associations: {associations}")
        total_associations.append(associations)
        total_decompositions.append(decomposition)
    return nll_total_losses / bs, total_associations, total_decompositions
| 21,481 | 37.846293 | 152 | py |
pmb-nll | pmb-nll-main/src/probabilistic_modeling/probabilistic_retinanet.py | import logging
import math
from typing import List, Tuple
import numpy as np
import torch
from core.visualization_tools.probabilistic_visualizer import ProbabilisticVisualizer
from detectron2.data.detection_utils import convert_image_to_rgb
# Detectron Imports
from detectron2.layers import ShapeSpec, batched_nms, cat, nonzero_tuple
from detectron2.modeling.anchor_generator import build_anchor_generator
from detectron2.modeling.meta_arch.build import META_ARCH_REGISTRY
from detectron2.modeling.meta_arch.retinanet import (
RetinaNet,
RetinaNetHead,
permute_to_N_HWA_K,
)
from detectron2.modeling.postprocessing import detector_postprocess
from detectron2.structures import Boxes, Instances
from detectron2.utils.events import get_event_storage
from fvcore.nn import sigmoid_focal_loss_jit, smooth_l1_loss
from matplotlib import cm
from probabilistic_inference import inference_utils
from torch import Tensor, distributions, nn
from probabilistic_modeling.losses import (
negative_log_likelihood,
negative_log_likelihood_matching,
)
# Project Imports
from probabilistic_modeling.modeling_utils import (
PoissonPointProcessIntensityFunction,
clamp_log_variance,
covariance_output_to_cholesky,
get_probabilistic_loss_weight,
unscented_transform,
PoissonPointUnion,
)
@META_ARCH_REGISTRY.register()
class ProbabilisticRetinaNet(RetinaNet):
"""
Probabilistic retinanet class.
"""
    def __init__(self, cfg):
        """Build the probabilistic RetinaNet from a detectron2 config.

        Extends the base RetinaNet with classification-variance and
        box-covariance heads, optional PMB-NLL loss machinery, and dropout.
        """
        super().__init__(cfg)

        # Parse configs
        self.cls_var_loss = cfg.MODEL.PROBABILISTIC_MODELING.CLS_VAR_LOSS.NAME
        self.compute_cls_var = self.cls_var_loss != "none"
        self.cls_var_num_samples = (
            cfg.MODEL.PROBABILISTIC_MODELING.CLS_VAR_LOSS.NUM_SAMPLES
        )

        self.bbox_cov_loss = cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.NAME
        self.compute_bbox_cov = self.bbox_cov_loss != "none"
        self.bbox_cov_num_samples = (
            cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.NUM_SAMPLES
        )
        self.bbox_cov_dist_type = (
            cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.DISTRIBUTION_TYPE
        )
        self.bbox_cov_type = (
            cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.COVARIANCE_TYPE
        )
        if self.bbox_cov_type == "diagonal":
            # Diagonal covariance matrix has N elements
            self.bbox_cov_dims = 4
        else:
            # Number of elements required to describe an NxN covariance matrix is
            # computed as: (N * (N + 1)) / 2
            self.bbox_cov_dims = 10

        if self.bbox_cov_loss == "pmb_negative_log_likelihood":
            # PPP intensity used by the PMB-NLL loss; the constructor allows
            # rebuilding with per-call keyword overrides.
            self.ppp_constructor = lambda x: PoissonPointProcessIntensityFunction(
                cfg, **x
            )
            self.ppp_intensity_function = PoissonPointProcessIntensityFunction(cfg, device=self.device)
            self.nll_max_num_solutions = (
                cfg.MODEL.PROBABILISTIC_MODELING.NLL_MAX_NUM_SOLUTIONS
            )
            self.matching_distance = cfg.MODEL.PROBABILISTIC_MODELING.MATCHING_DISTANCE
            self.use_prediction_mixture = cfg.MODEL.PROBABILISTIC_MODELING.PPP.USE_PREDICTION_MIXTURE

        self.dropout_rate = cfg.MODEL.PROBABILISTIC_MODELING.DROPOUT_RATE
        self.use_dropout = self.dropout_rate != 0.0

        # Step counter used for loss annealing; updated in forward().
        self.current_step = 0
        self.annealing_step = (
            cfg.SOLVER.STEPS[1]
            if cfg.MODEL.PROBABILISTIC_MODELING.ANNEALING_STEP <= 0
            else cfg.MODEL.PROBABILISTIC_MODELING.ANNEALING_STEP
        )

        # Define custom probabilistic head
        backbone_shape = self.backbone.output_shape()
        feature_shapes = [backbone_shape[f] for f in self.head_in_features]
        self.head = ProbabilisticRetinaNetHead(
            cfg,
            self.use_dropout,
            self.dropout_rate,
            self.compute_cls_var,
            self.compute_bbox_cov,
            self.bbox_cov_dims,
            feature_shapes,
        )

        # Send to device
        self.to(self.device)
    def get_ppp_intensity_function(self):
        """Return the Poisson point process intensity function built in __init__."""
        return self.ppp_intensity_function
def forward(
self, batched_inputs, return_anchorwise_output=False, num_mc_dropout_runs=-1
):
"""
Args:
batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
Each item in the list contains the inputs for one image.
For now, each item in the list is a dict that contains:
* image: Tensor, image in (C, H, W) format.
* instances: Instances
Other information that's included in the original dicts, such as:
* "height", "width" (int): the output resolution of the model, used in inference.
See :meth:`postprocess` for details.
return_anchorwise_output (bool): returns raw output for probabilistic inference
num_mc_dropout_runs (int): perform efficient monte-carlo dropout runs by running only the head and
not full neural network.
Returns:
dict[str: Tensor]:
mapping from a named loss to a tensor storing the loss. Used during training only.
"""
# Update step
try:
self.current_step += get_event_storage().iter
except:
self.current_step += 1
# Preprocess image
images = self.preprocess_image(batched_inputs)
# Extract features and generate anchors
features = self.backbone(images.tensor)
features = [features[f] for f in self.head_in_features]
anchors = self.anchor_generator(features)
# MC_Dropout inference forward
if num_mc_dropout_runs > 1:
anchors = anchors * num_mc_dropout_runs
features = features * num_mc_dropout_runs
output_dict = self.produce_raw_output(anchors, features)
return output_dict
# Regular inference forward
if return_anchorwise_output:
return self.produce_raw_output(anchors, features)
# Training and validation forward
(
pred_logits,
pred_anchor_deltas,
pred_logits_vars,
pred_anchor_deltas_vars,
) = self.head(features)
# Transpose the Hi*Wi*A dimension to the middle:
pred_logits = [permute_to_N_HWA_K(x, self.num_classes) for x in pred_logits]
pred_anchor_deltas = [permute_to_N_HWA_K(x, 4) for x in pred_anchor_deltas]
if pred_logits_vars is not None:
pred_logits_vars = [
permute_to_N_HWA_K(x, self.num_classes) for x in pred_logits_vars
]
if pred_anchor_deltas_vars is not None:
pred_anchor_deltas_vars = [
permute_to_N_HWA_K(x, self.bbox_cov_dims)
for x in pred_anchor_deltas_vars
]
if self.training:
assert (
"instances" in batched_inputs[0]
), "Instance annotations are missing in training!"
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
gt_classes, gt_boxes = self.label_anchors(anchors, gt_instances)
self.anchors = torch.cat(
[Boxes.cat(anchors).tensor for i in range(len(gt_instances))], 0
)
# Loss is computed based on what values are to be estimated by the neural
# network
losses = self.losses(
anchors,
gt_classes,
gt_boxes,
pred_logits,
pred_anchor_deltas,
pred_logits_vars,
pred_anchor_deltas_vars,
gt_instances,
images.image_sizes,
)
if self.vis_period > 0:
storage = get_event_storage()
if storage.iter % self.vis_period == 0:
results = self.inference(
anchors, pred_logits, pred_anchor_deltas, images.image_sizes
)
self.visualize_training(
batched_inputs,
results,
pred_logits,
pred_anchor_deltas,
pred_anchor_deltas_vars,
anchors,
)
return losses
else:
results = self.inference(
anchors, pred_logits, pred_anchor_deltas, images.image_sizes
)
processed_results = []
for results_per_image, input_per_image, image_size in zip(
results, batched_inputs, images.image_sizes
):
height = input_per_image.get("height", image_size[0])
width = input_per_image.get("width", image_size[1])
r = detector_postprocess(results_per_image[0], height, width)
processed_results.append({"instances": r})
return processed_results
def visualize_training(
    self,
    batched_inputs,
    results,
    pred_logits,
    pred_anchor_deltas,
    pred_anchor_deltas_vars,
    anchors,
):
    """
    A function used to visualize ground truth images and final network predictions.
    It shows ground truth bounding boxes on the original image and up to 20
    predicted object bounding boxes on the original image.

    Args:
        batched_inputs (list): a list that contains input to the model.
        results (List[Instances]): a list of #images elements.
    """
    from detectron2.utils.visualizer import Visualizer

    # `results` is the (instances, kept anchor indices) pair returned by
    # self.inference(). NOTE: "pred_instaces" is a historical typo kept as-is.
    pred_instaces, kept_idx = results
    assert len(batched_inputs) == len(
        pred_instaces
    ), "Cannot visualize inputs and results of different sizes"
    storage = get_event_storage()
    max_boxes = 20

    image_index = 0  # only visualize a single image
    img = batched_inputs[image_index]["image"]
    img = convert_image_to_rgb(img.permute(1, 2, 0), self.input_format)

    # Extract NMS-kept predictions by indexing the flattened per-level outputs
    # with the kept anchor indices.
    box_scores = torch.cat([logits.squeeze() for logits in pred_logits])[
        kept_idx
    ].sigmoid()
    # Append the implicit background probability (1 - top class score) as an
    # extra last column, as expected by the matching below.
    box_scores = torch.cat(
        (box_scores, 1 - pred_instaces[image_index].scores.unsqueeze(-1)), dim=-1
    )
    anchor_deltas = torch.cat([delta.squeeze() for delta in pred_anchor_deltas])[
        kept_idx
    ]
    anchor_delta_vars = torch.cat(
        [var.squeeze() for var in pred_anchor_deltas_vars]
    )[kept_idx]
    anchor_boxes = torch.cat([box.tensor.squeeze() for box in anchors])[kept_idx]
    # Lower-triangular Cholesky factor reconstructed from the raw covariance
    # head outputs.
    cholesky_decomp = covariance_output_to_cholesky(anchor_delta_vars)

    ######## Get covariance for corner coordinates instead #########
    # The predicted covariances live in delta (dx, dy, dw, dh) space; propagate
    # them to corner-coordinate space via Monte-Carlo sampling.
    multivariate_normal_samples = torch.distributions.MultivariateNormal(
        anchor_deltas, scale_tril=cholesky_decomp
    )
    # Define monte-carlo samples
    distributions_samples = multivariate_normal_samples.rsample((1000,))
    distributions_samples = torch.transpose(
        torch.transpose(distributions_samples, 0, 1), 1, 2
    )
    samples_proposals = torch.repeat_interleave(
        anchor_boxes.unsqueeze(2), 1000, dim=2
    )
    # Transform samples from deltas to boxes
    box_transform = inference_utils.SampleBox2BoxTransform(
        self.box2box_transform.weights
    )
    t_dist_samples = box_transform.apply_samples_deltas(
        distributions_samples, samples_proposals
    )
    # Compute samples mean and covariance matrices.
    _, boxes_covars = inference_utils.compute_mean_covariance_torch(t_dist_samples)

    # Scale if image has been reshaped during processing.
    # Covariances scale with the square of the coordinate scaling factors.
    scale_x, scale_y = (
        img.shape[1] / pred_instaces[image_index].image_size[1],
        img.shape[0] / pred_instaces[image_index].image_size[0],
    )
    scaling = torch.tensor(np.stack([scale_x, scale_y, scale_x, scale_y]) ** 2).to(
        device=boxes_covars.device
    )
    boxes_covars = (boxes_covars * scaling).float()
    processed_results = detector_postprocess(
        pred_instaces[image_index], img.shape[0], img.shape[1]
    )
    predicted_boxes = processed_results.pred_boxes.tensor

    # Regression distribution used by the matcher; for the Laplacian case the
    # covariance diagonal is mapped to the Laplace scale parameter.
    if self.bbox_cov_dist_type == "gaussian":
        reg_distribution = (
            lambda x, y: distributions.multivariate_normal.MultivariateNormal(x, y)
        )
    elif self.bbox_cov_dist_type == "laplacian":
        reg_distribution = lambda x, y: distributions.laplace.Laplace(
            loc=x, scale=(y.diagonal(dim1=-2, dim2=-1) / np.sqrt(2))
        )
    else:
        raise Exception(
            f"Bounding box uncertainty distribution {self.bbox_cov_dist_type} is not available."
        )
    # Match predictions to ground truth to color-code associations below.
    # NOTE(review): the class count 80 is hard-coded here (COCO) — confirm it
    # matches self.num_classes for other datasets.
    associations = negative_log_likelihood_matching(
        box_scores,
        box_regs=predicted_boxes.unsqueeze(1).repeat(1, 80, 1),
        box_covars=boxes_covars.unsqueeze(1).repeat(1, 80, 1, 1),
        gt_box=batched_inputs[image_index]["instances"].gt_boxes.tensor,
        gt_class=batched_inputs[image_index]["instances"].gt_classes,
        image_size=img.shape,
        reg_distribution=reg_distribution,
        device=boxes_covars.device,
        intensity_func=self.ppp_intensity_function,
        max_n_solutions=1,
    )

    ################# Draw results ####################
    # Ground truth boxes get one color each; matched predictions reuse the
    # color of their matched ground truth, unmatched stay black.
    color_map = cm.get_cmap("tab20")
    num_gt = batched_inputs[image_index]["instances"].gt_boxes.tensor.shape[0]
    gt_colors = [color_map(i) for i in range(num_gt)]
    v_gt = Visualizer(img, None)
    v_gt = v_gt.overlay_instances(
        boxes=batched_inputs[image_index]["instances"].gt_boxes,
        assigned_colors=gt_colors,
    )
    anno_img = v_gt.get_image()
    num_preds = len(boxes_covars)
    pred_colors = [(0.0, 0.0, 0.0, 1.0)] * num_preds
    for i in range(num_preds):
        matched_gt = associations[0, i, 1]
        if matched_gt >= 0:
            pred_colors[i] = color_map(matched_gt)
    pred_labels = [
        f"{pred_class.item()}: {round(pred_score.item(),2)}"
        for pred_class, pred_score in zip(
            pred_instaces[image_index].pred_classes,
            pred_instaces[image_index].scores,
        )
    ]
    v_pred = ProbabilisticVisualizer(img, None)
    v_pred = v_pred.overlay_covariance_instances(
        boxes=predicted_boxes[:max_boxes].detach().cpu().numpy(),
        covariance_matrices=boxes_covars[:max_boxes].detach().cpu().numpy(),
        assigned_colors=pred_colors,
        labels=pred_labels[:max_boxes],
    )
    prop_img = v_pred.get_image()
    # Stack GT (top) and predictions (bottom), convert HWC -> CHW for logging.
    vis_img = np.vstack((anno_img, prop_img))
    vis_img = vis_img.transpose(2, 0, 1)
    vis_name = (
        f"Top: GT bounding boxes; Bottom: {max_boxes} Highest Scoring Results"
    )
    storage.put_image(vis_name, vis_img)
def losses(
    self,
    anchors,
    gt_classes,
    gt_boxes,
    pred_class_logits,
    pred_anchor_deltas,
    pred_class_logits_var=None,
    pred_bbox_cov=None,
    gt_instances=None,
    image_sizes: List[Tuple[int, int]] = [],
):
    """
    Compute classification and regression losses, optionally with
    probabilistic (variance/covariance) terms.

    Args:
        For `gt_classes` and `gt_anchors_deltas` parameters, see
        :meth:`RetinaNet.get_ground_truth`.
        Their shapes are (N, R) and (N, R, 4), respectively, where R is
        the total number of anchors across levels, i.e. sum(Hi x Wi x A)
        For `pred_class_logits`, `pred_anchor_deltas`, `pred_class_logits_var` and `pred_bbox_cov`, see
        :meth:`RetinaNetHead.forward`.
    Returns:
        dict[str: Tensor]:
            mapping from a named loss to a scalar tensor
            storing the loss. Used during training only. The dict keys are:
            "loss_cls" and "loss_box_reg"
    """
    num_images = len(gt_classes)
    gt_labels = torch.stack(gt_classes)  # (N, R)

    # The PMB-NLL loss operates on post-NMS detections; run inference without
    # gradients before the per-level lists are flattened below.
    if self.bbox_cov_loss == "pmb_negative_log_likelihood":
        with torch.no_grad():
            nms_results = self.inference(
                anchors, pred_class_logits, pred_anchor_deltas, image_sizes
            )

    anchors = type(anchors[0]).cat(anchors).tensor  # (R, 4)
    gt_anchor_deltas = [
        self.box2box_transform.get_deltas(anchors, k) for k in gt_boxes
    ]
    gt_anchor_deltas = torch.stack(gt_anchor_deltas)  # (N, R, 4)

    # valid_mask drops ignored anchors (label -1); pos_mask additionally drops
    # background anchors (label == num_classes).
    valid_mask = gt_labels >= 0
    pos_mask = (gt_labels >= 0) & (gt_labels != self.num_classes)
    num_pos_anchors = pos_mask.sum().item()
    get_event_storage().put_scalar("num_pos_anchors", num_pos_anchors / num_images)
    # Exponential moving average of the number of positive anchors, used to
    # normalize the losses.
    self.loss_normalizer = self.loss_normalizer_momentum * self.loss_normalizer + (
        1 - self.loss_normalizer_momentum
    ) * max(num_pos_anchors, 1)

    # classification and regression loss
    # Shapes:
    # (N x R, K) for class_logits and class_logits_var.
    # (N x R, 4), (N x R x 10) for pred_anchor_deltas and pred_class_bbox_cov respectively.

    # Transform per-feature layer lists to a single tensor
    pred_class_logits = cat(pred_class_logits, dim=1)
    pred_anchor_deltas = cat(pred_anchor_deltas, dim=1)

    if pred_class_logits_var is not None:
        pred_class_logits_var = cat(pred_class_logits_var, dim=1)

    if pred_bbox_cov is not None:
        pred_bbox_cov = cat(pred_bbox_cov, dim=1)

    gt_classes_target = torch.nn.functional.one_hot(
        gt_labels[valid_mask], num_classes=self.num_classes + 1
    )[:, :-1].to(
        pred_class_logits[0].dtype
    )  # no loss for the last (background) class

    # Classification losses
    if self.compute_cls_var:
        # Compute classification variance according to:
        # "What Uncertainties Do We Need in Bayesian Deep Learning for Computer Vision?", NIPS 2017
        if self.cls_var_loss == "loss_attenuation":
            num_samples = self.cls_var_num_samples

            # Compute standard deviation
            pred_class_logits_var = torch.sqrt(
                torch.exp(pred_class_logits_var[valid_mask])
            )
            pred_class_logits = pred_class_logits[valid_mask]

            # Produce normal samples using logits as the mean and the standard deviation computed above
            # Scales with GPU memory. 12 GB ---> 3 Samples per anchor for
            # COCO dataset.
            univariate_normal_dists = distributions.normal.Normal(
                pred_class_logits, scale=pred_class_logits_var
            )
            pred_class_stochastic_logits = univariate_normal_dists.rsample(
                (num_samples,)
            )
            pred_class_stochastic_logits = pred_class_stochastic_logits.view(
                (
                    pred_class_stochastic_logits.shape[1] * num_samples,
                    pred_class_stochastic_logits.shape[2],
                    -1,
                )
            )
            pred_class_stochastic_logits = pred_class_stochastic_logits.squeeze(2)

            # Produce copies of the target classes to match the number of
            # stochastic samples.
            gt_classes_target = torch.unsqueeze(gt_classes_target, 0)
            gt_classes_target = torch.repeat_interleave(
                gt_classes_target, num_samples, dim=0
            ).view(
                (
                    gt_classes_target.shape[1] * num_samples,
                    gt_classes_target.shape[2],
                    -1,
                )
            )
            gt_classes_target = gt_classes_target.squeeze(2)

            # Stochastic focal loss, averaged over the MC samples.
            loss_cls = (
                sigmoid_focal_loss_jit(
                    pred_class_stochastic_logits,
                    gt_classes_target,
                    alpha=self.focal_loss_alpha,
                    gamma=self.focal_loss_gamma,
                    reduction="sum",
                )
                / (num_samples * max(1, self.loss_normalizer))
            )
        else:
            raise ValueError(
                "Invalid classification loss name {}.".format(self.bbox_cov_loss)
            )
    else:
        # Standard loss computation in case one wants to use this code
        # without any probabilistic inference.
        loss_cls = (
            sigmoid_focal_loss_jit(
                pred_class_logits[valid_mask],
                gt_classes_target,
                alpha=self.focal_loss_alpha,
                gamma=self.focal_loss_gamma,
                reduction="sum",
            )
            / max(1, self.loss_normalizer)
        )

    # FIX: `use_nll_loss` was previously assigned only inside the
    # pmb_negative_log_likelihood branch but is read unconditionally at the
    # end of this method, which raised NameError for every other loss
    # configuration. Initialize it here.
    use_nll_loss = False

    # Compute Regression Loss
    # FIX: the positive-anchor filtering below was previously guarded by the
    # PMB check, leaving `gt_anchors_deltas` undefined for the remaining
    # regression-loss branches. Filter unconditionally and keep unfiltered
    # copies for the PMB-NLL loss, which needs the full prediction set.
    og_pred_anchor_deltas = pred_anchor_deltas
    pred_anchor_deltas = pred_anchor_deltas[pos_mask]
    gt_anchors_deltas = gt_anchor_deltas[pos_mask]
    if self.compute_bbox_cov:
        og_pred_bbox_cov = pred_bbox_cov
        # We have to clamp the output variance else probabilistic metrics
        # go to infinity.
        pred_bbox_cov = clamp_log_variance(pred_bbox_cov[pos_mask])
        if self.bbox_cov_loss == "negative_log_likelihood":
            if self.bbox_cov_type == "diagonal":
                # Compute regression variance according to:
                # "What Uncertainties Do We Need in Bayesian Deep Learning for Computer Vision?", NIPS 2017
                # This implementation with smooth_l1_loss outperforms using
                # torch.distribution.multivariate_normal. Losses might have different numerical values
                # since we do not include constants in this implementation.
                loss_box_reg = (
                    0.5
                    * torch.exp(-pred_bbox_cov)
                    * smooth_l1_loss(
                        pred_anchor_deltas,
                        gt_anchors_deltas,
                        beta=self.smooth_l1_beta,
                    )
                )
                loss_covariance_regularize = 0.5 * pred_bbox_cov
                loss_box_reg += loss_covariance_regularize

                # Sum over all elements
                loss_box_reg = torch.sum(loss_box_reg) / max(
                    1, self.loss_normalizer
                )
            else:
                # Multivariate negative log likelihood. Implemented with
                # pytorch multivariate_normal.log_prob function. Custom implementations fail to finish training
                # due to NAN loss.

                # This is the Cholesky decomposition of the covariance matrix. We reconstruct it from 10 estimated
                # parameters as a lower triangular matrix.
                forecaster_cholesky = covariance_output_to_cholesky(pred_bbox_cov)

                # Compute multivariate normal distribution using torch
                # distribution functions.
                multivariate_normal_dists = (
                    distributions.multivariate_normal.MultivariateNormal(
                        pred_anchor_deltas, scale_tril=forecaster_cholesky
                    )
                )

                loss_box_reg = -multivariate_normal_dists.log_prob(
                    gt_anchors_deltas
                )
                loss_box_reg = torch.sum(loss_box_reg) / max(
                    1, self.loss_normalizer
                )
        elif self.bbox_cov_loss == "second_moment_matching":
            # Compute regression covariance using second moment matching.
            loss_box_reg = smooth_l1_loss(
                pred_anchor_deltas, gt_anchors_deltas, beta=self.smooth_l1_beta
            )

            # Compute errors
            errors = pred_anchor_deltas - gt_anchors_deltas

            if self.bbox_cov_type == "diagonal":
                # Compute second moment matching term.
                second_moment_matching_term = smooth_l1_loss(
                    torch.exp(pred_bbox_cov), errors ** 2, beta=self.smooth_l1_beta
                )
                loss_box_reg += second_moment_matching_term
                loss_box_reg = torch.sum(loss_box_reg) / max(
                    1, self.loss_normalizer
                )
            else:
                # Compute second moment matching term.
                errors = torch.unsqueeze(errors, 2)
                gt_error_covar = torch.matmul(errors, torch.transpose(errors, 2, 1))

                # This is the cholesky decomposition of the covariance matrix. We reconstruct it from 10 estimated
                # parameters as a lower triangular matrix.
                forecaster_cholesky = covariance_output_to_cholesky(pred_bbox_cov)

                predicted_covar = torch.matmul(
                    forecaster_cholesky, torch.transpose(forecaster_cholesky, 2, 1)
                )

                second_moment_matching_term = smooth_l1_loss(
                    predicted_covar,
                    gt_error_covar,
                    beta=self.smooth_l1_beta,
                    reduction="sum",
                )
                loss_box_reg = (
                    torch.sum(loss_box_reg) + second_moment_matching_term
                ) / max(1, self.loss_normalizer)
        elif self.bbox_cov_loss == "energy_loss":
            # Compute regression variance according to energy score loss.
            forecaster_means = pred_anchor_deltas

            # Compute forecaster cholesky. Takes care of diagonal case
            # automatically.
            forecaster_cholesky = covariance_output_to_cholesky(pred_bbox_cov)

            # Define normal distribution samples. To compute energy score,
            # we need i+1 samples.

            # Define per-anchor Distributions
            multivariate_normal_dists = (
                distributions.multivariate_normal.MultivariateNormal(
                    forecaster_means, scale_tril=forecaster_cholesky
                )
            )
            # Define Monte-Carlo Samples
            distributions_samples = multivariate_normal_dists.rsample(
                (self.bbox_cov_num_samples + 1,)
            )

            distributions_samples_1 = distributions_samples[
                0 : self.bbox_cov_num_samples, :, :
            ]
            distributions_samples_2 = distributions_samples[
                1 : self.bbox_cov_num_samples + 1, :, :
            ]

            # Compute energy score
            gt_anchors_deltas_samples = torch.repeat_interleave(
                gt_anchors_deltas.unsqueeze(0), self.bbox_cov_num_samples, dim=0
            )

            energy_score_first_term = (
                2.0
                * smooth_l1_loss(
                    distributions_samples_1,
                    gt_anchors_deltas_samples,
                    beta=self.smooth_l1_beta,
                    reduction="sum",
                )
                / self.bbox_cov_num_samples
            )  # First term

            energy_score_second_term = (
                -smooth_l1_loss(
                    distributions_samples_1,
                    distributions_samples_2,
                    beta=self.smooth_l1_beta,
                    reduction="sum",
                )
                / self.bbox_cov_num_samples
            )  # Second term

            # Final Loss
            loss_box_reg = (
                energy_score_first_term + energy_score_second_term
            ) / max(1, self.loss_normalizer)
        elif self.bbox_cov_loss == "pmb_negative_log_likelihood":
            # PMB-NLL consumes the *unfiltered* per-anchor predictions plus
            # the post-NMS results computed at the top of this method.
            pred_class_scores = pred_class_logits.sigmoid()
            losses = self.nll_od_loss_with_nms(
                nms_results,
                gt_instances,
                anchors,
                pred_class_scores,
                og_pred_anchor_deltas,
                og_pred_bbox_cov,
                image_sizes,
            )
            loss_box_reg = losses["loss_box_reg"]
            use_nll_loss = True
        else:
            raise ValueError(
                "Invalid regression loss name {}.".format(self.bbox_cov_loss)
            )

        # Perform loss annealing. Essential for reliably training variance estimates using NLL in RetinaNet.
        # For energy score and second moment matching, this is optional.
        standard_regression_loss = (
            smooth_l1_loss(
                pred_anchor_deltas,
                gt_anchors_deltas,
                beta=self.smooth_l1_beta,
                reduction="sum",
            )
            / max(1, self.loss_normalizer)
        )
        probabilistic_loss_weight = get_probabilistic_loss_weight(
            self.current_step, self.annealing_step
        )
        loss_box_reg = (
            1.0 - probabilistic_loss_weight
        ) * standard_regression_loss + probabilistic_loss_weight * loss_box_reg
        if self.bbox_cov_loss == "pmb_negative_log_likelihood":
            loss_cls = (1.0 - probabilistic_loss_weight) * loss_cls
    else:
        # Standard regression loss in case no variance is needed to be
        # estimated.
        loss_box_reg = (
            smooth_l1_loss(
                pred_anchor_deltas,
                gt_anchors_deltas,
                beta=self.smooth_l1_beta,
                reduction="sum",
            )
            / max(1, self.loss_normalizer)
        )

    # PMB-NLL already produced a loss dict (with extra logging entries);
    # otherwise assemble the standard two-entry dict.
    if use_nll_loss:
        losses["loss_cls"] = loss_cls
        losses["loss_box_reg"] = loss_box_reg
    else:
        losses = {"loss_cls": loss_cls, "loss_box_reg": loss_box_reg}
    return losses
def nll_od_loss_with_nms(
    self,
    nms_results,
    gt_instances,
    anchors,
    scores,
    deltas,
    pred_covs,
    image_shapes,
):
    """
    Compute the PMB negative log-likelihood object-detection loss on
    post-NMS detections.

    Args:
        nms_results: (instances, kept anchor indices) pair from self.inference().
        gt_instances: per-image ground-truth Instances.
        anchors: flattened anchor tensor (R, 4).
        scores: per-image sigmoid class scores.
        deltas: per-image predicted box deltas.
        pred_covs: per-image raw covariance head outputs.
        image_shapes: list of (h, w) per image.

    Returns:
        dict with "loss_box_reg" (the NLL, carries gradients) plus detached
        logging entries "loss_regression", "loss_cls_matched",
        "loss_cls_unmatched".
    """
    # A matching distance of e.g. "log_prob_2" means: use log_prob matching
    # with the covariance scaled by 2.
    if "log_prob" in self.matching_distance and self.matching_distance != "log_prob":
        covar_scaling = float(self.matching_distance.split("_")[-1])
        matching_distance = "log_prob"
    else:
        covar_scaling = 1
        matching_distance = self.matching_distance
    self.ppp_intensity_function.update_distribution()
    instances, kept_idx = nms_results
    bs = len(instances)
    boxes = [
        self.box2box_transform.apply_deltas(delta, anchors) for delta in deltas
    ]
    # Keep only NMS-surviving predictions; repeat per class so each detection
    # has one (delta, cholesky) per class hypothesis.
    nll_pred_cov = [
        pred_cov[kept].unsqueeze(1).repeat(1, self.num_classes, 1)
        for pred_cov, kept in zip(pred_covs, kept_idx)
    ]
    nll_pred_cov = [covariance_output_to_cholesky(cov) for cov in nll_pred_cov]
    nll_scores = [score[kept] for score, kept in zip(scores, kept_idx)]
    nll_pred_deltas = [
        delta[kept].unsqueeze(1).repeat(1, self.num_classes, 1)
        for delta, kept in zip(deltas, kept_idx)
    ]
    gt_boxes = [instances.gt_boxes.tensor for instances in gt_instances]
    nll_gt_classes = [instances.gt_classes for instances in gt_instances]
    kept_proposals = [anchors[idx] for idx in kept_idx]

    # Propagate the delta-space Gaussians to box space with an unscented
    # transform through the delta->box mapping.
    trans_func = lambda x,y: self.box2box_transform.apply_deltas(x,y)
    box_means = []
    box_chols = []
    for i in range(bs):
        box_mean, box_chol = unscented_transform(nll_pred_deltas[i], nll_pred_cov[i], kept_proposals[i], trans_func)
        box_means.append(box_mean)
        box_chols.append(box_chol)

    # Regression distribution for the NLL; Laplacian maps the cholesky
    # diagonal to the Laplace scale.
    if self.bbox_cov_dist_type == "gaussian":
        regression_dist = (
            lambda x, y: distributions.multivariate_normal.MultivariateNormal(
                loc=x, scale_tril=y
            )
        )
    elif self.bbox_cov_dist_type == "laplacian":
        # Map cholesky decomp to laplacian scale
        regression_dist = lambda x, y: distributions.laplace.Laplace(
            loc=x, scale=y.diagonal(dim1=-2, dim2=-1) / np.sqrt(2)
        )
    else:
        raise Exception(
            f"Bounding box uncertainty distribution {self.bbox_cov_dist_type} is not available."
        )

    # Append a background probability column: 1 - score of the predicted class.
    nll_scores = [
        torch.cat(
            (
                nll_scores[i],
                (
                    1
                    - nll_scores[i][
                        torch.arange(len(kept_idx[i])), instances[i].pred_classes
                    ]
                ).unsqueeze(-1),
            ),
            dim=-1,
        )
        for i in range(bs)
    ]
    # Clamp for numerical stability
    nll_scores = [scores.clamp(1e-6, 1 - 1e-6) for scores in nll_scores]

    if self.use_prediction_mixture:
        # Low-confidence detections are folded into the Poisson point process
        # (PPP) clutter model as a mixture; confident ones stay Bernoulli.
        ppps = []
        src_boxes_tot = []
        src_box_chol_tot = []
        src_boxes_deltas_tot = []
        src_boxes_deltas_chol_tot = []
        src_scores_tot = []
        gt_box_deltas = []
        for i in range(bs):
            image_shape = image_shapes[i]
            h,w = image_shape
            # Normalize box coordinates to [0, 1] via (1/w, 1/h, 1/w, 1/h).
            scaling = torch.tensor([1/w,1/h],device=box_means[i].device).repeat(2)
            pred_box_means = box_means[i]*scaling
            pred_box_chols = torch.diag_embed(scaling)@box_chols[i]
            pred_box_deltas = nll_pred_deltas[i]
            pred_box_delta_chols = nll_pred_cov[i]
            pred_cls_probs = nll_scores[i]
            #max_conf = pred_cls_probs[..., :num_classes].max(dim=1)[0]
            max_conf = 1 - pred_cls_probs[..., -1]
            # Detections at or below the confidence threshold go to the PPP.
            ppp_preds_idx = (
                max_conf <= self.ppp_intensity_function.ppp_confidence_thres
            )
            props = kept_proposals[i][ppp_preds_idx.logical_not()]
            # Get delta between each GT and proposal, batch-wise
            tmp = torch.stack(
                [
                    self.box2box_transform.get_deltas(
                        props,
                        gt_boxes[i][j].unsqueeze(0).repeat(len(props), 1),
                    )
                    for j in range(len(gt_boxes[i]))
                ]
            )
            gt_box_deltas.append(
                tmp.permute(1, 0, 2)
            )  # [gt,pred,boxdim] -> [pred, gt, boxdim]
            gt_boxes[i] = gt_boxes[i]*scaling

            # PPP mixture used for the loss term (unscaled covariances).
            mixture_dict = {}
            mixture_dict["weights"] = max_conf[ppp_preds_idx]
            mixture_dict["means"] = pred_box_means[ppp_preds_idx, 0]
            mixture_dict["covs"] = pred_box_chols[ppp_preds_idx, 0]@pred_box_chols[ppp_preds_idx, 0].transpose(-1,-2)
            mixture_dict["cls_probs"] = pred_cls_probs[ppp_preds_idx, :self.num_classes]
            mixture_dict["reg_dist_type"] = self.bbox_cov_dist_type
            if self.bbox_cov_dist_type == "gaussian":
                mixture_dict[
                    "reg_dist"
                ] = distributions.multivariate_normal.MultivariateNormal
                mixture_dict["reg_kwargs"] = {
                    "scale_tril": pred_box_chols[ppp_preds_idx, 0]
                }
            elif self.bbox_cov_dist_type == "laplacian":
                mixture_dict["reg_dist"] = distributions.laplace.Laplace
                mixture_dict["reg_kwargs"] = {
                    "scale": (
                        pred_box_chols[ppp_preds_idx, 0].diagonal(dim1=-2, dim2=-1)
                        / np.sqrt(2)
                    )
                }
            loss_ppp = PoissonPointUnion()
            loss_ppp.add_ppp(self.ppp_constructor({"predictions": mixture_dict}))
            loss_ppp.add_ppp(self.ppp_intensity_function)

            # Second PPP mixture for matching, with covariance scaled by
            # covar_scaling (parsed from the matching-distance name).
            mixture_dict = {}
            mixture_dict["weights"] = max_conf[ppp_preds_idx]
            mixture_dict["means"] = pred_box_means[ppp_preds_idx, 0]
            scale_mat = torch.eye(pred_box_chols.shape[-1]).to(pred_box_chols.device)*covar_scaling
            scaled_chol = scale_mat@pred_box_chols[ppp_preds_idx, 0]
            mixture_dict["covs"] = (scaled_chol)@(scaled_chol.transpose(-1,-2))
            mixture_dict["cls_probs"] = pred_cls_probs[ppp_preds_idx, :self.num_classes]
            mixture_dict["reg_dist_type"] = self.bbox_cov_dist_type
            if self.bbox_cov_dist_type == "gaussian":
                mixture_dict[
                    "reg_dist"
                ] = distributions.multivariate_normal.MultivariateNormal
                mixture_dict["reg_kwargs"] = {
                    "scale_tril": scaled_chol
                }
            elif self.bbox_cov_dist_type == "laplacian":
                mixture_dict["reg_dist"] = distributions.laplace.Laplace
                mixture_dict["reg_kwargs"] = {
                    "scale": (
                        (scaled_chol).diagonal(dim1=-2, dim2=-1)
                        / np.sqrt(2)
                    )
                }
            match_ppp = PoissonPointUnion()
            match_ppp.add_ppp(self.ppp_constructor({"predictions": mixture_dict}))
            match_ppp.add_ppp(self.ppp_intensity_function)
            ppps.append({"matching": match_ppp, "loss": loss_ppp})

            # Confident (non-PPP) detections remain as Bernoulli components.
            src_boxes_tot.append(pred_box_means[ppp_preds_idx.logical_not()])
            src_box_chol_tot.append(pred_box_chols[ppp_preds_idx.logical_not()])
            src_scores_tot.append(pred_cls_probs[ppp_preds_idx.logical_not()])
            src_boxes_deltas_tot.append(pred_box_deltas[ppp_preds_idx.logical_not()])
            src_boxes_deltas_chol_tot.append(pred_box_delta_chols[ppp_preds_idx.logical_not()])
        nll_pred_deltas = src_boxes_deltas_tot
        nll_pred_delta_chols = src_boxes_deltas_chol_tot
        nll_pred_boxes = src_boxes_tot
        nll_pred_cov = src_box_chol_tot
        nll_scores = src_scores_tot
        use_target_delta_matching = False
    elif self.ppp_intensity_function.ppp_intensity_type == "gaussian_mixture":
        # Gaussian-mixture PPP: all detections stay Bernoulli; the learned
        # intensity function serves both matching and loss.
        ppps = []
        src_boxes_tot = []
        src_box_chol_tot = []
        src_boxes_deltas_tot = []
        src_boxes_deltas_chol_tot = []
        src_scores_tot = []
        gt_box_deltas = []
        for i in range(bs):
            image_shape = image_shapes[i]
            h,w = image_shape
            scaling = torch.tensor([1/w,1/h],device=box_means[i].device).repeat(2)
            pred_box_means = box_means[i]*scaling
            pred_box_chols = torch.diag_embed(scaling)@box_chols[i]
            pred_box_deltas = nll_pred_deltas[i]
            pred_box_delta_chols = nll_pred_cov[i]
            pred_cls_probs = nll_scores[i]
            props = kept_proposals[i]
            # Get delta between each GT and proposal, batch-wise
            tmp = torch.stack(
                [
                    self.box2box_transform.get_deltas(
                        props,
                        gt_boxes[i][j].unsqueeze(0).repeat(len(props), 1),
                    )
                    for j in range(len(gt_boxes[i]))
                ]
            )
            gt_box_deltas.append(
                tmp.permute(1, 0, 2)
            )  # [gt,pred,boxdim] -> [pred, gt, boxdim]
            gt_boxes[i] = gt_boxes[i]*scaling
            src_boxes_tot.append(pred_box_means)
            src_box_chol_tot.append(pred_box_chols)
            src_scores_tot.append(pred_cls_probs)
            src_boxes_deltas_tot.append(pred_box_deltas)
            src_boxes_deltas_chol_tot.append(pred_box_delta_chols)
        nll_pred_deltas = src_boxes_deltas_tot
        nll_pred_delta_chols = src_boxes_deltas_chol_tot
        nll_pred_boxes = src_boxes_tot
        nll_pred_cov = src_box_chol_tot
        nll_scores = src_scores_tot
        use_target_delta_matching = False
        ppps = [{"loss": self.ppp_intensity_function, "matching": self.ppp_intensity_function}]*bs
    else:
        # Default path: work entirely in delta space and match against
        # per-proposal target deltas.
        gt_box_deltas = []
        for i in range(len(gt_boxes)):
            # Get delta between each GT and proposal, batch-wise
            tmp = torch.stack(
                [
                    self.box2box_transform.get_deltas(
                        kept_proposals[i],
                        gt_boxes[i][j].unsqueeze(0).repeat(len(kept_proposals[i]), 1),
                    )
                    for j in range(len(gt_boxes[i]))
                ]
            )
            gt_box_deltas.append(
                tmp.permute(1, 0, 2)
            )  # [gt,pred,boxdim] -> [pred, gt, boxdim]
        use_target_delta_matching = True
        ppps = [{"loss": self.ppp_intensity_function, "matching": self.ppp_intensity_function}]*bs
        nll_pred_delta_chols = nll_pred_cov
        nll_pred_deltas = nll_pred_deltas
        # In delta space the "boxes" fed to the NLL are the deltas themselves.
        nll_pred_boxes = nll_pred_deltas
        nll_pred_cov = nll_pred_cov

    nll, associations, decompositions = negative_log_likelihood(
        nll_scores,
        nll_pred_boxes,
        nll_pred_cov,
        gt_boxes,
        nll_gt_classes,
        image_shapes,
        regression_dist,
        ppps,
        self.nll_max_num_solutions,
        target_deltas=gt_box_deltas,
        matching_distance=matching_distance,
        use_target_delta_matching=use_target_delta_matching,
        pred_deltas=nll_pred_deltas,
        pred_delta_chols=nll_pred_delta_chols,
    )

    # Save some stats
    storage = get_event_storage()
    num_classes = self.num_classes
    # Mean of squared cholesky diagonals over images with detections.
    mean_variance = np.mean(
        [
            cov.diagonal(dim1=-2,dim2=-1)
            .pow(2)
            .mean()
            .item()
            for cov in nll_pred_cov
            if cov.shape[0] > 0
        ]
    )
    storage.put_scalar("nll/mean_covariance", mean_variance)
    ppp_intens = np.sum([ppp["loss"].integrate(
        torch.as_tensor(image_shapes).to(self.device), num_classes
    )
    .mean()
    .item()
    for ppp in ppps
    ])
    storage.put_scalar("nll/ppp_intensity", ppp_intens)
    # Per-component averages of the best (index 0) association hypothesis,
    # clipped to avoid inf propagating into the logs.
    reg_loss = np.mean(
        [
            np.clip(
                decomp["matched_bernoulli_reg"][0]
                / (decomp["num_matched_bernoulli"][0] + 1e-6),
                -1e25,
                1e25,
            )
            for decomp in decompositions
        ]
    )
    cls_loss_match = np.mean(
        [
            np.clip(
                decomp["matched_bernoulli_cls"][0]
                / (decomp["num_matched_bernoulli"][0] + 1e-6),
                -1e25,
                1e25,
            )
            for decomp in decompositions
        ]
    )
    cls_loss_no_match = np.mean(
        [
            np.clip(
                decomp["unmatched_bernoulli"][0]
                / (decomp["num_unmatched_bernoulli"][0] + 1e-6),
                -1e25,
                1e25,
            )
            for decomp in decompositions
        ]
    )

    # Collect all losses
    losses = dict()
    losses["loss_box_reg"] = nll
    # Add losses for logging, these do not propagate gradients
    losses["loss_regression"] = torch.tensor(reg_loss).to(nll.device)
    losses["loss_cls_matched"] = torch.tensor(cls_loss_match).to(nll.device)
    losses["loss_cls_unmatched"] = torch.tensor(cls_loss_no_match).to(nll.device)
    return losses
def produce_raw_output(self, anchors, features):
    """
    Run the prediction head on ``features`` and return the raw, pre-NMS
    outputs, to be used for custom fusion operations.

    Returns a dict with keys "anchors", "box_cls", "box_delta",
    "box_cls_var", "box_reg_var" and, for the PMB-NLL configuration,
    "ppp" (the PPP intensity weights). Shapes:
    (N x R, K) for class logits / logit variances,
    (N x R, 4) and (N x R x 10) for anchor deltas / box covariances.
    """
    # Run the head once; variance outputs may be None depending on config.
    head_logits, head_deltas, head_logit_vars, head_delta_vars = self.head(features)

    # Move the Hi*Wi*A dimension to the middle for every feature level.
    head_logits = [permute_to_N_HWA_K(t, self.num_classes) for t in head_logits]
    head_deltas = [permute_to_N_HWA_K(t, 4) for t in head_deltas]
    if head_logit_vars is not None:
        head_logit_vars = [
            permute_to_N_HWA_K(t, self.num_classes) for t in head_logit_vars
        ]
    if head_delta_vars is not None:
        head_delta_vars = [
            permute_to_N_HWA_K(t, self.bbox_cov_dims) for t in head_delta_vars
        ]

    # Assemble the raw output dictionary in one literal.
    raw_output = {
        "anchors": anchors,
        "box_cls": head_logits,
        "box_delta": head_deltas,
        "box_cls_var": head_logit_vars,
        "box_reg_var": head_delta_vars,
    }
    # The PMB-NLL variant additionally exposes the PPP intensity weights.
    if (
        self.compute_bbox_cov
        and self.bbox_cov_loss == "pmb_negative_log_likelihood"
    ):
        raw_output["ppp"] = self.ppp_intensity_function.get_weights()
    return raw_output
def inference(
    self,
    anchors: List[Boxes],
    pred_logits: List[Tensor],
    pred_anchor_deltas: List[Tensor],
    image_sizes: List[Tuple[int, int]],
):
    """
    Arguments:
        anchors (list[Boxes]): A list of #feature level Boxes.
            The Boxes contain anchors of this image on the specific feature level.
        pred_logits, pred_anchor_deltas: list[Tensor], one per level. Each
            has shape (N, Hi * Wi * Ai, K or 4)
        image_sizes (List[(h, w)]): the input image sizes
    Returns:
        results (List[Instances]): a list of #images elements, paired with a
        list of the kept anchor indices per image.
    """
    instances_per_image: List[Instances] = []
    kept_idxs_per_image = []
    # Run single-image inference for every image in the batch, slicing out
    # that image's predictions from each feature level.
    for img_idx, image_size in enumerate(image_sizes):
        logits_i = [level[img_idx] for level in pred_logits]
        deltas_i = [level[img_idx] for level in pred_anchor_deltas]
        instances_i, kept_i = self.inference_single_image(
            anchors, logits_i, deltas_i, image_size
        )
        instances_per_image.append(instances_i)
        kept_idxs_per_image.append(kept_i)
    return instances_per_image, kept_idxs_per_image
def inference_single_image(
    self,
    anchors: List[Boxes],
    box_cls: List[Tensor],
    box_delta: List[Tensor],
    image_size: Tuple[int, int],
):
    """
    Single-image inference. Return bounding-box detection results by thresholding
    on scores and applying non-maximum suppression (NMS).
    Arguments:
        anchors (list[Boxes]): list of #feature levels. Each entry contains
            a Boxes object, which contains all the anchors in that feature level.
        box_cls (list[Tensor]): list of #feature levels. Each entry contains
            tensor of size (H x W x A, K)
        box_delta (list[Tensor]): Same shape as 'box_cls' except that K becomes 4.
        image_size (tuple(H, W)): a tuple of the image height and width.
    Returns:
        Same as `inference`, but for only one image: the kept Instances and
        the (globally offset) indices of the anchors they came from.
    """
    boxes_all = []
    scores_all = []
    class_idxs_all = []
    anchor_idxs_all = []

    # Iterate over every feature level
    for box_cls_i, box_reg_i, anchors_i in zip(box_cls, box_delta, anchors):
        # (HxWxAxK,)
        predicted_prob = box_cls_i.flatten().sigmoid()

        # Apply two filtering below to make NMS faster.
        # 1. Keep boxes with confidence score higher than threshold
        keep_idxs = predicted_prob > self.test_score_thresh
        predicted_prob = predicted_prob[keep_idxs]
        topk_idxs = nonzero_tuple(keep_idxs)[0]

        # 2. Keep top k top scoring boxes only
        num_topk = min(self.test_topk_candidates, topk_idxs.size(0))
        # torch.sort is actually faster than .topk (at least on GPUs)
        predicted_prob, idxs = predicted_prob.sort(descending=True)
        predicted_prob = predicted_prob[:num_topk]
        topk_idxs = topk_idxs[idxs[:num_topk]]

        # Flattened indices encode (anchor, class); decode them.
        anchor_idxs = topk_idxs // self.num_classes
        classes_idxs = topk_idxs % self.num_classes

        box_reg_i = box_reg_i[anchor_idxs]
        anchors_i = anchors_i[anchor_idxs]
        # predict boxes
        predicted_boxes = self.box2box_transform.apply_deltas(
            box_reg_i, anchors_i.tensor
        )
        boxes_all.append(predicted_boxes)
        scores_all.append(predicted_prob)
        class_idxs_all.append(classes_idxs)
        anchor_idxs_all.append(anchor_idxs)

    # Offset per-level anchor indices by the number of anchors in all previous
    # levels so they index into the flattened (all-level) anchor tensor.
    num_anchors_per_feat_lvl = [anchor.tensor.shape[0] for anchor in anchors]
    accum_anchor_nums = np.cumsum(num_anchors_per_feat_lvl).tolist()
    accum_anchor_nums = [0] + accum_anchor_nums
    anchor_idxs_all = [
        anchor_idx + prev_num_feats
        for anchor_idx, prev_num_feats in zip(anchor_idxs_all, accum_anchor_nums)
    ]

    boxes_all, scores_all, class_idxs_all, anchor_idxs_all = [
        cat(x) for x in [boxes_all, scores_all, class_idxs_all, anchor_idxs_all]
    ]
    # Class-aware NMS, then cap the number of detections.
    keep = batched_nms(boxes_all, scores_all, class_idxs_all, self.test_nms_thresh)
    keep = keep[: self.max_detections_per_image]

    result = Instances(image_size)
    result.pred_boxes = Boxes(boxes_all[keep])
    result.scores = scores_all[keep]
    result.pred_classes = class_idxs_all[keep]
    return result, anchor_idxs_all[keep]
class ProbabilisticRetinaNetHead(RetinaNetHead):
    """
    The head used in ProbabilisticRetinaNet for object class probability estimation, box regression, box covariance estimation.
    It has three subnets for the three tasks, with a common structure but separate parameters.
    """

    def __init__(
        self,
        cfg,
        use_dropout,
        dropout_rate,
        compute_cls_var,
        compute_bbox_cov,
        bbox_cov_dims,
        input_shape: List[ShapeSpec],
    ):
        # Initialize the standard RetinaNet head, then rebuild the subnets
        # below (with optional dropout and extra variance branches).
        super().__init__(cfg, input_shape)

        # Extract config information
        # fmt: off
        in_channels = input_shape[0].channels
        num_classes = cfg.MODEL.RETINANET.NUM_CLASSES
        num_convs = cfg.MODEL.RETINANET.NUM_CONVS
        prior_prob = cfg.MODEL.RETINANET.PRIOR_PROB
        num_anchors = build_anchor_generator(cfg, input_shape).num_cell_anchors
        # fmt: on
        assert (
            len(set(num_anchors)) == 1
        ), "Using different number of anchors between levels is not currently supported!"
        num_anchors = num_anchors[0]

        self.compute_cls_var = compute_cls_var
        self.compute_bbox_cov = compute_bbox_cov
        # bbox_cov_dims: 4 for a diagonal covariance, 10 for a full one.
        self.bbox_cov_dims = bbox_cov_dims

        # For consistency all configs are grabbed from original RetinaNet
        self.use_dropout = use_dropout
        self.dropout_rate = dropout_rate

        # Shared conv towers for classification and box regression; dropout
        # (if enabled) is appended after every conv+ReLU pair and also
        # supports MC-dropout at inference.
        cls_subnet = []
        bbox_subnet = []
        for _ in range(num_convs):
            cls_subnet.append(
                nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
            )
            cls_subnet.append(nn.ReLU())
            bbox_subnet.append(
                nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
            )
            bbox_subnet.append(nn.ReLU())
            if self.use_dropout:
                cls_subnet.append(nn.Dropout(p=self.dropout_rate))
                bbox_subnet.append(nn.Dropout(p=self.dropout_rate))
        self.cls_subnet = nn.Sequential(*cls_subnet)
        self.bbox_subnet = nn.Sequential(*bbox_subnet)
        self.cls_score = nn.Conv2d(
            in_channels, num_anchors * num_classes, kernel_size=3, stride=1, padding=1
        )
        self.bbox_pred = nn.Conv2d(
            in_channels, num_anchors * 4, kernel_size=3, stride=1, padding=1
        )

        # Re-initialize all conv weights (overrides the parent init).
        for modules in [
            self.cls_subnet,
            self.bbox_subnet,
            self.cls_score,
            self.bbox_pred,
        ]:
            for layer in modules.modules():
                if isinstance(layer, nn.Conv2d):
                    torch.nn.init.normal_(layer.weight, mean=0, std=0.01)
                    torch.nn.init.constant_(layer.bias, 0)

        # Use prior in model initialization to improve stability
        bias_value = -math.log((1 - prior_prob) / prior_prob)
        torch.nn.init.constant_(self.cls_score.bias, bias_value)

        # Create subnet for classification variance estimation.
        # Bias -10.0 starts the predicted log-variance very small.
        if self.compute_cls_var:
            self.cls_var = nn.Conv2d(
                in_channels,
                num_anchors * num_classes,
                kernel_size=3,
                stride=1,
                padding=1,
            )
            for layer in self.cls_var.modules():
                if isinstance(layer, nn.Conv2d):
                    torch.nn.init.normal_(layer.weight, mean=0, std=0.01)
                    torch.nn.init.constant_(layer.bias, -10.0)

        # Create subnet for bounding box covariance estimation.
        if self.compute_bbox_cov:
            self.bbox_cov = nn.Conv2d(
                in_channels,
                num_anchors * self.bbox_cov_dims,
                kernel_size=3,
                stride=1,
                padding=1,
            )
            for layer in self.bbox_cov.modules():
                if isinstance(layer, nn.Conv2d):
                    torch.nn.init.normal_(layer.weight, mean=0, std=0.0001)
                    torch.nn.init.constant_(layer.bias, 0)

    def forward(self, features):
        """
        Arguments:
            features (list[Tensor]): FPN feature map tensors in high to low resolution.
                Each tensor in the list correspond to different feature levels.
        Returns:
            logits (list[Tensor]): #lvl tensors, each has shape (N, AxK, Hi, Wi).
                The tensor predicts the classification probability
                at each spatial position for each of the A anchors and K object
                classes.
            logits_var (list[Tensor]): #lvl tensors, each has shape (N, AxK, Hi, Wi).
                The tensor predicts the variance of the logits modeled as a univariate
                Gaussian distribution at each spatial position for each of the A anchors and K object
                classes.
            bbox_reg (list[Tensor]): #lvl tensors, each has shape (N, Ax4, Hi, Wi).
                The tensor predicts 4-vector (dx,dy,dw,dh) box
                regression values for every anchor. These values are the
                relative offset between the anchor and the ground truth box.
            bbox_cov (list[Tensor]): #lvl tensors, each has shape (N, Ax4 or Ax10, Hi, Wi).
                The tensor predicts elements of the box
                covariance values for every anchor. The dimensions of the box covarianc
                depends on estimating a full covariance (10) or a diagonal covariance matrix (4).
        """
        logits = []
        bbox_reg = []

        logits_var = []
        bbox_cov = []
        for feature in features:
            logits.append(self.cls_score(self.cls_subnet(feature)))
            bbox_reg.append(self.bbox_pred(self.bbox_subnet(feature)))
            # Variance branches reuse the same conv towers as the mean
            # branches (note: this re-runs the subnets on each feature).
            if self.compute_cls_var:
                logits_var.append(self.cls_var(self.cls_subnet(feature)))
            if self.compute_bbox_cov:
                bbox_cov.append(self.bbox_cov(self.bbox_subnet(feature)))

        # Slots for disabled branches are filled with None so callers can
        # always unpack four values.
        return_vector = [logits, bbox_reg]
        if self.compute_cls_var:
            return_vector.append(logits_var)
        else:
            return_vector.append(None)

        if self.compute_bbox_cov:
            return_vector.append(bbox_cov)
        else:
            return_vector.append(None)
        return return_vector
| 58,037 | 39.164706 | 127 | py |
pmb-nll | pmb-nll-main/src/probabilistic_modeling/modeling_utils.py | import copy
import math
import torch
from sklearn.mixture._gaussian_mixture import _compute_precision_cholesky
from torch import nn
from torch.distributions import Distribution
from torch.distributions.categorical import Categorical
from torch.distributions.independent import Independent
from torch.distributions.laplace import Laplace
from torch.distributions.mixture_same_family import MixtureSameFamily
from torch.distributions.multivariate_normal import MultivariateNormal
class ClassRegDist(Distribution):
    """Joint distribution over a box-regression vector and a class label.

    An event is the concatenation ``[reg_0, ..., reg_{d-1}, class_idx]``: the
    first ``d`` entries are scored by a regression distribution built as
    ``reg_dist(loc, **reg_kwargs)`` and the last entry by a ``Categorical``
    over classes.
    """

    def __init__(
        self,
        loc,
        reg_dist,
        reg_kwargs,
        probs=None,
        logits=None,
        independent_reg_dist=False,
    ):
        regression = reg_dist(loc, **reg_kwargs)
        if independent_reg_dist:
            # Treat the last dimension as one event so log_prob sums over it.
            regression = Independent(regression, 1)
        self.reg_dist = regression
        self.cls_dist = Categorical(probs=probs, logits=logits)
        # Scoring mode for the regression part; see set_dist_mode / log_prob.
        self.dist_type = "log_prob"
        super().__init__(
            loc.shape[:-1], torch.Size([loc.shape[-1] + 1]), validate_args=False
        )

    def log_prob(self, value):
        """Score ``value`` = [regression targets..., class index]."""
        box_part = value[..., :-1]
        cls_score = self.cls_dist.log_prob(value[..., -1])
        mode = self.dist_type
        if mode == "euclidian":
            # Negative Euclidean distance to the regression mean.
            box_score = -(self.reg_dist.mean - box_part).pow(2).sum(-1).sqrt()
        elif mode == "euclidian_squared":
            box_score = -(self.reg_dist.mean - box_part).pow(2).sum(-1)
        else:
            box_score = self.reg_dist.log_prob(box_part)
        return cls_score + box_score

    def set_dist_mode(self, dist_type):
        """Select how log_prob scores the regression part.

        One of "log_prob" (default), "euclidian" or "euclidian_squared".
        """
        self.dist_type = dist_type
def unscented_transform(means, chols, anchors, trans_func):
    """Propagate Gaussians through a nonlinear transform via sigma points.

    Definition 1 in https://arxiv.org/abs/2104.01958

    Args:
        means: [..., n] Gaussian means.
        chols: [..., n, n] Cholesky factors of the covariances.
        anchors: [A, n] anchors; each anchor is shared by len(means) // A
            consecutive means after flattening.
        trans_func: callable(points [M, n], anchors [M, n]) -> [M, n], the
            deterministic transform to push the distribution through.

    Returns:
        (transformed_means, transformed_chols) with the same shapes as the
        inputs.
    """
    n = means.shape[-1]
    kappa = n - 3
    # Remember the caller's shapes and work internally in flat [N, n] / [N, n, n].
    # BUGFIX: these were previously only bound inside the reshape branches,
    # raising NameError at return for already-flat inputs.
    old_means_shape = means.shape
    old_chol_shape = chols.shape
    if len(means.shape) > 2:
        means = means.reshape(-1, n)
    # BUGFIX: was `if len(chols > 3):`, which compared the tensor elementwise
    # and was truthy for any non-empty input; the intent is a rank check.
    if len(chols.shape) > 3:
        chols = chols.reshape(-1, n, n)
    N = len(means)
    # 2n+1 sigma-point weights; the central point (the mean) gets kappa/(n+kappa).
    weights = torch.ones((1, 2 * n + 1, 1), device=means.device) / (2 * (n + kappa))
    weights[0, 0, 0] = kappa / (n + kappa)
    # means [N, n], chols [N, n, n]
    # [N, 1, n]
    sigma_points1 = means.unsqueeze(1)
    # [N, n, n]
    sigma_points2 = means.unsqueeze(1) + math.sqrt(n + kappa) * chols
    # [N, n, n]
    sigma_points3 = means.unsqueeze(1) - math.sqrt(n + kappa) * chols
    # [N, 2n+1, n]
    sigma_points = torch.cat((sigma_points1, sigma_points2, sigma_points3), dim=1)
    # One anchor per mean, replicated across the 2n+1 sigma points.
    repeated_anchors = (
        anchors.repeat_interleave(len(means) // len(anchors), dim=0)
        .unsqueeze(1)
        .repeat(1, 2 * n + 1, 1)
        .reshape(-1, n)
    )
    transformed_sigma_points = trans_func(sigma_points.reshape(-1, n), repeated_anchors)
    transformed_sigma_points = transformed_sigma_points.reshape(N, 2 * n + 1, n)
    transformed_means = (transformed_sigma_points * weights).sum(dim=1)
    residuals = transformed_sigma_points - transformed_means.unsqueeze(1)
    # [N, 2n+1, n, 1]
    residuals = residuals.unsqueeze(-1)
    # [N, n, n]: weighted outer products of the residuals.
    transformed_covs = (
        weights.unsqueeze(-1) * residuals @ residuals.transpose(-1, -2)
    ).sum(dim=1)
    transformed_chols, info = torch.linalg.cholesky_ex(transformed_covs)
    if not (info == 0).all():
        # Cholesky failed for some covariances: clamp the diagonal into a safe
        # range to avoid downstream errors and dump the offending inputs.
        transformed_chols = torch.diag_embed(
            torch.diagonal(transformed_chols, dim1=-2, dim2=-1).clamp(
                math.exp(-7), math.exp(10)
            )
        ) + torch.tril(transformed_chols, -1)
        print("***************************")
        for cov, res, trans_mean, mean, anchor, chol in zip(
            transformed_covs[info != 0],
            residuals[info != 0].squeeze(-1),
            transformed_means[info != 0],
            means[info != 0],
            anchors.repeat_interleave(len(means) // len(anchors), dim=0)[info != 0],
            chols[info != 0],
        ):
            print(cov)
            print(res)
            print(trans_mean)
            print(mean)
            print(anchor)
            print(chol)
            print("+++++++++++++++++++++++++++++++++++")
        print("***************************")
    return (
        transformed_means.reshape(old_means_shape),
        transformed_chols.reshape(old_chol_shape),
    )
def covariance_output_to_cholesky(pred_bbox_cov):
    """
    Transforms output to covariance cholesky decomposition.

    Args:
        pred_bbox_cov (kx4 or kx10): Output covariance matrix elements.

    Returns:
        predicted_cov_cholesky (kx4x4): cholesky factor matrix
    """
    if pred_bbox_cov.shape[0] == 0:
        # Nothing to build: keep an empty (0, 4, 4) tensor.
        return pred_bbox_cov.reshape((0, 4, 4))
    # The first four channels are log-variances; their sqrt-exp forms the
    # diagonal of the Cholesky factor.
    chol = torch.diag_embed(torch.sqrt(torch.exp(pred_bbox_cov[..., :4])))
    if pred_bbox_cov.shape[-1] > 4:
        # The remaining six channels fill the strictly-lower triangle.
        rows, cols = torch.tril_indices(row=4, col=4, offset=-1)
        chol[..., rows, cols] = pred_bbox_cov[..., 4:]
    return chol
def clamp_log_variance(pred_bbox_cov, clamp_min=-7.0, clamp_max=10.0):
    """
    Tiny function that clamps variance for consistency across all methods.
    """
    # Only the first four channels (log-variances) are clamped; any remaining
    # off-diagonal channels pass through unchanged.
    clamped_vars = pred_bbox_cov[..., 0:4].clamp(clamp_min, clamp_max)
    return torch.cat((clamped_vars, pred_bbox_cov[..., 4:]), dim=-1)
def get_probabilistic_loss_weight(current_step, annealing_step):
    """
    Tiny function to get adaptive probabilistic loss weight for consistency across all methods.
    """
    # Linear ramp in [0, 1] over annealing_step iterations, mapped through an
    # exponential warm-up so the weight stays small early in training.
    ramp = min(1.0, current_step / annealing_step)
    return (100 ** ramp - 1.0) / (100.0 - 1.0)
def freeze_non_probabilistic_weights(cfg, model):
    """
    Tiny function to only keep a small subset of weight non-frozen.
    """
    if cfg.MODEL.TRAIN_ONLY_PPP:
        # Only the PPP intensity function may keep training (gated on TRAIN_PPP).
        print("[NLLOD]: Freezing all non-PPP weights")
        for name, param in model.named_parameters():
            is_ppp = "ppp_intensity_function" in name
            param.requires_grad = cfg.MODEL.TRAIN_PPP if is_ppp else False
        print("[NLLOD]: Froze all non-PPP weights")
    elif cfg.MODEL.TRAIN_ONLY_UNCERTAINTY_PREDS:
        # Keep covariance heads trainable, gate the PPP on TRAIN_PPP, freeze
        # everything else.
        print("[NLLOD]: Freezing all non-probabilistic weights")
        for name, param in model.named_parameters():
            if "ppp_intensity_function" in name:
                param.requires_grad = cfg.MODEL.TRAIN_PPP
            elif "bbox_cov" in name:
                param.requires_grad = True
            else:
                param.requires_grad = False
        print("[NLLOD]: Froze all non-probabilistic weights")
    else:
        # Default: leave the model as-is, only gating the PPP weights.
        for name, param in model.named_parameters():
            if "ppp_intensity_function" in name:
                param.requires_grad = cfg.MODEL.TRAIN_PPP
class PoissonPointProcessBase(nn.Module):
    """Common base for PPP intensity models.

    Carries a single flag telling subclasses whether incoming boxes should be
    normalized by image size before evaluation.
    """

    def __init__(self):
        super().__init__()
        # Default: evaluate boxes in absolute (pixel) coordinates.
        self.normalize_bboxes = False

    def set_normalization_of_bboxes(self, normalize_bboxes):
        """Toggle normalization of bounding boxes by image size."""
        self.normalize_bboxes = normalize_bboxes
class PoissonPointUnion(PoissonPointProcessBase):
    """Union (superposition) of several PPP intensity functions.

    The union's intensity is the sum of the member intensities, so its
    log-intensity is a logsumexp over members and its integral is the sum of
    the member integrals.
    """

    def __init__(self):
        super().__init__()
        # NOTE(review): plain list rather than nn.ModuleList — member
        # parameters are not registered on this module; confirm intentional.
        self.ppps = []

    def add_ppp(self, ppp):
        """Register one more component intensity."""
        self.ppps.append(ppp)

    def set_normalization_of_bboxes(self, normalize_bboxes):
        """Propagate the normalization flag to every component."""
        for component in self.ppps:
            component.normalize_bboxes = normalize_bboxes

    def integrate(self, image_sizes, num_classes):
        """Total expected number of points: sum of component integrals."""
        total = 0
        for component in self.ppps:
            total = total + component.integrate(image_sizes, num_classes)
        return total

    def forward(
        self,
        src,
        image_sizes=[],
        num_classes=-1,
        integrate=False,
        src_is_features=False,
        dist_type="log_prob",
    ):
        if integrate:
            return self.integrate(image_sizes, num_classes)
        # Stack per-component log intensities as columns, then logsumexp across
        # components (log of the summed intensities).
        per_component = [
            component(
                src, image_sizes, num_classes, integrate, src_is_features, dist_type
            )[:, None]
            for component in self.ppps
        ]
        return torch.logsumexp(torch.cat(per_component, 1), 1)
class PoissonPointProcessUniform(PoissonPointProcessBase):
    """PPP with a spatially uniform intensity over valid boxes.

    The intensity factorizes into a class prior, a uniform density over valid
    (x1 < x2, y1 < y2) boxes inside the image, and a global rate.

    Args:
        class_dist_log: log class-prior vector (tensor or anything
            ``torch.tensor`` accepts).
        ppp_rate: expected number of points per image.
        uniform_center_pos: kept for interface parity with the GMM variant;
            unused here.
        device: device the constant tensors are placed on.
    """

    def __init__(
        self,
        class_dist_log,
        ppp_rate,
        uniform_center_pos,
        device=torch.device("cpu"),
    ):
        super().__init__()
        if not type(class_dist_log) == torch.Tensor:
            class_dist_log = torch.tensor(class_dist_log)
        self.class_dist_log = class_dist_log.to(device)
        self.ppp_rate = torch.tensor([ppp_rate]).to(device)
        self.uniform_center_pos = uniform_center_pos
        self.device = device

    def forward(
        self,
        src,
        image_sizes=[],
        num_classes=-1,
        integrate=False,
        src_is_features=False,
        dist_type="log_prob",
    ):
        """Log intensity for rows of ``src`` = [x1, y1, x2, y2, class].

        ``dist_type`` is accepted (and ignored) for signature compatibility:
        PoissonPointUnion.forward passes six positional arguments, which
        previously raised TypeError against this method's five parameters.
        """
        if integrate:
            return self.integrate(image_sizes, num_classes)
        assert len(image_sizes) == 1
        cls_log_probs = self.class_dist_log[src[..., -1].long()]
        # Uniform density over ordered box corners:
        # log(1/(W^2/2) * 1/(H^2/2)); the /2 factors account for the
        # x1 < x2 and y1 < y2 constraints.
        box_log_probs = (-image_sizes[0].log() * 2 + math.log(2)).sum()
        return cls_log_probs + box_log_probs + self.ppp_rate.log()

    def integrate(self, image_sizes, num_classes):
        """Integral of the intensity = the configured rate."""
        return self.ppp_rate
class PoissonPointProcessGMM(PoissonPointProcessBase):
    """PPP whose spatial intensity is a pre-fitted sklearn Gaussian mixture.

    The GMM is fitted on boxes in normalized [0, 1] coordinates; at evaluation
    time its means and covariances are rescaled to the current image size.

    Args:
        gmm: fitted sklearn GaussianMixture. If ``uniform_center_pos`` is
            True it models only (w, h) box sizes; otherwise full
            (x1, y1, x2, y2) boxes.
        class_dist_log: log class-prior vector (tensor or anything
            ``torch.tensor`` accepts).
        ppp_rate: expected number of points per image.
        uniform_center_pos: if True, box centers are uniform over the image
            and the GMM scores box sizes only.
        device: device the constant tensors are placed on.
    """

    def __init__(
        self,
        gmm,
        class_dist_log,
        ppp_rate,
        uniform_center_pos,
        device=torch.device("cpu"),
    ):
        super().__init__()
        if not type(class_dist_log) == torch.Tensor:
            class_dist_log = torch.tensor(class_dist_log)
        self.class_dist_log = class_dist_log.to(device)
        self.gmm = gmm
        self.ppp_rate = torch.tensor([ppp_rate]).to(device)
        self.uniform_center_pos = uniform_center_pos
        self.device = device

    def forward(
        self,
        src,
        image_sizes=[],
        num_classes=-1,
        integrate=False,
        src_is_features=False,
        dist_type="log_prob",
    ):
        """Log intensity for rows of ``src`` = [x1, y1, x2, y2, class].

        ``dist_type`` is accepted (and ignored) for signature compatibility:
        PoissonPointUnion.forward passes six positional arguments, which
        previously raised TypeError against this method's five parameters.
        """
        if integrate:
            return self.integrate(image_sizes, num_classes)
        assert len(image_sizes) == 1
        img_size = image_sizes[0].flip(0).repeat(2)  # w,h,w,h
        scale = torch.diag_embed(img_size).cpu().numpy()
        # Rescale a deep copy so the stored (normalized) GMM is never mutated.
        gmm = copy.deepcopy(self.gmm)
        boxes = src[..., :-1]
        if self.uniform_center_pos:
            # GMM models (w, h) only; centers are uniform over the image.
            gmm.means_ = gmm.means_ * img_size.cpu().numpy()[:2]
            gmm.covariances_ = scale[:2, :2] @ gmm.covariances_ @ scale[:2, :2].T
            gmm.precisions_cholesky_ = _compute_precision_cholesky(
                gmm.covariances_, gmm.covariance_type
            )
            img_area = img_size[0] * img_size[1]
            # N, 2 (w,h)
            box_sizes = torch.cat(
                (
                    (boxes[..., 2] - boxes[..., 0])[:, None],
                    (boxes[..., 3] - boxes[..., 1])[:, None],
                ),
                1,
            )
            box_log_probs = torch.tensor(gmm.score_samples(box_sizes.cpu().numpy())).to(
                box_sizes.device
            )
            # Uniform center position contributes a 1/area density factor.
            box_log_probs = box_log_probs - img_area.log()
        else:
            # GMM models full (x1, y1, x2, y2) boxes; rescale to pixel space.
            gmm.means_ = gmm.means_ * img_size.cpu().numpy()
            gmm.covariances_ = scale @ gmm.covariances_ @ scale.T
            gmm.precisions_cholesky_ = _compute_precision_cholesky(
                gmm.covariances_, gmm.covariance_type
            )
            box_log_probs = torch.tensor(gmm.score_samples(boxes.cpu().numpy())).to(
                boxes.device
            )
        cls_log_probs = self.class_dist_log[src[..., -1].long()]
        return cls_log_probs + box_log_probs + self.ppp_rate.log()

    def integrate(self, image_sizes, num_classes):
        """Integral of the intensity = the configured rate."""
        return self.ppp_rate
class ZeroDistribution(PoissonPointProcessBase):
    """Identically-zero intensity: log_prob is log(0) = -inf everywhere.

    Used as a stand-in when a mixture has no components.
    """

    def __init__(self, device=torch.device("cuda")) -> None:
        super().__init__()
        self.device = device
        # Mirrors MixtureSameFamily's attribute so callers can probe it.
        self.component_distribution = None

    def log_prob(self, src, *args, **kwargs):
        """Return log(0) = -inf for every row of ``src``."""
        zeros = torch.tensor(0.0).to(src.device).unsqueeze(0).repeat(len(src))
        return zeros.log()
class PoissonPointProcessIntensityFunction(PoissonPointProcessBase):
    """
    Class representing a Poisson Point Process RFS intensity function. Currently assuming DETR/RCNN/RetinaNet.
    """

    def __init__(
        self, cfg, log_intensity=None, ppp_feature_net=None, predictions=None, device="cuda"
    ) -> None:
        # The parameterization is chosen from the arguments/config:
        #   - confidence threshold set AND predictions given -> "prediction_mixture"
        #   - log_intensity given                            -> "uniform" (fixed, frozen class term)
        #   - otherwise                                      -> cfg ...PPP.INTENSITY_TYPE
        super().__init__()
        self.device = device
        if cfg.PROBABILISTIC_INFERENCE.PPP_CONFIDENCE_THRES and predictions is not None:
            self.ppp_intensity_type = "prediction_mixture"
        elif log_intensity is not None:
            self.ppp_intensity_type = "uniform"
            self.num_classes = 1
        else:
            self.ppp_intensity_type = (
                cfg.MODEL.PROBABILISTIC_MODELING.PPP.INTENSITY_TYPE
            )
            self.num_classes = cfg.MODEL.ROI_HEADS.NUM_CLASSES
        self.ppp_confidence_thres = cfg.PROBABILISTIC_INFERENCE.PPP_CONFIDENCE_THRES
        self.ppp_feature_net = ppp_feature_net
        if self.ppp_intensity_type == "uniform":
            # Two learnable scalars: a per-coordinate log-intensity and a
            # per-class log-probability (shared across classes).
            self.ppp_intensity_per_coord = nn.Parameter(
                torch.tensor(1.0).to(self.device), requires_grad=True
            )
            self.log_ppp_intensity_class = nn.Parameter(
                torch.tensor(1.0).to(self.device), requires_grad=True
            )
            if log_intensity is None:
                nn.init.constant_(
                    self.ppp_intensity_per_coord,
                    cfg.MODEL.PROBABILISTIC_MODELING.PPP.UNIFORM_INTENSITY,
                )
                nn.init.constant_(
                    self.log_ppp_intensity_class,
                    math.log(1 / cfg.MODEL.ROI_HEADS.NUM_CLASSES),
                )
            else:
                # Caller supplied a fixed intensity: freeze the class term.
                nn.init.constant_(self.ppp_intensity_per_coord, log_intensity)
                nn.init.constant_(self.log_ppp_intensity_class, 0)
                self.log_ppp_intensity_class.requires_grad = False
        elif self.ppp_intensity_type == "gaussian_mixture":
            num_mixture_comps = cfg.MODEL.PROBABILISTIC_MODELING.PPP.NUM_GAUSS_MIXTURES
            cov_type = cfg.MODEL.PROBABILISTIC_MODELING.PPP.COV_TYPE
            if cov_type == "diagonal":
                cov_dims = 4
            elif cov_type == "full":
                cov_dims = 10
            else:
                # Unknown covariance type falls back to diagonal.
                cov_dims = 4
            # Mixture weights in the log domain; the constant init is
            # immediately overwritten by the normal init below.
            self.log_gmm_weights = nn.Parameter(
                (torch.ones(num_mixture_comps)*0.5).log().to(self.device),
                requires_grad=True,
            )
            nn.init.normal_(self.log_gmm_weights, mean=0, std=0.1)
            # Sample (cx, cy, w, h) means around the image center in normalized
            # coordinates, convert to (x1, y1, x2, y2) and clamp into [0, 1].
            means = torch.distributions.Normal(torch.tensor([0.5]).to(self.device), scale=torch.tensor([0.16]).to(self.device)).rsample((num_mixture_comps, 4,)).squeeze(-1)
            xywh_to_xyxy = torch.tensor([[1,0,-0.5,0],[0,1,0,-0.5],[1,0,0.5,0],[0,1,0,0.5]]).to(self.device)
            means = (xywh_to_xyxy@(means.unsqueeze(-1))).squeeze(-1)
            means = means.clamp(0,1)
            self.gmm_means = nn.Parameter(
                means, requires_grad=True
            )
            # Cholesky parameters in the compact (log-variance + off-diagonal)
            # encoding used by covariance_output_to_cholesky.
            self.gmm_chols = nn.Parameter(
                torch.zeros(num_mixture_comps, cov_dims).to(self.device), requires_grad=True
            )
            nn.init.normal_(self.gmm_chols, std=1)
            # Near-uniform class probabilities with a small random perturbation,
            # stored as logits.
            cls_probs = torch.ones(num_mixture_comps, self.num_classes).to(self.device)/self.num_classes + torch.rand((num_mixture_comps, self.num_classes)).to(self.device)*0.1
            cls_logits = (cls_probs/(1-cls_probs)).log()
            self.class_logits = nn.Parameter(
                cls_logits, requires_grad=True
            )  # these are softmaxed later
            #self.mvn = MultivariateNormal(self.gmm_means, scale_tril=self.gmm_chols)
            reg_kwargs = {"scale_tril": covariance_output_to_cholesky(self.gmm_chols)}
            mixture_dict = {}
            mixture_dict["means"] = self.gmm_means
            mixture_dict["weights"] = self.log_gmm_weights.exp()
            mixture_dict["reg_dist"] = torch.distributions.multivariate_normal.MultivariateNormal
            mixture_dict["reg_kwargs"] = reg_kwargs
            mixture_dict["cls_probs"] = self.class_logits.softmax(dim=-1)
            mixture_dict["reg_dist_type"] = "gaussian"
            mixture_dict["covs"] = None
            self.mixture_from_predictions(mixture_dict)
        elif self.ppp_intensity_type == "prediction_mixture":
            # NOTE(review): with predictions=None the mixture is never built and
            # forward() would fail on self.mixture_dist — confirm callers always
            # pass predictions for this type.
            if predictions is not None:
                self.mixture_from_predictions(predictions)
        elif self.ppp_intensity_type == "zero":
            self.dist = ZeroDistribution(self.device)
        else:
            # NOTE(review): the message reads cfg...PPP_INTENSITY_TYPE while the
            # value above came from cfg...PPP.INTENSITY_TYPE — likely a typo in
            # the attribute path used for the error text.
            raise NotImplementedError(
                f"PPP intensity type {cfg.MODEL.PROBABILISTIC_MODELING.PPP_INTENSITY_TYPE} not implemented."
            )

    def mixture_from_predictions(self, mixture_dict):
        # Build the intensity as (rate x mixture) from a dict with keys
        # "means", "covs", "weights", "cls_probs", "reg_kwargs", "reg_dist"
        # (a torch distribution class) and "reg_dist_type" ("gaussian" or
        # "laplacian"). With no components the intensity degenerates to a
        # ZeroDistribution with rate 0.
        reg_dist_str = mixture_dict["reg_dist_type"]
        means = mixture_dict["means"]
        covs = mixture_dict["covs"]
        weights = mixture_dict["weights"]
        cls_probs = mixture_dict["cls_probs"]
        reg_kwargs = mixture_dict["reg_kwargs"]
        independent_reg_dist = False
        reg_dist = mixture_dict["reg_dist"]
        if reg_dist_str == "laplacian":
            # Laplace factorizes per coordinate; wrap with Independent so
            # log_prob sums over the box dimensions.
            independent_reg_dist = True
        if not len(weights):
            self.mixture_dist = ZeroDistribution(means.device)
            self.ppp_rate = torch.tensor(0.0).to(means.device)
        else:
            self.mixture_dist = MixtureSameFamily(
                Categorical(weights),
                ClassRegDist(
                    means,
                    reg_dist,
                    reg_kwargs,
                    probs=cls_probs,
                    independent_reg_dist=independent_reg_dist,
                ),
                validate_args=False,
            )
            # Total PPP rate is the (unnormalized) sum of component weights.
            self.ppp_rate = weights.sum()

    def get_weights(self):
        # Export learnable PPP parameters as a dict (used by raw-output dumps).
        weights = dict()
        if self.ppp_intensity_type == "uniform":
            weights["ppp_intensity_per_coord"] = self.ppp_intensity_per_coord
            weights["log_ppp_intensity_class"] = self.log_ppp_intensity_class
        elif self.ppp_intensity_type == "gaussian_mixture":
            # NOTE(review): the early return makes the assignments below
            # unreachable, and they reference attributes (gmm_covs,
            # class_weights, log_class_scaling) that __init__ never creates
            # (it creates gmm_chols / class_logits). Looks like leftovers from
            # an older parameterization — confirm before re-enabling.
            return weights
            weights["log_gmm_weights"] = self.log_gmm_weights
            weights["gmm_means"] = self.gmm_means
            weights["gmm_covs"] = self.gmm_covs
            weights["class_weights"] = self.class_weights
            weights["log_class_scaling"] = self.log_class_scaling
        return weights

    def load_weights(self, weights):
        # Restore parameters exported by get_weights and rebuild the mixture.
        if self.ppp_intensity_type == "uniform":
            self.ppp_intensity_per_coord = nn.Parameter(
                torch.as_tensor(weights["ppp_intensity_per_coord"])
            )
            self.log_ppp_intensity_class = nn.Parameter(
                torch.as_tensor(weights["log_ppp_intensity_class"])
            )
        elif self.ppp_intensity_type == "gaussian_mixture":
            # NOTE(review): expects keys/attributes (gmm_covs, class_weights,
            # log_class_scaling) that neither __init__ nor get_weights produce
            # in the current parameterization — this branch looks stale.
            self.log_gmm_weights = nn.Parameter(
                torch.as_tensor(weights["log_gmm_weights"])
            )
            self.gmm_means = nn.Parameter(torch.as_tensor(weights["gmm_means"]))
            self.gmm_covs = nn.Parameter(torch.as_tensor(weights["gmm_covs"]))
            self.class_weights = nn.Parameter(torch.as_tensor(weights["class_weights"]))
            self.log_class_scaling = nn.Parameter(
                torch.as_tensor(weights["log_class_scaling"])
            )
        self.update_distribution()

    def update_distribution(self):
        # Rebuild the cached mixture from the current learnable parameters.
        if self.ppp_intensity_type == "gaussian_mixture":
            mixture_dict = {}
            mixture_dict["means"] = self.gmm_means
            mixture_dict["weights"] = self.log_gmm_weights.exp()
            mixture_dict["reg_dist"] = torch.distributions.multivariate_normal.MultivariateNormal
            mixture_dict["reg_kwargs"] = {"scale_tril": covariance_output_to_cholesky(self.gmm_chols)}
            mixture_dict["cls_probs"] = self.class_logits.softmax(dim=-1)
            mixture_dict["reg_dist_type"] = "gaussian"
            mixture_dict["covs"] = None
            self.mixture_from_predictions(mixture_dict)

    def forward_features(self, src):
        # Data-dependent intensity (predicting PPP parameters from features) is
        # not implemented; the early return intentionally disables the stub below.
        print("[NLLOD] Data dependent PPP not available yet")
        return
        out = self.ppp_feature_net(src)
        if self.ppp_intensity_type == "gaussian_mixture":
            pass
            # translate output to gmm params
        return

    def forward(
        self,
        src,
        image_sizes=[],
        num_classes=-1,
        integrate=False,
        src_is_features=False,
        dist_type="log_prob"
    ):
        """Calculate the log PPP intensity for the given input, or its integral
        when ``integrate`` is True.

        Args:
            src: rows of [x1, y1, x2, y2, class]. NOTE(review): layout assumed
                from the slicing below — confirm with callers.
            image_sizes (list, optional): per-image (H, W) sizes. Defaults to [].
            num_classes (int, optional): number of classes (used only when
                integrating the uniform intensity). Defaults to -1.
            integrate (bool): return the integral instead of per-row values.
            src_is_features (bool): route through the (disabled) feature net.
            dist_type (str): regression scoring mode forwarded to ClassRegDist.

        Returns:
            Per-row log intensity, or the integral when integrate=True.
        """
        if src_is_features:
            return self.forward_features(src)
        if integrate:
            return self.integrate(image_sizes, num_classes)
        if self.ppp_intensity_type == "uniform":
            # Returns log intensity func value
            coord_log_prob = self.ppp_intensity_per_coord
            if src.shape[-1] > 4:
                # Drop the class column; the class term is a learned constant.
                src = src[..., :4]
            # keep gradients trough src, +1 to handle coodinates in zero
            out = (src + 1) / (src.detach() + 1) * coord_log_prob
            out = out.sum(-1)
            class_log_prob = self.log_ppp_intensity_class
            out = out + class_log_prob
        elif self.ppp_intensity_type == "gaussian_mixture":
            if self.normalize_bboxes:
                # H,W -> (flip) -> W,H -> (repeat) -> W,H,W,H
                box_scaling = 1/image_sizes.flip((-1)).repeat(1,2).float()
                class_scaling = torch.ones((len(image_sizes),1)).to(src.device)
                # [1, 5]
                scaling = torch.cat([box_scaling, class_scaling], dim=-1)
                # [num_gt, 5]
                scaling = scaling.repeat(src.shape[0],1)
                src = src*scaling
            else:
                scaling = torch.ones_like(src)
            if self.mixture_dist.component_distribution:
                self.mixture_dist.component_distribution.set_dist_mode(dist_type)
            out = self.mixture_dist.log_prob(src)
            out = out + self.ppp_rate.log()
            # Change-of-variables correction for the normalization above
            # (a no-op when scaling is all ones).
            out = out + scaling.log().sum(dim=-1)
        elif self.ppp_intensity_type == "prediction_mixture":
            if self.mixture_dist.component_distribution:
                self.mixture_dist.component_distribution.set_dist_mode(dist_type)
            out = self.mixture_dist.log_prob(src)
            out = out + self.ppp_rate.log()
        elif self.ppp_intensity_type == "zero":
            out = self.dist.log_prob(src)
        return out

    def integrate(self, image_sizes, num_classes):
        # Integral of the intensity over all boxes/classes (= expected count).
        if self.ppp_intensity_type == "uniform":
            # Evaluate the integral of the intensity funciton of all possible inputs
            coord_log_prob = self.ppp_intensity_per_coord
            class_log_prob = self.log_ppp_intensity_class
            # Divide by 2 because x1 < x2 and y1 < y2
            image_part = torch.log(
                image_sizes[:, 0] ** 2 / 2 * image_sizes[:, 1] ** 2 / 2
            ) + (4 * coord_log_prob)
            class_part = math.log(num_classes) + class_log_prob
            out = (image_part + class_part).exp()
        elif self.ppp_intensity_type == "gaussian_mixture":
            out = self.ppp_rate
        elif self.ppp_intensity_type == "prediction_mixture":
            out = self.ppp_rate
        elif self.ppp_intensity_type == "zero":
            out = torch.zeros(len(image_sizes)).to(image_sizes.device)
        else:
            # Unknown type: treat as zero intensity (init should have raised).
            out = torch.zeros(len(image_sizes)).to(image_sizes.device)
        return out
| 24,254 | 36.488408 | 242 | py |
pmb-nll | pmb-nll-main/src/probabilistic_modeling/probabilistic_generalized_rcnn.py | import logging
from typing import Dict, List, Optional, Tuple, Union
# Detectron imports
import fvcore.nn.weight_init as weight_init
import numpy as np
import torch
from detectron2.config import configurable
from detectron2.data.detection_utils import convert_image_to_rgb
from detectron2.layers import Conv2d, Linear, ShapeSpec, cat, get_norm
from detectron2.modeling.box_regression import Box2BoxTransform
from detectron2.modeling.meta_arch.build import META_ARCH_REGISTRY
from detectron2.modeling.meta_arch.rcnn import GeneralizedRCNN
from detectron2.modeling.roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads
from detectron2.modeling.roi_heads.box_head import ROI_BOX_HEAD_REGISTRY
from detectron2.modeling.roi_heads.fast_rcnn import fast_rcnn_inference
from detectron2.structures import Boxes, ImageList, Instances
from detectron2.utils.events import get_event_storage
from detectron2.utils.logger import log_first_n
from fvcore.nn import smooth_l1_loss
# Project imports
from probabilistic_inference.inference_utils import get_dir_alphas
from torch import distributions, nn
from torch.nn import functional as F
from probabilistic_modeling.losses import negative_log_likelihood, reshape_box_preds
from probabilistic_modeling.modeling_utils import (
PoissonPointProcessIntensityFunction,
clamp_log_variance,
covariance_output_to_cholesky,
get_probabilistic_loss_weight,
unscented_transform,
PoissonPointUnion,
)
# Module-level default device. NOTE(review): appears unused in this module's
# visible code — the meta-arch relies on detectron2's `self.device` instead;
# confirm before removing.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
@META_ARCH_REGISTRY.register()
class ProbabilisticGeneralizedRCNN(GeneralizedRCNN):
    """
    Probabilistic GeneralizedRCNN class.

    Extends detectron2's GeneralizedRCNN with a probabilistic box predictor
    that can estimate classification variance and bounding-box covariance,
    and (for the PMB NLL loss) a Poisson point process intensity function.
    """

    def __init__(self, cfg):
        super().__init__(cfg)

        # Parse configs
        self.cls_var_loss = cfg.MODEL.PROBABILISTIC_MODELING.CLS_VAR_LOSS.NAME
        self.compute_cls_var = self.cls_var_loss != "none"
        self.cls_var_num_samples = (
            cfg.MODEL.PROBABILISTIC_MODELING.CLS_VAR_LOSS.NUM_SAMPLES
        )

        self.bbox_cov_loss = cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.NAME
        self.compute_bbox_cov = self.bbox_cov_loss != "none"
        self.bbox_cov_num_samples = (
            cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.NUM_SAMPLES
        )
        self.bbox_cov_dist_type = (
            cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.DISTRIBUTION_TYPE
        )
        self.bbox_cov_type = (
            cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.COVARIANCE_TYPE
        )
        if self.bbox_cov_type == "diagonal":
            # Diagonal covariance matrix has N elements
            self.bbox_cov_dims = 4
        else:
            # Number of elements required to describe an NxN covariance matrix is
            # computed as: (N * (N + 1)) / 2
            self.bbox_cov_dims = 10

        self.dropout_rate = cfg.MODEL.PROBABILISTIC_MODELING.DROPOUT_RATE
        self.use_dropout = self.dropout_rate != 0.0
        self.num_mc_dropout_runs = -1

        if (
            self.compute_bbox_cov
            and self.bbox_cov_loss == "pmb_negative_log_likelihood"
        ):
            # NOTE(review): ppp_constructor and self.nll_max_num_solutions are
            # only bound inside this branch but are used unconditionally below,
            # so constructing this model with any other bbox_cov_loss raises
            # NameError/AttributeError. Confirm all supported configs take
            # this branch, or bind safe defaults.
            ppp_constructor = lambda x: PoissonPointProcessIntensityFunction(
                cfg, **x
            )
            self.nll_max_num_solutions = (
                cfg.MODEL.PROBABILISTIC_MODELING.NLL_MAX_NUM_SOLUTIONS
            )
        self.current_step = 0

        # Define custom probabilistic head
        self.roi_heads.box_predictor = ProbabilisticFastRCNNOutputLayers(
            cfg,
            input_shape=self.roi_heads.box_head.output_shape,
            compute_cls_var=self.compute_cls_var,
            cls_var_loss=self.cls_var_loss,
            cls_var_num_samples=self.cls_var_num_samples,
            compute_bbox_cov=self.compute_bbox_cov,
            bbox_cov_loss=self.bbox_cov_loss,
            bbox_cov_type=self.bbox_cov_type,
            bbox_cov_dims=self.bbox_cov_dims,
            bbox_cov_num_samples=self.bbox_cov_num_samples,
            ppp_constructor=ppp_constructor,
            nll_max_num_solutions=self.nll_max_num_solutions,
            bbox_cov_dist_type=self.bbox_cov_dist_type,
            matching_distance=cfg.MODEL.PROBABILISTIC_MODELING.MATCHING_DISTANCE,
            use_prediction_mixture=cfg.MODEL.PROBABILISTIC_MODELING.PPP.USE_PREDICTION_MIXTURE,
        )

        # Send to device
        self.to(self.device)

    def get_ppp_intensity_function(self):
        # Convenience accessor for the PPP intensity owned by the box predictor.
        return self.roi_heads.box_predictor.ppp_intensity_function

    def forward(
        self, batched_inputs, return_anchorwise_output=False, num_mc_dropout_runs=-1
    ):
        """
        Args:
            batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
                Each item in the list contains the inputs for one image.
                For now, each item in the list is a dict that contains:

                * image: Tensor, image in (C, H, W) format.
                * instances (optional): groundtruth :class:`Instances`
                * proposals (optional): :class:`Instances`, precomputed proposals.

                Other information that's included in the original dicts, such as:

                * "height", "width" (int): the output resolution of the model, used in inference.
                  See :meth:`postprocess` for details.

            return_anchorwise_output (bool): returns raw output for probabilistic inference

            num_mc_dropout_runs (int): perform efficient monte-carlo dropout runs by running only the head and
            not full neural network.

        Returns:
            dict[str: Tensor]:
                mapping from a named loss to a tensor storing the loss. Used during training only.
        """
        # NOTE(review): accumulates the event-storage iteration into
        # current_step on every call (+= rather than =); outside an
        # EventStorage context it just increments by one. Confirm the loss
        # annealing schedule expects this accumulation.
        try:
            self.current_step += get_event_storage().iter
        except:
            self.current_step += 1

        if not self.training and num_mc_dropout_runs == -1:
            # Plain inference (optionally raw, anchor-wise outputs).
            if return_anchorwise_output:
                return self.produce_raw_output(batched_inputs)
            else:
                return self.inference(batched_inputs)
        elif self.training and num_mc_dropout_runs > 1:
            # MC-dropout: repeat the raw forward pass and collect all outputs.
            self.num_mc_dropout_runs = num_mc_dropout_runs
            output_list = []
            for i in range(num_mc_dropout_runs):
                output_list.append(self.produce_raw_output(batched_inputs))
            return output_list

        images = self.preprocess_image(batched_inputs)
        if "instances" in batched_inputs[0]:
            gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
        elif "targets" in batched_inputs[0]:
            log_first_n(
                logging.WARN,
                "'targets' in the model inputs is now renamed to 'instances'!",
                n=10,
            )
            gt_instances = [x["targets"].to(self.device) for x in batched_inputs]
        else:
            gt_instances = None

        features = self.backbone(images.tensor)

        if self.proposal_generator:
            proposals, proposal_losses = self.proposal_generator(
                images, features, gt_instances
            )
        else:
            # Precomputed proposals supplied with the inputs.
            assert "proposals" in batched_inputs[0]
            proposals = [x["proposals"].to(self.device) for x in batched_inputs]
            proposal_losses = {}

        # current_step is forwarded so the probabilistic loss can anneal.
        _, detector_losses = self.roi_heads(
            images, features, proposals, gt_instances, current_step=self.current_step
        )
        if self.vis_period > 0:
            storage = get_event_storage()
            if storage.iter % self.vis_period == 0:
                # TODO: implement to visualize probabilistic outputs
                self.visualize_training(batched_inputs, proposals)

        losses = {}
        losses.update(detector_losses)
        losses.update(proposal_losses)
        return losses

    def produce_raw_output(self, batched_inputs, detected_instances=None):
        """
        Run inference on the given inputs and return proposal-wise output for later postprocessing.

        Args:
            batched_inputs (list[dict]): same as in :meth:`forward`
            detected_instances (None or list[Instances]): if not None, it
                contains an `Instances` object per image. The `Instances`
                object contains "pred_boxes" and "pred_classes" which are
                known boxes in the image.
                The inference will then skip the detection of bounding boxes,
                and only predict other per-ROI outputs.

        Returns:
            same as in :meth:`forward`.
        """
        raw_output = dict()
        images = self.preprocess_image(batched_inputs)
        features = self.backbone(images.tensor)

        if detected_instances is None:
            if self.proposal_generator:
                proposals, _ = self.proposal_generator(images, features, None)
            else:
                assert "proposals" in batched_inputs[0]
                proposals = [x["proposals"].to(self.device) for x in batched_inputs]
            # Create raw output dictionary
            raw_output.update({"proposals": proposals[0]})
            results, _ = self.roi_heads(
                images,
                features,
                proposals,
                None,
                produce_raw_output=True,
                num_mc_dropout_runs=self.num_mc_dropout_runs,
            )
        else:
            # Boxes already known: only compute per-ROI head outputs.
            detected_instances = [x.to(self.device) for x in detected_instances]
            results = self.roi_heads.forward_with_given_boxes(
                features, detected_instances
            )

        box_cls, box_delta, box_cls_var, box_reg_var = results
        raw_output.update(
            {
                "box_cls": box_cls,
                "box_delta": box_delta,
                "box_cls_var": box_cls_var,
                "box_reg_var": box_reg_var,
            }
        )
        if (
            self.compute_bbox_cov
            and self.bbox_cov_loss == "pmb_negative_log_likelihood"
        ):
            # Also export the PPP intensity parameters for the PMB-NLL pipeline.
            ppp_output = (
                self.roi_heads.box_predictor.ppp_intensity_function.get_weights()
            )
            raw_output.update({"ppp": ppp_output})
        return raw_output

    def visualize_training(self, batched_inputs, proposals):
        """
        A function used to visualize images and proposals. It shows ground truth
        bounding boxes on the original image and up to 20 top-scoring predicted
        object proposals on the original image. Users can implement different
        visualization functions for different models.

        Args:
            batched_inputs (list): a list that contains input to the model.
            proposals (list): a list that contains predicted proposals. Both
                batched_inputs and proposals should have the same length.
        """
        # Imported lazily: visualization is only needed during training dumps.
        from core.visualization_tools.probabilistic_visualizer import (
            ProbabilisticVisualizer as Visualizer,
        )

        storage = get_event_storage()
        max_vis_prop = 20
        with torch.no_grad():
            # Temporarily switch to eval mode for a clean raw-output pass.
            self.eval()
            predictions = self.produce_raw_output(batched_inputs)
            self.train()
            predictions = (
                predictions["box_cls"],
                predictions["box_delta"],
                predictions["box_cls_var"],
                predictions["box_reg_var"],
            )
            _, _, _, pred_covs = predictions
            boxes = self.roi_heads.box_predictor.predict_boxes(predictions, proposals)
            scores = self.roi_heads.box_predictor.predict_probs(predictions, proposals)
            image_shapes = [x.image_size for x in proposals]
            # Apply NMS without score threshold
            instances, kept_idx = fast_rcnn_inference(
                boxes,
                scores,
                image_shapes,
                0.0,
                self.roi_heads.box_predictor.test_nms_thresh,
                self.roi_heads.box_predictor.test_topk_per_image,
            )
            # Filter the per-proposal tensors down to the NMS-kept indices.
            # NOTE(review): pred_scores is computed but never used below.
            num_prop_per_image = [len(p) for p in proposals]
            pred_covs = pred_covs.split(num_prop_per_image)
            pred_covs = [pred_cov[kept] for pred_cov, kept in zip(pred_covs, kept_idx)]
            pred_scores = [score[kept] for score, kept in zip(scores, kept_idx)]
            pred_boxes = [box[kept] for box, kept in zip(boxes, kept_idx)]

            for i, (input, prop) in enumerate(zip(batched_inputs, proposals)):
                img = input["image"]
                img = convert_image_to_rgb(img.permute(1, 2, 0), self.input_format)
                v_gt = Visualizer(img, None)
                v_gt = v_gt.overlay_instances(boxes=input["instances"].gt_boxes)
                anno_img = v_gt.get_image()
                box_size = min(len(prop.proposal_boxes), max_vis_prop)
                v_pred = Visualizer(img, None)
                boxes = pred_boxes[i][0:box_size, :4].cpu().numpy()
                # Convert the compact covariance encoding to full 4x4 matrices
                # for the covariance-ellipse overlay.
                pred_cov_matrix = pred_covs[i][0:box_size, :4]
                pred_cov_matrix = clamp_log_variance(pred_cov_matrix)
                chol = covariance_output_to_cholesky(pred_cov_matrix)
                cov = (
                    torch.matmul(chol, torch.transpose(chol, -1, -2)).cpu().detach().numpy()
                )
                v_pred = v_pred.overlay_covariance_instances(
                    boxes=boxes, covariance_matrices=cov
                )
                prop_img = v_pred.get_image()
                # Side-by-side GT vs. prediction, in CHW for the event writer.
                vis_img = np.concatenate((anno_img, prop_img), axis=1)
                vis_img = vis_img.transpose(2, 0, 1)
                vis_name = "Left: GT bounding boxes; Right: Predicted proposals"
                storage.put_image(vis_name, vis_img)
                break  # only visualize one image in a batch
@ROI_HEADS_REGISTRY.register()
class ProbabilisticROIHeads(StandardROIHeads):
    """
    Probabilistic ROI heads, inherit from standard ROI heads so can be used with mask RCNN in theory.
    """

    def __init__(self, cfg, input_shape):
        super().__init__(cfg, input_shape)
        # Per-call flags; refreshed at the start of every forward() pass.
        self.is_mc_dropout_inference = False
        self.produce_raw_output = False
        self.current_step = 0

    def forward(
        self,
        images: ImageList,
        features: Dict[str, torch.Tensor],
        proposals: List[Instances],
        targets: Optional[List[Instances]] = None,
        num_mc_dropout_runs=-1,
        produce_raw_output=False,
        current_step=0.0,
    ) -> Tuple[List[Instances], Dict[str, torch.Tensor]]:
        """
        See :class:`ROIHeads.forward`.
        """
        self.is_mc_dropout_inference = num_mc_dropout_runs > 1
        self.produce_raw_output = produce_raw_output
        self.current_step = current_step
        del images
        training_pass = self.training and not self.is_mc_dropout_inference
        if training_pass:
            assert targets
            proposals = self.label_and_sample_proposals(proposals, targets)
        # NOTE: targets are intentionally kept alive (not deleted) so the box
        # head can match predictions against ground truth for NLL-style losses.
        if training_pass:
            losses = self._forward_box(features, proposals, targets)
            # Usually the original proposals used by the box head are used by the mask, keypoint
            # heads. But when `self.train_on_pred_boxes is True`, proposals will contain boxes
            # predicted by the box head.
            losses.update(self._forward_mask(features, proposals))
            losses.update(self._forward_keypoint(features, proposals))
            return proposals, losses
        pred_instances = self._forward_box(features, proposals, targets)
        if self.produce_raw_output:
            return pred_instances, {}
        # During inference cascaded prediction is used: the mask and keypoints heads are only
        # applied to the top scoring box detections.
        pred_instances = self.forward_with_given_boxes(features, pred_instances)
        return pred_instances, {}

    def _forward_box(
        self,
        features: Dict[str, torch.Tensor],
        proposals: List[Instances],
        gt_instances: List[Instances],
    ) -> Union[Dict[str, torch.Tensor], List[Instances]]:
        """
        Forward logic of the box prediction branch. If `self.train_on_pred_boxes is True`,
        the function puts predicted boxes in the `proposal_boxes` field of `proposals` argument.

        Args:
            features (dict[str, Tensor]): mapping from feature map names to tensor.
                Same as in :meth:`ROIHeads.forward`.
            proposals (list[Instances]): the per-image object proposals with
                their matching ground truth.
            gt_instances (list[Instances]): per-image ground-truth instances,
                forwarded to the box predictor's loss.

        Returns:
            In training, a dict of losses.
            In inference, a list of `Instances`, the predicted instances.
            When `self.produce_raw_output`, the raw box-predictor outputs.
        """
        pooled = self.box_pooler(
            [features[name] for name in self.in_features],
            [p.proposal_boxes for p in proposals],
        )
        pooled = self.box_head(pooled)
        predictions = self.box_predictor(pooled)
        del pooled
        if self.produce_raw_output:
            return predictions
        if not self.training:
            pred_instances, _ = self.box_predictor.inference(predictions, proposals)
            return pred_instances
        losses = self.box_predictor.losses(
            predictions, proposals, self.current_step, gt_instances
        )
        # proposals is modified in-place below, so losses must be computed first.
        if self.train_on_pred_boxes:
            with torch.no_grad():
                refined_boxes = self.box_predictor.predict_boxes_for_gt_classes(
                    predictions, proposals
                )
                for per_image_props, per_image_boxes in zip(proposals, refined_boxes):
                    per_image_props.proposal_boxes = Boxes(per_image_boxes)
        return losses
class ProbabilisticFastRCNNOutputLayers(nn.Module):
"""
Four linear layers for predicting Fast R-CNN outputs:
(1) proposal-to-detection box regression deltas
(2) classification scores
(3) box regression deltas covariance parameters (if needed)
(4) classification logits variance (if needed)
"""
@configurable
def __init__(
self,
input_shape,
*,
box2box_transform,
num_classes,
cls_agnostic_bbox_reg=False,
smooth_l1_beta=0.0,
test_score_thresh=0.0,
test_nms_thresh=0.5,
test_topk_per_image=100,
compute_cls_var=False,
compute_bbox_cov=False,
bbox_cov_dims=4,
cls_var_loss="none",
cls_var_num_samples=10,
bbox_cov_loss="none",
bbox_cov_type="diagonal",
dropout_rate=0.0,
annealing_step=0,
bbox_cov_num_samples=1000,
ppp_constructor=None,
nll_max_num_solutions=5,
bbox_cov_dist_type=None,
matching_distance="log_prob",
use_prediction_mixture=False,
):
"""
NOTE: this interface is experimental.
Args:
input_shape (ShapeSpec): shape of the input feature to this module
box2box_transform (Box2BoxTransform or Box2BoxTransformRotated):
num_classes (int): number of foreground classes
cls_agnostic_bbox_reg (bool): whether to use class agnostic for bbox regression
smooth_l1_beta (float): transition point from L1 to L2 loss.
test_score_thresh (float): threshold to filter predictions results.
test_nms_thresh (float): NMS threshold for prediction results.
test_topk_per_image (int): number of top predictions to produce per image.
compute_cls_var (bool): compute classification variance
compute_bbox_cov (bool): compute box covariance regression parameters.
bbox_cov_dims (int): 4 for diagonal covariance, 10 for full covariance.
cls_var_loss (str): name of classification variance loss.
cls_var_num_samples (int): number of samples to be used for loss computation. Usually between 10-100.
bbox_cov_loss (str): name of box covariance loss.
bbox_cov_type (str): 'diagonal' or 'full'. This is used to train with loss functions that accept both types.
dropout_rate (float): 0-1, probability of drop.
annealing_step (int): step used for KL-divergence in evidential loss to fully be functional.
ppp_intensity_function (func): function that returns PPP intensity given sample box
nll_max_num_solutions (int): Maximum NLL solutions to consider when computing NLL-PMB loss
"""
super().__init__()
if isinstance(input_shape, int): # some backward compatibility
input_shape = ShapeSpec(channels=input_shape)
self.num_classes = num_classes
input_size = (
input_shape.channels * (input_shape.width or 1) * (input_shape.height or 1)
)
self.compute_cls_var = compute_cls_var
self.compute_bbox_cov = compute_bbox_cov
self.bbox_cov_dims = bbox_cov_dims
self.bbox_cov_num_samples = bbox_cov_num_samples
self.dropout_rate = dropout_rate
self.use_dropout = self.dropout_rate != 0.0
self.cls_var_loss = cls_var_loss
self.cls_var_num_samples = cls_var_num_samples
self.annealing_step = annealing_step
self.bbox_cov_loss = bbox_cov_loss
self.bbox_cov_type = bbox_cov_type
self.bbox_cov_dist_type = bbox_cov_dist_type
# The prediction layer for num_classes foreground classes and one background class
# (hence + 1)
self.cls_score = Linear(input_size, num_classes + 1)
num_bbox_reg_classes = 1.0 if cls_agnostic_bbox_reg else num_classes
box_dim = len(box2box_transform.weights)
self.bbox_pred = Linear(input_size, num_bbox_reg_classes * box_dim)
nn.init.normal_(self.cls_score.weight, std=0.01)
nn.init.normal_(self.bbox_pred.weight, std=0.001)
for l in [self.cls_score, self.bbox_pred]:
nn.init.constant_(l.bias, 0)
if self.compute_cls_var:
self.cls_var = Linear(input_size, num_classes + 1)
nn.init.normal_(self.cls_var.weight, std=0.0001)
nn.init.constant_(self.cls_var.bias, 0)
if self.compute_bbox_cov:
self.bbox_cov = Linear(input_size, num_bbox_reg_classes * bbox_cov_dims)
nn.init.normal_(self.bbox_cov.weight, std=0.0001)
nn.init.constant_(self.bbox_cov.bias, 0.0)
self.box2box_transform = box2box_transform
self.smooth_l1_beta = smooth_l1_beta
self.test_score_thresh = test_score_thresh
self.test_nms_thresh = test_nms_thresh
self.test_topk_per_image = test_topk_per_image
self.ppp_intensity_function = ppp_constructor({"device": device}) if ppp_constructor is not None else None
self.ppp_constructor = ppp_constructor
self.nll_max_num_solutions = nll_max_num_solutions
self.matching_distance = matching_distance
self.use_prediction_mixture = use_prediction_mixture
@classmethod
def from_config(
cls,
cfg,
input_shape,
compute_cls_var,
cls_var_loss,
cls_var_num_samples,
compute_bbox_cov,
bbox_cov_loss,
bbox_cov_type,
bbox_cov_dims,
bbox_cov_num_samples,
ppp_constructor,
nll_max_num_solutions,
):
return {
"input_shape": input_shape,
"box2box_transform": Box2BoxTransform(
weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS
),
# fmt: off
"num_classes": cfg.MODEL.ROI_HEADS.NUM_CLASSES,
"cls_agnostic_bbox_reg": cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG,
"smooth_l1_beta": cfg.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA,
"test_score_thresh": cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST,
"test_nms_thresh": cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST,
"test_topk_per_image": cfg.TEST.DETECTIONS_PER_IMAGE,
"compute_cls_var": compute_cls_var,
"cls_var_loss": cls_var_loss,
"cls_var_num_samples": cls_var_num_samples,
"compute_bbox_cov": compute_bbox_cov,
"bbox_cov_dims": bbox_cov_dims,
"bbox_cov_loss": bbox_cov_loss,
"bbox_cov_type": bbox_cov_type,
"dropout_rate": cfg.MODEL.PROBABILISTIC_MODELING.DROPOUT_RATE,
"annealing_step": cfg.SOLVER.STEPS[1] if cfg.MODEL.PROBABILISTIC_MODELING.ANNEALING_STEP <= 0 else cfg.MODEL.PROBABILISTIC_MODELING.ANNEALING_STEP,
"bbox_cov_num_samples": bbox_cov_num_samples,
"ppp_constructor": ppp_constructor,
"nll_max_num_solutions" : nll_max_num_solutions,
'bbox_cov_dist_type': cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.COVARIANCE_TYPE,
"use_prediction_mixture": cfg.MODEL.PROBABILISTIC_MODELING.PPP.USE_PREDICTION_MIXTURE
# fmt: on
}
def forward(self, x):
"""
Args:
x: per-region features of shape (N, ...) for N bounding boxes to predict.
Returns:
Tensor: Nx(K+1) logits for each box
Tensor: Nx4 or Nx(Kx4) bounding box regression deltas.
Tensor: Nx(K+1) logits variance for each box.
Tensor: Nx4(10) or Nx(Kx4(10)) covariance matrix parameters. 4 if diagonal, 10 if full.
"""
if x.dim() > 2:
x = torch.flatten(x, start_dim=1)
scores = self.cls_score(x)
proposal_deltas = self.bbox_pred(x)
# Compute logits variance if needed
if self.compute_cls_var:
score_vars = self.cls_var(x)
else:
score_vars = None
# Compute box covariance if needed
if self.compute_bbox_cov:
proposal_covs = self.bbox_cov(x)
else:
proposal_covs = None
return scores, proposal_deltas, score_vars, proposal_covs
    def losses(self, predictions, proposals, current_step=0, gt_instances=None):
        """
        Compute classification and box-regression losses, including the
        configured probabilistic variants (classification loss attenuation,
        evidential loss, and several box-covariance losses with annealing).

        Args:
            predictions: return values of :meth:`forward()`.
            proposals (list[Instances]): proposals that match the features
                that were used to compute predictions.
            current_step: current optimizer step. Used for losses with an annealing component.
            gt_instances: list of ground truth instances
        Returns:
            Dict[str, Tensor]: dict of losses
        """
        global device
        # Overwrite later: set True only when the PMB NLL regression loss runs,
        # which changes how the final losses dict is assembled.
        use_nll_loss = False
        (
            pred_class_logits,
            pred_proposal_deltas,
            pred_class_logits_var,
            pred_proposal_covs,
        ) = predictions
        if len(proposals):
            box_type = type(proposals[0].proposal_boxes)
            # cat(..., dim=0) concatenates over all images in the batch
            proposals_boxes = box_type.cat([p.proposal_boxes for p in proposals])
            assert (
                not proposals_boxes.tensor.requires_grad
            ), "Proposals should not require gradients!"
            # The following fields should exist only when training.
            if proposals[0].has("gt_boxes"):
                gt_boxes = box_type.cat([p.gt_boxes for p in proposals])
                assert proposals[0].has("gt_classes")
                gt_classes = cat([p.gt_classes for p in proposals], dim=0)
        else:
            proposals_boxes = Boxes(
                torch.zeros(0, 4, device=pred_proposal_deltas.device)
            )
        no_instances = len(proposals) == 0  # no instances found
        # Compute Classification Loss
        if no_instances:
            # Zero-valued loss that still touches the prediction tensor so the
            # autograd graph stays connected.
            # TODO 0.0 * pred.sum() is enough since PT1.6
            loss_cls = 0.0 * F.cross_entropy(
                pred_class_logits,
                torch.zeros(0, dtype=torch.long, device=pred_class_logits.device),
                reduction="sum",
            )
        else:
            if self.compute_cls_var:
                # Compute classification variance according to:
                # "What Uncertainties Do We Need in Bayesian Deep Learning for Computer Vision?", NIPS 2017
                if self.cls_var_loss == "loss_attenuation":
                    num_samples = self.cls_var_num_samples
                    # Compute standard deviation (the head predicts log-variance).
                    pred_class_logits_var = torch.sqrt(torch.exp(pred_class_logits_var))
                    # Produce normal samples using logits as the mean and the standard deviation computed above
                    # Scales with GPU memory. 12 GB ---> 3 Samples per anchor for
                    # COCO dataset.
                    univariate_normal_dists = distributions.normal.Normal(
                        pred_class_logits, scale=pred_class_logits_var
                    )
                    pred_class_stochastic_logits = univariate_normal_dists.rsample(
                        (num_samples,)
                    )
                    # Fold the sample dimension into the batch dimension so a
                    # single cross-entropy call covers all samples.
                    pred_class_stochastic_logits = pred_class_stochastic_logits.view(
                        (
                            pred_class_stochastic_logits.shape[1] * num_samples,
                            pred_class_stochastic_logits.shape[2],
                            -1,
                        )
                    )
                    pred_class_logits = pred_class_stochastic_logits.squeeze(2)
                    # Produce copies of the target classes to match the number of
                    # stochastic samples.
                    gt_classes_target = torch.unsqueeze(gt_classes, 0)
                    gt_classes_target = torch.repeat_interleave(
                        gt_classes_target, num_samples, dim=0
                    ).view((gt_classes_target.shape[1] * num_samples, -1))
                    gt_classes_target = gt_classes_target.squeeze(1)
                    loss_cls = F.cross_entropy(
                        pred_class_logits, gt_classes_target, reduction="mean"
                    )
                elif self.cls_var_loss == "evidential":
                    # ToDo: Currently does not provide any reasonable mAP Results
                    # (15% mAP)
                    # Assume dirichlet parameters are output.
                    alphas = get_dir_alphas(pred_class_logits)
                    # Get sum of all alphas
                    dirichlet_s = alphas.sum(1).unsqueeze(1)
                    # Generate one hot vectors for ground truth
                    one_hot_vectors = torch.nn.functional.one_hot(
                        gt_classes, alphas.shape[1]
                    )
                    # Compute loss. This loss attempts to put all evidence on the
                    # correct location.
                    per_instance_loss = one_hot_vectors * (
                        torch.digamma(dirichlet_s) - torch.digamma(alphas)
                    )
                    # Compute KL divergence regularizer loss
                    estimated_dirichlet = torch.distributions.dirichlet.Dirichlet(
                        (alphas - 1.0) * (1.0 - one_hot_vectors) + 1.0
                    )
                    uniform_dirichlet = torch.distributions.dirichlet.Dirichlet(
                        torch.ones_like(one_hot_vectors).type(torch.FloatTensor).to(device)
                    )
                    kl_regularization_loss = torch.distributions.kl.kl_divergence(
                        estimated_dirichlet, uniform_dirichlet
                    )
                    # Compute final loss; the KL term is annealed in over
                    # `self.annealing_step` optimizer steps.
                    annealing_multiplier = torch.min(
                        torch.as_tensor(current_step / self.annealing_step).to(device),
                        torch.as_tensor(1.0).to(device),
                    )
                    per_proposal_loss = (
                        per_instance_loss.sum(1)
                        + annealing_multiplier * kl_regularization_loss
                    )
                    # Compute evidence auxiliary loss
                    evidence_maximization_loss = smooth_l1_loss(
                        dirichlet_s,
                        100.0 * torch.ones_like(dirichlet_s).to(device),
                        beta=self.smooth_l1_beta,
                        reduction="mean",
                    )
                    evidence_maximization_loss *= annealing_multiplier
                    # Compute final loss: foreground/background terms are
                    # averaged separately, then combined.
                    foreground_loss = per_proposal_loss[
                        (gt_classes >= 0) & (gt_classes < pred_class_logits.shape[1] - 1)
                    ]
                    background_loss = per_proposal_loss[
                        gt_classes == pred_class_logits.shape[1] - 1
                    ]
                    loss_cls = (
                        torch.mean(foreground_loss) + torch.mean(background_loss)
                    ) / 2 + 0.01 * evidence_maximization_loss
            else:
                # Plain deterministic classification loss.
                loss_cls = F.cross_entropy(
                    pred_class_logits, gt_classes, reduction="mean"
                )
        # Compute regression loss:
        if no_instances:
            # TODO 0.0 * pred.sum() is enough since PT1.6
            loss_box_reg = 0.0 * smooth_l1_loss(
                pred_proposal_deltas,
                torch.zeros_like(pred_proposal_deltas),
                0.0,
                reduction="sum",
            )
        else:
            gt_proposal_deltas = self.box2box_transform.get_deltas(
                proposals_boxes.tensor, gt_boxes.tensor
            )
            box_dim = gt_proposal_deltas.size(1)  # 4 or 5
            cls_agnostic_bbox_reg = pred_proposal_deltas.size(1) == box_dim
            device = pred_proposal_deltas.device
            bg_class_ind = pred_class_logits.shape[1] - 1
            # Box delta loss is only computed between the prediction for the gt class k
            # (if 0 <= k < bg_class_ind) and the target; there is no loss defined on predictions
            # for non-gt classes and background.
            # Empty fg_inds produces a valid loss of zero as long as the size_average
            # arg to smooth_l1_loss is False (otherwise it uses torch.mean internally
            # and would produce a nan loss).
            fg_inds = torch.nonzero(
                (gt_classes >= 0) & (gt_classes < bg_class_ind), as_tuple=True
            )[0]
            if cls_agnostic_bbox_reg:
                # pred_proposal_deltas only corresponds to foreground class for
                # agnostic
                gt_class_cols = torch.arange(box_dim, device=device)
            else:
                fg_gt_classes = gt_classes[fg_inds]
                # pred_proposal_deltas for class k are located in columns [b * k : b * k + b],
                # where b is the dimension of box representation (4 or 5)
                # Note that compared to Detectron1,
                # we do not perform bounding box regression for background
                # classes.
                gt_class_cols = box_dim * fg_gt_classes[:, None] + torch.arange(
                    box_dim, device=device
                )
                # Analogous column indices into the covariance output.
                gt_covar_class_cols = self.bbox_cov_dims * fg_gt_classes[
                    :, None
                ] + torch.arange(self.bbox_cov_dims, device=device)
            loss_reg_normalizer = gt_classes.numel()
            pred_proposal_deltas = pred_proposal_deltas[fg_inds[:, None], gt_class_cols]
            gt_proposals_delta = gt_proposal_deltas[fg_inds]
            if self.compute_bbox_cov:
                pred_proposal_covs = pred_proposal_covs[
                    fg_inds[:, None], gt_covar_class_cols
                ]
                pred_proposal_covs = clamp_log_variance(pred_proposal_covs)
                if self.bbox_cov_loss == "negative_log_likelihood":
                    if self.bbox_cov_type == "diagonal":
                        # Ger foreground proposals.
                        _proposals_boxes = proposals_boxes.tensor[fg_inds]
                        # Compute regression negative log likelihood loss according to:
                        # "What Uncertainties Do We Need in Bayesian Deep Learning for Computer Vision?", NIPS 2017
                        loss_box_reg = (
                            0.5
                            * torch.exp(-pred_proposal_covs)
                            * smooth_l1_loss(
                                pred_proposal_deltas,
                                gt_proposals_delta,
                                beta=self.smooth_l1_beta,
                            )
                        )
                        # Log-variance regularizer keeps variances from growing
                        # without bound.
                        loss_covariance_regularize = 0.5 * pred_proposal_covs
                        loss_box_reg += loss_covariance_regularize
                        loss_box_reg = torch.sum(loss_box_reg) / loss_reg_normalizer
                    else:
                        # Multivariate Gaussian Negative Log Likelihood loss using pytorch
                        # distributions.multivariate_normal.log_prob()
                        forecaster_cholesky = covariance_output_to_cholesky(
                            pred_proposal_covs
                        )
                        multivariate_normal_dists = (
                            distributions.multivariate_normal.MultivariateNormal(
                                pred_proposal_deltas, scale_tril=forecaster_cholesky
                            )
                        )
                        loss_box_reg = -multivariate_normal_dists.log_prob(
                            gt_proposals_delta
                        )
                        loss_box_reg = torch.sum(loss_box_reg) / loss_reg_normalizer
                elif self.bbox_cov_loss == "second_moment_matching":
                    # Compute regression covariance using second moment
                    # matching.
                    loss_box_reg = smooth_l1_loss(
                        pred_proposal_deltas, gt_proposals_delta, self.smooth_l1_beta
                    )
                    errors = pred_proposal_deltas - gt_proposals_delta
                    if self.bbox_cov_type == "diagonal":
                        # Handel diagonal case: match predicted variance to the
                        # squared regression error per coordinate.
                        second_moment_matching_term = smooth_l1_loss(
                            torch.exp(pred_proposal_covs),
                            errors ** 2,
                            beta=self.smooth_l1_beta,
                        )
                        loss_box_reg += second_moment_matching_term
                        loss_box_reg = torch.sum(loss_box_reg) / loss_reg_normalizer
                    else:
                        # Handel full covariance case
                        errors = torch.unsqueeze(errors, 2)
                        gt_error_covar = torch.matmul(
                            errors, torch.transpose(errors, 2, 1)
                        )
                        # This is the cholesky decomposition of the covariance matrix.
                        # We reconstruct it from 10 estimated parameters as a
                        # lower triangular matrix.
                        forecaster_cholesky = covariance_output_to_cholesky(
                            pred_proposal_covs
                        )
                        predicted_covar = torch.matmul(
                            forecaster_cholesky,
                            torch.transpose(forecaster_cholesky, 2, 1),
                        )
                        second_moment_matching_term = smooth_l1_loss(
                            predicted_covar,
                            gt_error_covar,
                            beta=self.smooth_l1_beta,
                            reduction="sum",
                        )
                        loss_box_reg = (
                            torch.sum(loss_box_reg) + second_moment_matching_term
                        ) / loss_reg_normalizer
                elif self.bbox_cov_loss == "energy_loss":
                    # Energy score: a proper scoring rule estimated with
                    # Monte-Carlo samples from the predicted distribution.
                    forecaster_cholesky = covariance_output_to_cholesky(
                        pred_proposal_covs
                    )
                    # Define per-anchor Distributions
                    multivariate_normal_dists = (
                        distributions.multivariate_normal.MultivariateNormal(
                            pred_proposal_deltas, scale_tril=forecaster_cholesky
                        )
                    )
                    # Define Monte-Carlo Samples
                    distributions_samples = multivariate_normal_dists.rsample(
                        (self.bbox_cov_num_samples + 1,)
                    )
                    distributions_samples_1 = distributions_samples[
                        0 : self.bbox_cov_num_samples, :, :
                    ]
                    distributions_samples_2 = distributions_samples[
                        1 : self.bbox_cov_num_samples + 1, :, :
                    ]
                    # Compute energy score
                    loss_covariance_regularize = (
                        -smooth_l1_loss(
                            distributions_samples_1,
                            distributions_samples_2,
                            beta=self.smooth_l1_beta,
                            reduction="sum",
                        )
                        / self.bbox_cov_num_samples
                    )  # Second term
                    gt_proposals_delta_samples = torch.repeat_interleave(
                        gt_proposals_delta.unsqueeze(0),
                        self.bbox_cov_num_samples,
                        dim=0,
                    )
                    loss_first_moment_match = (
                        2.0
                        * smooth_l1_loss(
                            distributions_samples_1,
                            gt_proposals_delta_samples,
                            beta=self.smooth_l1_beta,
                            reduction="sum",
                        )
                        / self.bbox_cov_num_samples
                    )  # First term
                    # Final Loss
                    loss_box_reg = (
                        loss_first_moment_match + loss_covariance_regularize
                    ) / loss_reg_normalizer
                elif self.bbox_cov_loss == "pmb_negative_log_likelihood":
                    # PMB NLL loss computes its own classification term as well;
                    # see `use_nll_loss` handling at the end of this method.
                    losses = self.nll_od_loss_with_nms(
                        predictions, proposals, gt_instances
                    )
                    loss_box_reg = losses["loss_box_reg"]
                    use_nll_loss = True
                else:
                    raise ValueError(
                        "Invalid regression loss name {}.".format(self.bbox_cov_loss)
                    )
                # Perform loss annealing. Not really essential in Generalized-RCNN case, but good practice for more
                # elaborate regression variance losses.
                standard_regression_loss = smooth_l1_loss(
                    pred_proposal_deltas,
                    gt_proposals_delta,
                    self.smooth_l1_beta,
                    reduction="sum",
                )
                standard_regression_loss = (
                    standard_regression_loss / loss_reg_normalizer
                )
                probabilistic_loss_weight = get_probabilistic_loss_weight(
                    current_step, self.annealing_step
                )
                # Blend from the standard smooth-L1 loss towards the
                # probabilistic loss as training progresses.
                loss_box_reg = (
                    (1.0 - probabilistic_loss_weight) * standard_regression_loss
                    + probabilistic_loss_weight * loss_box_reg
                )
                if use_nll_loss:
                    # The NLL loss already contains a classification component,
                    # so the standard cls loss is annealed out symmetrically.
                    loss_cls = (1.0 - probabilistic_loss_weight) * loss_cls
            else:
                loss_box_reg = smooth_l1_loss(
                    pred_proposal_deltas,
                    gt_proposals_delta,
                    self.smooth_l1_beta,
                    reduction="sum",
                )
                loss_box_reg = loss_box_reg / loss_reg_normalizer
        if use_nll_loss:
            # `losses` already holds the NLL decomposition terms; overwrite the
            # headline entries with the annealed values computed above.
            losses["loss_cls"] = loss_cls
            losses["loss_box_reg"] = loss_box_reg
        else:
            losses = {"loss_cls": loss_cls, "loss_box_reg": loss_box_reg}
        return losses
    def nll_od_loss_with_nms(
        self,
        predictions: Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor],
        proposals: List[Instances],
        gt_instances,
    ):
        """
        Compute the PMB (Poisson Multi-Bernoulli) negative log-likelihood loss
        on NMS-filtered detections.

        Args:
            predictions: return values of :meth:`forward()` (logits, deltas,
                logit variances, covariance parameters).
            proposals (list[Instances]): per-image proposals matching the predictions.
            gt_instances (list[Instances]): per-image ground-truth instances.

        Returns:
            Dict[str, Tensor]: "loss_box_reg" carries the differentiable NLL;
            the remaining entries are detached decomposition terms for logging.
        """
        # A matching distance of the form "log_prob_<s>" scales the covariance
        # used during matching by <s>; plain "log_prob" leaves it unscaled.
        if "log_prob" in self.matching_distance and self.matching_distance != "log_prob":
            covar_scaling = float(self.matching_distance.split("_")[-1])
            matching_distance = "log_prob"
        else:
            covar_scaling = 1
            matching_distance = self.matching_distance
        self.ppp_intensity_function.update_distribution()
        _, pred_deltas, _, pred_covs = predictions
        boxes = self.predict_boxes(predictions, proposals)
        scores = self.predict_probs(predictions, proposals)
        # Clamp probabilities away from {0, 1} for numerical stability of logs.
        scores = [score.clamp(1e-6, 1 - 1e-6) for score in scores]
        _, num_classes = scores[0].shape
        num_classes -= 1  # do not count background class
        image_shapes = [x.image_size for x in proposals]
        num_prop_per_image = [len(p) for p in proposals]
        # Apply NMS without score threshold
        instances, kept_idx = fast_rcnn_inference(
            boxes,
            scores,
            image_shapes,
            0.0,
            self.test_nms_thresh,
            self.test_topk_per_image,
        )
        # A proposal can survive NMS for several classes; deduplicate indices.
        kept_idx = [k.unique() for k in kept_idx]
        pred_covs = pred_covs.split(num_prop_per_image)
        pred_deltas = pred_deltas.split(num_prop_per_image)
        kept_proposals = [
            prop.proposal_boxes.tensor[idx] for prop, idx in zip(proposals, kept_idx)
        ]
        pred_covs = [pred_cov[kept] for pred_cov, kept in zip(pred_covs, kept_idx)]
        # Cholesky factors of the (clamped) per-class covariance outputs.
        nll_pred_cov = [
            covariance_output_to_cholesky(clamp_log_variance(reshape_box_preds(cov, num_classes)))
            for cov in pred_covs
        ]
        nll_scores = [score[kept] for score, kept in zip(scores, kept_idx)]
        nll_pred_deltas = [
            reshape_box_preds(delta[kept], num_classes)
            for delta, kept in zip(pred_deltas, kept_idx)
        ]
        trans_func = lambda x,y: self.box2box_transform.apply_deltas(x,y)
        # Propagate delta-space uncertainty to box space via the unscented
        # transform through the box decoding function.
        box_means = []
        box_chols = []
        bs = len(nll_pred_deltas)
        for i in range(bs):
            box_mean, box_chol = unscented_transform(nll_pred_deltas[i], nll_pred_cov[i], kept_proposals[i], trans_func)
            box_means.append(box_mean)
            box_chols.append(box_chol)
        nll_gt_classes = [instances.gt_classes for instances in gt_instances]
        gt_boxes = [instances.gt_boxes.tensor for instances in gt_instances]
        if self.bbox_cov_dist_type == "gaussian":
            regression_dist = (
                lambda x, y: distributions.multivariate_normal.MultivariateNormal(
                    loc=x, scale_tril=y
                )
            )
        elif self.bbox_cov_dist_type == "laplacian":
            # Laplace scale derived from the Cholesky diagonal so the variance
            # matches the Gaussian parameterization (var = 2 * scale^2).
            regression_dist = lambda x, y: distributions.laplace.Laplace(
                loc=x, scale=y.diagonal(dim1=-2, dim2=-1) / np.sqrt(2)
            )
        else:
            raise Exception(
                f"Bounding box uncertainty distribution {self.bbox_cov_dist_type} is not available."
            )
        if self.use_prediction_mixture:
            # Low-confidence predictions form a mixture that augments the PPP
            # intensity; high-confidence ones become Bernoulli components.
            ppps = []
            src_boxes_tot = []
            src_box_chol_tot = []
            src_boxes_deltas_tot = []
            src_boxes_deltas_chol_tot = []
            src_scores_tot = []
            gt_box_deltas = []
            for i in range(bs):
                image_shape = image_shapes[i]
                h,w = image_shape
                # Normalize box coordinates to [0, 1] by image size.
                scaling = torch.tensor([1/w,1/h],device=box_means[i].device).repeat(2)
                pred_box_means = box_means[i]*scaling
                pred_box_chols = torch.diag_embed(scaling)@box_chols[i]
                pred_box_deltas = nll_pred_deltas[i]
                pred_box_delta_chols = nll_pred_cov[i]
                pred_cls_probs = nll_scores[i]
                #max_conf = pred_cls_probs[..., :num_classes].max(dim=1)[0]
                # Confidence = 1 - background probability.
                max_conf = 1 - pred_cls_probs[..., -1]
                ppp_preds_idx = (
                    max_conf <= self.ppp_intensity_function.ppp_confidence_thres
                )
                props = kept_proposals[i][ppp_preds_idx.logical_not()]
                # Get delta between each GT and proposal, batch-wise
                tmp = torch.stack(
                    [
                        self.box2box_transform.get_deltas(
                            props,
                            gt_boxes[i][j].unsqueeze(0).repeat(len(props), 1),
                        )
                        for j in range(len(gt_boxes[i]))
                    ]
                )
                gt_box_deltas.append(
                    tmp.permute(1, 0, 2)
                )  # [gt,pred,boxdim] -> [pred, gt, boxdim]
                gt_boxes[i] = gt_boxes[i]*scaling
                # Mixture used for the loss itself (unscaled covariance).
                mixture_dict = {}
                mixture_dict["weights"] = max_conf[ppp_preds_idx]
                mixture_dict["means"] = pred_box_means[ppp_preds_idx, 0]
                selected_chols = pred_box_chols[ppp_preds_idx, 0]
                mixture_dict["covs"] = selected_chols@(selected_chols.transpose(-1,-2))
                mixture_dict["cls_probs"] = pred_cls_probs[ppp_preds_idx, :self.num_classes]
                mixture_dict["reg_dist_type"] = self.bbox_cov_dist_type
                if self.bbox_cov_dist_type == "gaussian":
                    mixture_dict[
                        "reg_dist"
                    ] = distributions.multivariate_normal.MultivariateNormal
                    mixture_dict["reg_kwargs"] = {
                        "scale_tril": selected_chols
                    }
                elif self.bbox_cov_dist_type == "laplacian":
                    mixture_dict["reg_dist"] = distributions.laplace.Laplace
                    mixture_dict["reg_kwargs"] = {
                        "scale": (
                            selected_chols.diagonal(dim1=-2, dim2=-1)
                            / np.sqrt(2)
                        )
                    }
                loss_ppp = PoissonPointUnion()
                loss_ppp.add_ppp(self.ppp_constructor({"predictions": mixture_dict}))
                loss_ppp.add_ppp(self.ppp_intensity_function)
                # Mixture used for matching (covariance scaled by covar_scaling).
                mixture_dict = {}
                mixture_dict["weights"] = max_conf[ppp_preds_idx]
                mixture_dict["means"] = pred_box_means[ppp_preds_idx, 0]
                scale_mat = torch.eye(pred_box_chols.shape[-1]).to(pred_box_chols.device)*covar_scaling
                scaled_chol = scale_mat@pred_box_chols[ppp_preds_idx, 0]
                mixture_dict["covs"] = (scaled_chol)@(scaled_chol.transpose(-1,-2))
                mixture_dict["cls_probs"] = pred_cls_probs[ppp_preds_idx, :self.num_classes]
                mixture_dict["reg_dist_type"] = self.bbox_cov_dist_type
                if self.bbox_cov_dist_type == "gaussian":
                    mixture_dict[
                        "reg_dist"
                    ] = distributions.multivariate_normal.MultivariateNormal
                    mixture_dict["reg_kwargs"] = {
                        "scale_tril": scaled_chol
                    }
                elif self.bbox_cov_dist_type == "laplacian":
                    mixture_dict["reg_dist"] = distributions.laplace.Laplace
                    mixture_dict["reg_kwargs"] = {
                        "scale": (
                            (scaled_chol).diagonal(dim1=-2, dim2=-1)
                            / np.sqrt(2)
                        )
                    }
                match_ppp = PoissonPointUnion()
                match_ppp.add_ppp(self.ppp_constructor({"predictions": mixture_dict}))
                match_ppp.add_ppp(self.ppp_intensity_function)
                ppps.append({"matching": match_ppp, "loss": loss_ppp})
                # High-confidence predictions continue as Bernoulli components.
                src_boxes_tot.append(pred_box_means[ppp_preds_idx.logical_not()])
                src_box_chol_tot.append(pred_box_chols[ppp_preds_idx.logical_not()])
                src_scores_tot.append(pred_cls_probs[ppp_preds_idx.logical_not()])
                src_boxes_deltas_tot.append(pred_box_deltas[ppp_preds_idx.logical_not()])
                src_boxes_deltas_chol_tot.append(pred_box_delta_chols[ppp_preds_idx.logical_not()])
            nll_pred_deltas = src_boxes_deltas_tot
            nll_pred_delta_chols = src_boxes_deltas_chol_tot
            nll_pred_boxes = src_boxes_tot
            nll_pred_cov = src_box_chol_tot
            nll_scores = src_scores_tot
            use_target_delta_matching = False
        elif self.ppp_intensity_function.ppp_intensity_type == "gaussian_mixture":
            # All predictions stay Bernoulli; the PPP intensity is a standalone
            # Gaussian mixture. Boxes are normalized to image coordinates.
            ppps = []
            src_boxes_tot = []
            src_box_chol_tot = []
            src_boxes_deltas_tot = []
            src_boxes_deltas_chol_tot = []
            src_scores_tot = []
            gt_box_deltas = []
            for i in range(bs):
                image_shape = image_shapes[i]
                h,w = image_shape
                scaling = torch.tensor([1/w,1/h],device=box_means[i].device).repeat(2)
                pred_box_means = box_means[i]*scaling
                pred_box_chols = torch.diag_embed(scaling)@box_chols[i]
                pred_box_deltas = nll_pred_deltas[i]
                pred_box_delta_chols = nll_pred_cov[i]
                pred_cls_probs = nll_scores[i]
                props = kept_proposals[i]
                # Get delta between each GT and proposal, batch-wise
                tmp = torch.stack(
                    [
                        self.box2box_transform.get_deltas(
                            props,
                            gt_boxes[i][j].unsqueeze(0).repeat(len(props), 1),
                        )
                        for j in range(len(gt_boxes[i]))
                    ]
                )
                gt_box_deltas.append(
                    tmp.permute(1, 0, 2)
                )  # [gt,pred,boxdim] -> [pred, gt, boxdim]
                gt_boxes[i] = gt_boxes[i]*scaling
                src_boxes_tot.append(pred_box_means)
                src_box_chol_tot.append(pred_box_chols)
                src_scores_tot.append(pred_cls_probs)
                src_boxes_deltas_tot.append(pred_box_deltas)
                src_boxes_deltas_chol_tot.append(pred_box_delta_chols)
            nll_pred_deltas = src_boxes_deltas_tot
            nll_pred_delta_chols = src_boxes_deltas_chol_tot
            nll_pred_boxes = src_boxes_tot
            nll_pred_cov = src_box_chol_tot
            nll_scores = src_scores_tot
            use_target_delta_matching = False
            ppps = [{"loss": self.ppp_intensity_function, "matching": self.ppp_intensity_function}]*bs
        else:
            # Delta-space variant: the NLL is evaluated directly on regression
            # deltas and matching uses per-proposal GT deltas.
            gt_box_deltas = []
            for i in range(len(gt_boxes)):
                # Get delta between each GT and proposal, batch-wise
                tmp = torch.stack(
                    [
                        self.box2box_transform.get_deltas(
                            kept_proposals[i],
                            gt_boxes[i][j].unsqueeze(0).repeat(len(kept_proposals[i]), 1),
                        )
                        for j in range(len(gt_boxes[i]))
                    ]
                )
                gt_box_deltas.append(
                    tmp.permute(1, 0, 2)
                )  # [gt,pred,boxdim] -> [pred, gt, boxdim]
            use_target_delta_matching = True
            ppps = [{"loss": self.ppp_intensity_function, "matching": self.ppp_intensity_function}]*bs
            nll_pred_delta_chols = nll_pred_cov
            nll_pred_deltas = nll_pred_deltas
            nll_pred_boxes = nll_pred_deltas
            nll_pred_cov = nll_pred_cov
        nll, associations, decompositions = negative_log_likelihood(
            nll_scores,
            nll_pred_boxes,
            nll_pred_cov,
            gt_boxes,
            nll_gt_classes,
            image_shapes,
            regression_dist,
            ppps,
            self.nll_max_num_solutions,
            scores_have_bg_cls=True,
            target_deltas=gt_box_deltas,
            matching_distance=matching_distance,
            use_target_delta_matching=use_target_delta_matching,
            pred_deltas=nll_pred_deltas,
            pred_delta_chols=nll_pred_delta_chols,
        )
        # Save some stats
        storage = get_event_storage()
        num_classes = self.num_classes
        mean_variance = np.mean(
            [
                cov.diagonal(dim1=-2,dim2=-1)
                .pow(2)
                .mean()
                .item()
                for cov in nll_pred_cov
                if cov.shape[0] > 0
            ]
        )
        storage.put_scalar("nll/mean_covariance", mean_variance)
        # NOTE(review): uses the module-level `device` global — TODO confirm it
        # matches the tensors' device here.
        ppp_intens = np.sum([ppp["loss"].integrate(
            torch.as_tensor(image_shapes).to(device), num_classes
        )
            .mean()
            .item()
            for ppp in ppps
        ])
        storage.put_scalar("nll/ppp_intensity", ppp_intens)
        # Per-image loss decompositions, normalized by component counts and
        # clipped for logging robustness.
        reg_loss = np.mean(
            [
                np.clip(
                    decomp["matched_bernoulli_reg"][0]
                    / (decomp["num_matched_bernoulli"][0] + 1e-6),
                    -1e25,
                    1e25,
                )
                for decomp in decompositions
            ]
        )
        cls_loss_match = np.mean(
            [
                np.clip(
                    decomp["matched_bernoulli_cls"][0]
                    / (decomp["num_matched_bernoulli"][0] + 1e-6),
                    -1e25,
                    1e25,
                )
                for decomp in decompositions
            ]
        )
        cls_loss_no_match = np.mean(
            [
                np.clip(
                    decomp["unmatched_bernoulli"][0]
                    / (decomp["num_unmatched_bernoulli"][0] + 1e-6),
                    -1e25,
                    1e25,
                )
                for decomp in decompositions
            ]
        )
        # Collect all losses
        losses = dict()
        losses["loss_box_reg"] = nll
        # Add losses for logging, these do not propagate gradients
        losses["loss_regression"] = torch.tensor(reg_loss).to(nll.device)
        losses["loss_cls_matched"] = torch.tensor(cls_loss_match).to(nll.device)
        losses["loss_cls_unmatched"] = torch.tensor(cls_loss_no_match).to(nll.device)
        return losses
def inference(self, predictions, proposals):
"""
Returns:
list[Instances]: same as `fast_rcnn_inference`.
list[Tensor]: same as `fast_rcnn_inference`.
"""
boxes = self.predict_boxes(predictions, proposals)
scores = self.predict_probs(predictions, proposals)
image_shapes = [x.image_size for x in proposals]
return fast_rcnn_inference(
boxes,
scores,
image_shapes,
self.test_score_thresh,
self.test_nms_thresh,
self.test_topk_per_image,
)
def predict_boxes_for_gt_classes(self, predictions, proposals):
"""
Returns:
list[Tensor]: A list of Tensors of predicted boxes for GT classes in case of
class-specific box head. Element i of the list has shape (Ri, B), where Ri is
the number of predicted objects for image i and B is the box dimension (4 or 5)
"""
if not len(proposals):
return []
scores, proposal_deltas = predictions
proposal_boxes = [p.proposal_boxes for p in proposals]
proposal_boxes = proposal_boxes[0].cat(proposal_boxes).tensor
N, B = proposal_boxes.shape
predict_boxes = self.box2box_transform.apply_deltas(
proposal_deltas, proposal_boxes
) # Nx(KxB)
K = predict_boxes.shape[1] // B
if K > 1:
gt_classes = torch.cat([p.gt_classes for p in proposals], dim=0)
# Some proposals are ignored or have a background class. Their gt_classes
# cannot be used as index.
gt_classes = gt_classes.clamp_(0, K - 1)
predict_boxes = predict_boxes.view(N, K, B)[
torch.arange(N, dtype=torch.long, device=predict_boxes.device),
gt_classes,
]
num_prop_per_image = [len(p) for p in proposals]
return predict_boxes.split(num_prop_per_image)
def predict_boxes(self, predictions, proposals):
"""
Args:
predictions: return values of :meth:`forward()`.
proposals (list[Instances]): proposals that match the features that were
used to compute predictions. The ``proposal_boxes`` field is expected.
Returns:
list[Tensor]: A list of Tensors of predicted class-specific or class-agnostic boxes
for each image. Element i has shape (Ri, K * B) or (Ri, B), where Ri is
the number of predicted objects for image i and B is the box dimension (4 or 5)
"""
if not len(proposals):
return []
_, proposal_deltas, _, _ = predictions
num_prop_per_image = [len(p) for p in proposals]
proposal_boxes = [p.proposal_boxes for p in proposals]
proposal_boxes = proposal_boxes[0].cat(proposal_boxes).tensor
predict_boxes = self.box2box_transform.apply_deltas(
proposal_deltas, proposal_boxes
) # Nx(KxB)
return predict_boxes.split(num_prop_per_image)
def predict_probs(self, predictions, proposals):
"""
Args:
predictions: return values of :meth:`forward()`.
proposals (list[Instances]): proposals that match the features that were
used to compute predictions.
Returns:
list[Tensor]: A list of Tensors of predicted class probabilities for each image.
Element i has shape (Ri, K + 1), where Ri is the number of predicted objects
for image i.
"""
scores, _, _, _ = predictions
num_inst_per_image = [len(p) for p in proposals]
if self.cls_var_loss == "evidential":
alphas = get_dir_alphas(scores)
dirichlet_s = alphas.sum(1).unsqueeze(1)
# Compute probabilities
probs = alphas / dirichlet_s
else:
probs = F.softmax(scores, dim=-1)
return probs.split(num_inst_per_image, dim=0)
# Todo: new detectron interface required copying code. Check for better
# way to inherit from FastRCNNConvFCHead.
@ROI_BOX_HEAD_REGISTRY.register()
class DropoutFastRCNNConvFCHead(nn.Module):
    """
    A head with several 3x3 conv layers (each followed by norm & relu) and then
    several fc layers (each followed by relu) and dropout.
    """
    @configurable
    def __init__(
        self,
        input_shape: ShapeSpec,
        *,
        conv_dims: List[int],
        fc_dims: List[int],
        conv_norm="",
        dropout_rate,
    ):
        """
        NOTE: this interface is experimental.
        Args:
            input_shape (ShapeSpec): shape of the input feature.
            conv_dims (list[int]): the output dimensions of the conv layers
            fc_dims (list[int]): the output dimensions of the fc layers
            conv_norm (str or callable): normalization for the conv layers.
                See :func:`detectron2.layers.get_norm` for supported types.
            dropout_rate (float): p for dropout layer
        """
        super().__init__()
        assert len(conv_dims) + len(fc_dims) > 0
        self.dropout_rate = dropout_rate
        # A rate of exactly 0.0 is treated as "dropout disabled" by callers
        # (e.g. for toggling train-mode dropout during MC-dropout inference).
        self.use_dropout = self.dropout_rate != 0.0
        self._output_size = (
            input_shape.channels,
            input_shape.height,
            input_shape.width,
        )
        self.conv_norm_relus = []
        for k, conv_dim in enumerate(conv_dims):
            # 3x3 conv with padding 1 keeps the spatial size unchanged; bias is
            # dropped when a norm layer is present (norm provides the shift).
            conv = Conv2d(
                self._output_size[0],
                conv_dim,
                kernel_size=3,
                padding=1,
                bias=not conv_norm,
                norm=get_norm(conv_norm, conv_dim),
                activation=F.relu,
            )
            # add_module registration order fixes the state-dict key names
            # ("conv1", "conv2", ...); do not reorder.
            self.add_module("conv{}".format(k + 1), conv)
            self.conv_norm_relus.append(conv)
            self._output_size = (conv_dim, self._output_size[1], self._output_size[2])
        self.fcs = []
        self.fcs_dropout = []
        for k, fc_dim in enumerate(fc_dims):
            # First fc consumes the flattened conv output; later fcs are fc->fc.
            fc = Linear(np.prod(self._output_size), fc_dim)
            fc_dropout = nn.Dropout(p=self.dropout_rate)
            self.add_module("fc{}".format(k + 1), fc)
            self.add_module("fc_dropout{}".format(k + 1), fc_dropout)
            self.fcs.append(fc)
            self.fcs_dropout.append(fc_dropout)
            self._output_size = fc_dim
        # Detectron2's standard initializations: MSRA fill for convs,
        # Xavier fill for fully-connected layers.
        for layer in self.conv_norm_relus:
            weight_init.c2_msra_fill(layer)
        for layer in self.fcs:
            weight_init.c2_xavier_fill(layer)
    @classmethod
    def from_config(cls, cfg, input_shape):
        # Translate config keys into constructor kwargs (configurable pattern).
        num_conv = cfg.MODEL.ROI_BOX_HEAD.NUM_CONV
        conv_dim = cfg.MODEL.ROI_BOX_HEAD.CONV_DIM
        num_fc = cfg.MODEL.ROI_BOX_HEAD.NUM_FC
        fc_dim = cfg.MODEL.ROI_BOX_HEAD.FC_DIM
        return {
            "input_shape": input_shape,
            "conv_dims": [conv_dim] * num_conv,
            "fc_dims": [fc_dim] * num_fc,
            "conv_norm": cfg.MODEL.ROI_BOX_HEAD.NORM,
            "dropout_rate": cfg.MODEL.PROBABILISTIC_MODELING.DROPOUT_RATE,
        }
    def forward(self, x):
        # Conv stack (each Conv2d applies its own norm + relu), then flatten,
        # then fc stack. Note dropout is applied BEFORE the ReLU on each fc.
        for layer in self.conv_norm_relus:
            x = layer(x)
        if len(self.fcs):
            if x.dim() > 2:
                x = torch.flatten(x, start_dim=1)
            for layer, dropout in zip(self.fcs, self.fcs_dropout):
                x = F.relu(dropout(layer(x)))
        return x
    @property
    def output_shape(self):
        """
        Returns:
            ShapeSpec: the output feature shape
        """
        o = self._output_size
        # _output_size is an int after any fc layer, else a (C, H, W) tuple.
        if isinstance(o, int):
            return ShapeSpec(channels=o)
        else:
            return ShapeSpec(channels=o[0], height=o[1], width=o[2])
| 66,644 | 40.523364 | 159 | py |
pmb-nll | pmb-nll-main/src/probabilistic_modeling/probabilistic_detr.py | import numpy as np
import torch
import torch.nn.functional as F
# Detectron imports
from detectron2.modeling import META_ARCH_REGISTRY, detector_postprocess
from detectron2.utils.events import get_event_storage
# Detr imports
from models.detr import DETR, MLP, SetCriterion
from torch import distributions, nn
from torch._C import device
from util import box_ops
from util.misc import NestedTensor, accuracy, nested_tensor_from_tensor_list
from probabilistic_modeling.losses import negative_log_likelihood
# Project imports
from probabilistic_modeling.modeling_utils import (
PoissonPointProcessIntensityFunction, clamp_log_variance,
covariance_output_to_cholesky, get_probabilistic_loss_weight, PoissonPointUnion)
@META_ARCH_REGISTRY.register()
class ProbabilisticDetr(META_ARCH_REGISTRY.get("Detr")):
    """
    Probabilistic DETR meta-architecture.

    Extends the registered ``Detr`` meta-arch with optional classification
    variance and bounding-box covariance heads, and swaps in a probabilistic
    set criterion (including the PMB negative log-likelihood with a Poisson
    point process clutter model).
    """
    def __init__(self, cfg):
        super().__init__(cfg)
        # Parse configs
        self.cls_var_loss = cfg.MODEL.PROBABILISTIC_MODELING.CLS_VAR_LOSS.NAME
        self.compute_cls_var = self.cls_var_loss != "none"
        self.cls_var_num_samples = (
            cfg.MODEL.PROBABILISTIC_MODELING.CLS_VAR_LOSS.NUM_SAMPLES
        )
        self.bbox_cov_loss = cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.NAME
        self.compute_bbox_cov = self.bbox_cov_loss != "none"
        self.bbox_cov_num_samples = (
            cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.NUM_SAMPLES
        )
        self.bbox_cov_dist_type = (
            cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.DISTRIBUTION_TYPE
        )
        self.bbox_cov_type = (
            cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.COVARIANCE_TYPE
        )
        if self.bbox_cov_type == "diagonal":
            # Diagonal covariance matrix has N elements
            self.bbox_cov_dims = 4
        else:
            # Number of elements required to describe an NxN covariance matrix is
            # computed as: (N * (N + 1)) / 2
            self.bbox_cov_dims = 10
        self.dropout_rate = cfg.MODEL.PROBABILISTIC_MODELING.DROPOUT_RATE
        self.use_dropout = self.dropout_rate != 0.0
        self.current_step = 0
        # The annealing step controls the probabilistic loss weight schedule;
        # default to the first LR-drop step when not set explicitly.
        self.annealing_step = (
            cfg.SOLVER.STEPS[0]
            if cfg.MODEL.PROBABILISTIC_MODELING.ANNEALING_STEP <= 0
            else cfg.MODEL.PROBABILISTIC_MODELING.ANNEALING_STEP
        )
        if self.bbox_cov_loss == "pmb_negative_log_likelihood":
            # Factory so the criterion can build extra PPP intensity functions
            # (e.g. prediction mixtures) on demand.
            ppp_intensity_function = lambda x: PoissonPointProcessIntensityFunction(
                cfg, device=self.device, **x
            )
            self.nll_max_num_solutions = (
                cfg.MODEL.PROBABILISTIC_MODELING.NLL_MAX_NUM_SOLUTIONS
            )
        else:
            ppp_intensity_function = None
            self.nll_max_num_solutions = 0
        # Create probabilistic output layers: replace the plain DETR module
        # with one exposing variance/covariance heads.
        self.detr = CustomDetr(
            self.detr.backbone,
            self.detr.transformer,
            num_classes=self.num_classes,
            num_queries=self.detr.num_queries,
            aux_loss=self.detr.aux_loss,
            compute_cls_var=self.compute_cls_var,
            compute_bbox_cov=self.compute_bbox_cov,
            bbox_cov_dims=self.bbox_cov_dims,
        )
        self.detr.to(self.device)
        # Select which loss terms the criterion computes. The PMB NLL handles
        # classification itself, so no separate label loss is added for it.
        losses = ["cardinality"]
        if self.compute_cls_var:
            losses.append("labels_" + self.cls_var_loss)
        elif not self.bbox_cov_loss == "pmb_negative_log_likelihood":
            losses.append("labels")
        if self.compute_bbox_cov:
            losses.append("boxes_" + self.bbox_cov_loss)
        else:
            losses.append("boxes")
        # Replace setcriterion with our own implementation
        self.criterion = ProbabilisticSetCriterion(
            self.num_classes,
            matcher=self.criterion.matcher,
            weight_dict=self.criterion.weight_dict,
            eos_coef=self.criterion.eos_coef,
            losses=losses,
            nll_max_num_solutions=self.nll_max_num_solutions,
            ppp=ppp_intensity_function,
            bbox_cov_dist_type=self.bbox_cov_dist_type,
            matching_distance=cfg.MODEL.PROBABILISTIC_MODELING.MATCHING_DISTANCE,
            use_prediction_mixture=cfg.MODEL.PROBABILISTIC_MODELING.PPP.USE_PREDICTION_MIXTURE,
        )
        self.criterion.set_bbox_cov_num_samples(self.bbox_cov_num_samples)
        self.criterion.set_cls_var_num_samples(self.cls_var_num_samples)
        self.criterion.to(self.device)
        self.input_format = "RGB"
    def get_ppp_intensity_function(self):
        """Return the criterion's Poisson point process intensity function."""
        return self.criterion.ppp_intensity_function
    def forward(self, batched_inputs, return_raw_results=False, is_mc_dropout=False):
        """
        Args:
            batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
                Each item in the list contains the inputs for one image.
                For now, each item in the list is a dict that contains:
                * image: Tensor, image in (C, H, W) format.
                * instances: Instances
                Other information that's included in the original dicts, such as:
                * "height", "width" (int): the output resolution of the model, used in inference.
                  See :meth:`postprocess` for details.
            return_raw_results (bool): if True return unprocessed results for probabilistic inference.
            is_mc_dropout (bool): if True, return unprocessed results even if self.is_training flag is on.
        Returns:
            dict[str: Tensor]:
                mapping from a named loss to a tensor storing the loss. Used during training only.
        """
        # FIX: narrowed a bare `except:` to `except Exception:` so
        # KeyboardInterrupt/SystemExit are no longer swallowed. Outside a
        # training loop there is no EventStorage; fall back to a manual count.
        try:
            self.current_step += get_event_storage().iter
        except Exception:
            self.current_step += 1
        images = self.preprocess_image(batched_inputs)
        output = self.detr(images)
        if self.training and not is_mc_dropout:
            gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
            targets = self.prepare_targets(gt_instances)
            loss_dict = self.criterion(output, targets)
            weight_dict = self.criterion.weight_dict
            prob_weight = get_probabilistic_loss_weight(
                self.current_step, self.annealing_step
            )
            for k in loss_dict.keys():
                if k in weight_dict:
                    loss_dict[k] *= weight_dict[k]
                if "loss" not in k:  # some "losses" are here for logging purposes only
                    probabilistic_loss_weight = 1
                elif "nll" in k:
                    probabilistic_loss_weight = prob_weight
                else:
                    probabilistic_loss_weight = 1 - prob_weight
                # uncomment for weighted prob loss
                # loss_dict[k] *= probabilistic_loss_weight
            return loss_dict
        elif return_raw_results:
            if (
                self.compute_bbox_cov
                and self.bbox_cov_loss == "pmb_negative_log_likelihood"
            ):
                output["ppp"] = self.criterion.ppp_intensity_function.get_weights()
            return output
        else:
            # Standard DETR inference path with detectron2 post-processing.
            box_cls = output["pred_logits"]
            box_pred = output["pred_boxes"]
            mask_pred = output["pred_masks"] if self.mask_on else None
            results = self.inference(box_cls, box_pred, mask_pred, images.image_sizes)
            processed_results = []
            for results_per_image, input_per_image, image_size in zip(
                results, batched_inputs, images.image_sizes
            ):
                height = input_per_image.get("height", image_size[0])
                width = input_per_image.get("width", image_size[1])
                r = detector_postprocess(results_per_image, height, width)
                processed_results.append({"instances": r})
            return processed_results
class CustomDetr(DETR):
    """This is the DETR module that performs PROBABILISTIC object detection"""
    def __init__(
        self,
        backbone,
        transformer,
        num_classes,
        num_queries,
        aux_loss=False,
        compute_cls_var=False,
        compute_bbox_cov=False,
        bbox_cov_dims=4,
    ):
        """
        Args:
            backbone: feature-extraction backbone passed through to DETR.
            transformer: DETR transformer module.
            num_classes: number of foreground classes.
            num_queries: number of object query slots.
            aux_loss (bool): whether to return intermediate decoder outputs.
            compute_cls_var (bool): add a per-class log-variance head.
            compute_bbox_cov (bool): add a box covariance-parameter head.
            bbox_cov_dims (int): 4 for diagonal covariance, 10 for full
                (lower-triangular Cholesky parameters of a 4x4 matrix).
        """
        super().__init__(backbone, transformer, num_classes, num_queries, aux_loss)
        hidden_dim = self.transformer.d_model
        self.compute_cls_var = compute_cls_var
        if self.compute_cls_var:
            # Head predicts log-variances; initialized near a tiny variance
            # (bias = 2*log(0.01)) so training starts close to deterministic.
            self.class_var_embed = nn.Linear(hidden_dim, num_classes + 1)
            nn.init.normal_(self.class_var_embed.weight, std=0.0001)
            nn.init.constant_(self.class_var_embed.bias, 2 * np.log(0.01))
        self.compute_bbox_cov = compute_bbox_cov
        if self.compute_bbox_cov:
            self.bbox_covar_embed = MLP(hidden_dim, hidden_dim, bbox_cov_dims, 3)
    def forward(self, samples: NestedTensor):
        """Run DETR and additionally emit variance/covariance outputs.

        Returns a dict with "pred_logits", "pred_boxes" and, when enabled,
        "pred_logits_var" / "pred_boxes_cov", plus "aux_outputs" if aux_loss.
        """
        if isinstance(samples, (list, torch.Tensor)):
            samples = nested_tensor_from_tensor_list(samples)
        features, pos = self.backbone(samples)
        src, mask = features[-1].decompose()
        assert mask is not None
        hs = self.transformer(
            self.input_proj(src), mask, self.query_embed.weight, pos[-1]
        )[0]
        outputs_class = self.class_embed(hs)
        outputs_coord = self.bbox_embed(hs).sigmoid()
        # Only change to detr code happens here. We need to expose the features from
        # the transformer to compute variance parameters.
        out = {"pred_logits": outputs_class[-1], "pred_boxes": outputs_coord[-1]}
        if self.compute_cls_var:
            # Variance head only uses the last decoder layer's features.
            cls_var_out = self.class_var_embed(hs[-1])
            out.update({"pred_logits_var": cls_var_out})
        if self.compute_bbox_cov:
            # Covariance head runs on all decoder layers so aux losses can use it.
            bbox_cov_out = self.bbox_covar_embed(hs)
            out.update({"pred_boxes_cov": bbox_cov_out[-1]})
        else:
            bbox_cov_out = None
        if self.aux_loss:
            out["aux_outputs"] = self._set_aux_loss(
                outputs_class, outputs_coord, bbox_cov_out
            )
        return out
    def _set_aux_loss(self, outputs_class, outputs_coord, bbox_cov_out=None):
        # this is a workaround to make torchscript happy, as torchscript
        # doesn't support dictionary with non-homogeneous values, such
        # as a dict having both a Tensor and a list.
        # The last decoder layer is excluded ([:-1]); it is the main output.
        if bbox_cov_out is None:
            return [
                {"pred_logits": a, "pred_boxes": b}
                for a, b in zip(outputs_class[:-1], outputs_coord[:-1])
            ]
        else:
            return [
                {"pred_logits": a, "pred_boxes": b, "pred_boxes_cov": c}
                for a, b, c in zip(
                    outputs_class[:-1], outputs_coord[:-1], bbox_cov_out[:-1]
                )
            ]
class ProbabilisticSetCriterion(SetCriterion):
    """
    This is custom set criterion to allow probabilistic estimates
    """
    def __init__(
        self,
        num_classes,
        matcher,
        weight_dict,
        eos_coef,
        losses,
        nll_max_num_solutions,
        ppp,
        bbox_cov_dist_type,
        matching_distance,
        use_prediction_mixture,
    ):
        """
        Args (beyond the base SetCriterion):
            nll_max_num_solutions: max data-association hypotheses kept in the
                PMB negative log-likelihood.
            ppp: factory callable building a PoissonPointProcessIntensityFunction
                from a kwargs dict (see ProbabilisticDetr.__init__); may be None
                when the PMB NLL loss is unused.
            bbox_cov_dist_type: "gaussian" or "laplacian" regression likelihood.
            matching_distance: distance used for association; a "log_prob_<s>"
                suffix encodes an extra covariance scaling factor s.
            use_prediction_mixture: if True, low-confidence predictions are
                folded into the PPP clutter model instead of the Bernoulli set.
        """
        super().__init__(num_classes, matcher, weight_dict, eos_coef, losses)
        self.probabilistic_loss_weight = 0.0
        # Defaults; overwritten via the setters below.
        self.bbox_cov_num_samples = 1000
        self.cls_var_num_samples = 1000
        self.nll_max_num_solutions = nll_max_num_solutions
        self.ppp_intensity_function = ppp({})
        self.ppp_constructor = ppp
        self.bbox_cov_dist_type = bbox_cov_dist_type
        self.matching_distance = matching_distance
        self.use_prediction_mixture = use_prediction_mixture
    def set_bbox_cov_num_samples(self, bbox_cov_num_samples):
        # Number of Monte-Carlo samples for the energy-distance box loss.
        self.bbox_cov_num_samples = bbox_cov_num_samples
    def set_cls_var_num_samples(self, cls_var_num_samples):
        # Number of Monte-Carlo samples for the loss-attenuation label loss.
        self.cls_var_num_samples = cls_var_num_samples
    def loss_labels_att(self, outputs, targets, indices, num_boxes, log=True):
        """Classification loss (NLL + Loss attenuation)
        targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
        outputs must contain the mean pred_logits and the variance pred_logits_var
        """
        if "pred_logits_var" not in outputs:
            return self.loss_labels(outputs, targets, indices, num_boxes, log)
        assert "pred_logits" in outputs
        src_logits = outputs["pred_logits"]
        src_logits_var = outputs["pred_logits_var"]
        # Head outputs log-variance; convert to a standard deviation.
        src_logits_var = torch.sqrt(torch.exp(src_logits_var))
        univariate_normal_dists = distributions.normal.Normal(
            src_logits, scale=src_logits_var
        )
        # rsample keeps the reparameterization gradient through the variance.
        pred_class_stochastic_logits = univariate_normal_dists.rsample(
            (self.cls_var_num_samples,)
        )
        # Fold the sample dimension into the query dimension so a single
        # cross-entropy call averages over all samples.
        pred_class_stochastic_logits = pred_class_stochastic_logits.view(
            pred_class_stochastic_logits.shape[1],
            pred_class_stochastic_logits.shape[2]
            * pred_class_stochastic_logits.shape[0],
            -1,
        )
        idx = self._get_src_permutation_idx(indices)
        target_classes_o = torch.cat(
            [t["labels"][J] for t, (_, J) in zip(targets, indices)]
        )
        # Unmatched queries are assigned the background class.
        target_classes = torch.full(
            src_logits.shape[:2],
            self.num_classes,
            dtype=torch.int64,
            device=src_logits.device,
        )
        target_classes[idx] = target_classes_o
        # Replicate targets to line up with the folded stochastic samples.
        target_classes = torch.unsqueeze(target_classes, dim=0)
        target_classes = torch.repeat_interleave(
            target_classes, self.cls_var_num_samples, dim=0
        )
        target_classes = target_classes.view(
            target_classes.shape[1], target_classes.shape[2] * target_classes.shape[0]
        )
        loss_ce = F.cross_entropy(
            pred_class_stochastic_logits.transpose(1, 2),
            target_classes,
            self.empty_weight,
        )
        losses = {"loss_ce": loss_ce}
        if log:
            # TODO this should probably be a separate loss, not hacked in this
            # one here
            losses["class_error"] = 100 - accuracy(src_logits[idx], target_classes_o)[0]
        return losses
    def loss_boxes_var_nll(self, outputs, targets, indices, num_boxes):
        """Compute the losses related to the bounding boxes, the nll probabilistic regression loss and the GIoU loss
        targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]
        The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size.
        """
        if "pred_boxes_cov" not in outputs:
            return self.loss_boxes(outputs, targets, indices, num_boxes)
        assert "pred_boxes" in outputs
        idx = self._get_src_permutation_idx(indices)
        src_boxes = outputs["pred_boxes"][idx]
        src_vars = clamp_log_variance(outputs["pred_boxes_cov"][idx])
        target_boxes = torch.cat(
            [t["boxes"][i] for t, (_, i) in zip(targets, indices)], dim=0
        )
        loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction="none")
        if src_vars.shape[1] == 4:
            # Diagonal covariance: per-coordinate attenuated L1 NLL, with
            # src_vars holding log-variances.
            loss_nll = 0.5 * torch.exp(-src_vars) * loss_bbox + 0.5 * src_vars
        else:
            # Full covariance: multivariate Gaussian NLL via the Cholesky factor.
            forecaster_cholesky = covariance_output_to_cholesky(src_vars)
            if forecaster_cholesky.shape[0] != 0:
                multivariate_normal_dists = (
                    distributions.multivariate_normal.MultivariateNormal(
                        src_boxes, scale_tril=forecaster_cholesky
                    )
                )
                loss_nll = -multivariate_normal_dists.log_prob(target_boxes)
            else:
                # No matched boxes: fall back to the (empty) L1 tensor.
                loss_nll = loss_bbox
        loss_nll_final = loss_nll.sum() / num_boxes
        # Collect all losses
        losses = dict()
        losses["loss_bbox"] = loss_nll_final
        # Add iou loss
        losses = update_with_iou_loss(losses, src_boxes, target_boxes, num_boxes)
        return losses
    def loss_boxes_energy(self, outputs, targets, indices, num_boxes):
        """Compute the losses related to the bounding boxes, the energy distance loss and the GIoU loss
        targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]
        The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size.
        """
        if "pred_boxes_cov" not in outputs:
            return self.loss_boxes(outputs, targets, indices, num_boxes)
        assert "pred_boxes" in outputs
        idx = self._get_src_permutation_idx(indices)
        src_boxes = outputs["pred_boxes"][idx]
        target_boxes = torch.cat(
            [t["boxes"][i] for t, (_, i) in zip(targets, indices)], dim=0
        )
        # Begin probabilistic loss computation
        src_vars = clamp_log_variance(outputs["pred_boxes_cov"][idx])
        forecaster_cholesky = covariance_output_to_cholesky(src_vars)
        multivariate_normal_dists = (
            distributions.multivariate_normal.MultivariateNormal(
                src_boxes, scale_tril=forecaster_cholesky
            )
        )
        # Define Monte-Carlo Samples
        # N+1 samples are drawn so two overlapping windows of N samples each
        # give (approximately) independent sample pairs.
        distributions_samples = multivariate_normal_dists.rsample(
            (self.bbox_cov_num_samples + 1,)
        )
        distributions_samples_1 = distributions_samples[
            0 : self.bbox_cov_num_samples, :, :
        ]
        distributions_samples_2 = distributions_samples[
            1 : self.bbox_cov_num_samples + 1, :, :
        ]
        # Compute energy score. Smooth L1 loss is preferred in this case to
        # maintain the proper scoring properties.
        loss_covariance_regularize = (
            -F.l1_loss(
                distributions_samples_1, distributions_samples_2, reduction="sum"
            )
            / self.bbox_cov_num_samples
        )  # Second term
        gt_proposals_delta_samples = torch.repeat_interleave(
            target_boxes.unsqueeze(0), self.bbox_cov_num_samples, dim=0
        )
        loss_first_moment_match = (
            2
            * F.l1_loss(
                distributions_samples_1, gt_proposals_delta_samples, reduction="sum"
            )
            / self.bbox_cov_num_samples
        )  # First term
        loss_energy = loss_first_moment_match + loss_covariance_regularize
        # Normalize and add losses
        loss_energy_final = loss_energy.sum() / num_boxes
        # Collect all losses
        losses = dict()
        losses["loss_bbox"] = loss_energy_final
        # Add iou loss
        losses = update_with_iou_loss(losses, src_boxes, target_boxes, num_boxes)
        return losses
    def loss_boxes_smm(self, outputs, targets, indices, num_boxes):
        """Compute the losses related to the bounding boxes, the L1 regression loss, SMM variance and Covariance loss and the GIoU loss
        targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]
        The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size.
        """
        if "pred_boxes_cov" not in outputs:
            return self.loss_boxes(outputs, targets, indices, num_boxes)
        assert "pred_boxes" in outputs
        idx = self._get_src_permutation_idx(indices)
        src_boxes = outputs["pred_boxes"][idx]
        target_boxes = torch.cat(
            [t["boxes"][i] for t, (_, i) in zip(targets, indices)], dim=0
        )
        loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction="none")
        # Begin probabilistic loss computation
        src_vars = clamp_log_variance(outputs["pred_boxes_cov"][idx])
        errors = src_boxes - target_boxes
        if src_vars.shape[1] == 4:
            # Diagonal case: match predicted variances to squared errors.
            second_moment_matching_term = F.l1_loss(
                torch.exp(src_vars), errors ** 2, reduction="none"
            )
        else:
            # Full case: match the reconstructed covariance to the empirical
            # error outer product.
            errors = torch.unsqueeze(errors, 2)
            gt_error_covar = torch.matmul(errors, torch.transpose(errors, 2, 1))
            # This is the cholesky decomposition of the covariance matrix.
            # We reconstruct it from 10 estimated parameters as a
            # lower triangular matrix.
            forecaster_cholesky = covariance_output_to_cholesky(src_vars)
            predicted_covar = torch.matmul(
                forecaster_cholesky, torch.transpose(forecaster_cholesky, 2, 1)
            )
            second_moment_matching_term = F.l1_loss(
                predicted_covar, gt_error_covar, reduction="none"
            )
        loss_smm = second_moment_matching_term.sum() / num_boxes
        # Normalize and add losses
        loss_bbox_final = loss_bbox.sum() / num_boxes
        loss_smm_final = loss_smm + loss_bbox_final
        # Collect all losses
        losses = dict()
        losses["loss_bbox"] = loss_smm_final
        # Add iou loss
        losses = update_with_iou_loss(losses, src_boxes, target_boxes, num_boxes)
        return losses
    def loss_pmb_nll(self, outputs, targets, indices, num_boxes):
        """Poisson multi-Bernoulli negative log-likelihood loss.

        Jointly covers classification and regression via data association
        between predicted Bernoulli components and ground truth, with a
        Poisson point process modelling clutter / undetected objects.
        Also logs several diagnostic scalars to the event storage.
        """
        if "pred_boxes_cov" not in outputs:
            return self.loss_boxes(outputs, targets, indices, num_boxes)
        assert "pred_logits" in outputs
        src_logits = outputs["pred_logits"]
        # Clamp probabilities away from {0, 1} for numerical stability of logs.
        src_scores = src_logits.softmax(-1).clamp(1e-6, 1 - 1e-6)
        num_classes = src_scores.shape[-1] - 1
        assert "pred_boxes" in outputs
        src_boxes = outputs["pred_boxes"]
        # Replicate the (class-agnostic) box and Cholesky per class so shapes
        # line up with per-class scores: [bs, queries, num_classes, ...].
        src_boxes = src_boxes.unsqueeze(2).repeat(1, 1, num_classes, 1)
        assert "pred_boxes_cov" in outputs
        src_box_cov = outputs["pred_boxes_cov"]
        src_box_chol = covariance_output_to_cholesky(src_box_cov)
        src_box_chol = src_box_chol.unsqueeze(2).repeat(1, 1, num_classes, 1, 1)
        tgt_classes = [t["labels"] for t in targets]
        tgt_boxes = [t["boxes"] for t in targets]
        self.ppp_intensity_function.update_distribution()
        if self.bbox_cov_dist_type == "gaussian":
            regression_dist = (
                lambda x, y: distributions.multivariate_normal.MultivariateNormal(
                    loc=x, scale_tril=y
                )
            )
        elif self.bbox_cov_dist_type == "laplacian":
            # Laplace scale b relates to std by std = b*sqrt(2); divide the
            # Cholesky diagonal accordingly.
            regression_dist = lambda x, y: distributions.laplace.Laplace(
                loc=x, scale=(y.diagonal(dim1=-2, dim2=-1) / np.sqrt(2))
            )
        else:
            raise Exception(
                f"Bounding box uncertainty distribution {self.bbox_cov_dist_type} is not available."
            )
        # "log_prob_<s>" encodes a covariance scaling factor s for matching.
        if "log_prob" in self.matching_distance and self.matching_distance != "log_prob":
            covar_scaling = float(self.matching_distance.split("_")[-1])
            matching_distance = "log_prob"
        else:
            covar_scaling = 1
            matching_distance = self.matching_distance
        bs = src_logits.shape[0]
        # DETR boxes are normalized, so every image is a unit square here.
        image_shapes = torch.as_tensor([[1, 1] for i in range(bs)]).to(src_boxes.device)
        if self.use_prediction_mixture:
            # Move low-confidence predictions into the PPP clutter model and
            # keep only confident ones as Bernoulli components. Two mixtures
            # are built per image: one for the loss ("loss") and one with
            # scaled covariances for association ("matching").
            ppps = []
            src_boxes_tot = []
            src_box_chol_tot = []
            src_scores_tot = []
            for i in range(bs):
                pred_box_means = src_boxes[i]
                pred_box_chols = src_box_chol[i]
                pred_cls_probs = src_scores[i]
                #max_conf = pred_cls_probs[..., :num_classes].max(dim=1)[0]
                # Confidence = 1 - P(background).
                max_conf = 1 - pred_cls_probs[..., -1]
                ppp_preds_idx = (
                    max_conf <= self.ppp_intensity_function.ppp_confidence_thres
                )
                mixture_dict = {}
                mixture_dict["weights"] = max_conf[ppp_preds_idx]
                mixture_dict["means"] = pred_box_means[ppp_preds_idx, 0]
                mixture_dict["covs"] = pred_box_chols[ppp_preds_idx, 0]@pred_box_chols[ppp_preds_idx, 0].transpose(-1,-2)
                mixture_dict["cls_probs"] = pred_cls_probs[ppp_preds_idx, :num_classes]
                mixture_dict["reg_dist_type"] = self.bbox_cov_dist_type
                if self.bbox_cov_dist_type == "gaussian":
                    mixture_dict[
                        "reg_dist"
                    ] = distributions.multivariate_normal.MultivariateNormal
                    mixture_dict["reg_kwargs"] = {
                        "scale_tril": pred_box_chols[ppp_preds_idx, 0]
                    }
                elif self.bbox_cov_dist_type == "laplacian":
                    mixture_dict["reg_dist"] = distributions.laplace.Laplace
                    mixture_dict["reg_kwargs"] = {
                        "scale": (
                            pred_box_chols[ppp_preds_idx, 0].diagonal(dim1=-2, dim2=-1)
                            / np.sqrt(2)
                        )
                    }
                # Loss PPP: prediction mixture + learned background intensity.
                loss_ppp = PoissonPointUnion()
                loss_ppp.add_ppp(self.ppp_constructor({"predictions": mixture_dict}))
                loss_ppp.add_ppp(self.ppp_intensity_function)
                # Matching PPP: same mixture but with covariances scaled by
                # covar_scaling (applied through the Cholesky factors).
                mixture_dict = {}
                mixture_dict["weights"] = max_conf[ppp_preds_idx]
                mixture_dict["means"] = pred_box_means[ppp_preds_idx, 0]
                scale_mat = torch.eye(pred_box_chols.shape[-1]).to(pred_box_chols.device)*covar_scaling
                scaled_cov = scale_mat@pred_box_chols[ppp_preds_idx, 0]
                mixture_dict["covs"] = (scaled_cov)@(scaled_cov).transpose(-1,-2)
                mixture_dict["cls_probs"] = pred_cls_probs[ppp_preds_idx, :num_classes]
                mixture_dict["reg_dist_type"] = self.bbox_cov_dist_type
                if self.bbox_cov_dist_type == "gaussian":
                    mixture_dict[
                        "reg_dist"
                    ] = distributions.multivariate_normal.MultivariateNormal
                    mixture_dict["reg_kwargs"] = {
                        "scale_tril": scale_mat@pred_box_chols[ppp_preds_idx, 0]
                    }
                elif self.bbox_cov_dist_type == "laplacian":
                    mixture_dict["reg_dist"] = distributions.laplace.Laplace
                    mixture_dict["reg_kwargs"] = {
                        "scale": (
                            (scale_mat@pred_box_chols[ppp_preds_idx, 0]).diagonal(dim1=-2, dim2=-1)
                            / np.sqrt(2)
                        )
                    }
                match_ppp = PoissonPointUnion()
                match_ppp.add_ppp(self.ppp_constructor({"predictions": mixture_dict}))
                match_ppp.add_ppp(self.ppp_intensity_function)
                ppps.append({"matching": match_ppp, "loss": loss_ppp})
                # Confident predictions remain as Bernoulli components.
                src_boxes_tot.append(pred_box_means[ppp_preds_idx.logical_not()])
                src_box_chol_tot.append(pred_box_chols[ppp_preds_idx.logical_not()])
                src_scores_tot.append(pred_cls_probs[ppp_preds_idx.logical_not()])
            src_boxes = src_boxes_tot
            src_box_chol = src_box_chol_tot
            src_scores = src_scores_tot
        elif self.ppp_intensity_function.ppp_intensity_type == "gaussian_mixture":
            ppps = [{"loss": self.ppp_intensity_function, "matching": self.ppp_intensity_function}]*bs
        else:
            ppps = [{"loss": self.ppp_intensity_function, "matching": self.ppp_intensity_function}]*bs
        nll, associations, decompositions = negative_log_likelihood(
            src_scores,
            src_boxes,
            src_box_chol,
            tgt_boxes,
            tgt_classes,
            image_shapes,
            regression_dist,
            ppps,
            self.nll_max_num_solutions,
            scores_have_bg_cls=True,
            matching_distance=matching_distance,
            covar_scaling=covar_scaling
        )
        # Save some stats
        storage = get_event_storage()
        num_classes = self.num_classes
        # Mean of squared Cholesky diagonals, i.e. mean predicted variance.
        mean_variance = np.mean(
            [
                cov.diagonal(dim1=-2,dim2=-1)
                .pow(2)
                .mean()
                .item()
                for cov in src_box_chol
                if cov.shape[0] > 0
            ]
        )
        storage.put_scalar("nll/mean_covariance", mean_variance)
        ppp_intens = np.sum([ppp["loss"].integrate(
            image_shapes, num_classes
        )
            .mean()
            .item()
            for ppp in ppps
        ])
        storage.put_scalar("nll/ppp_intensity", ppp_intens)
        # Per-component diagnostics, normalized by match counts; clipped to
        # keep the logged values finite.
        reg_loss = np.mean(
            [
                np.clip(
                    decomp["matched_bernoulli_reg"][0]
                    / (decomp["num_matched_bernoulli"][0] + 1e-6),
                    -1e25,
                    1e25,
                )
                for decomp in decompositions
            ]
        )
        cls_loss_match = np.mean(
            [
                np.clip(
                    decomp["matched_bernoulli_cls"][0]
                    / (decomp["num_matched_bernoulli"][0] + 1e-6),
                    -1e25,
                    1e25,
                )
                for decomp in decompositions
            ]
        )
        cls_loss_no_match = np.mean(
            [
                np.clip(
                    decomp["unmatched_bernoulli"][0]
                    / (decomp["num_unmatched_bernoulli"][0] + 1e-6),
                    -1e25,
                    1e25,
                )
                for decomp in decompositions
            ]
        )
        # Collect all losses
        losses = dict()
        losses["loss_nll"] = nll
        # Add losses for logging, these do not propagate gradients
        losses["regression_matched_nll"] = torch.tensor(reg_loss).to(nll.device)
        losses["cls_matched_nll"] = torch.tensor(cls_loss_match).to(nll.device)
        losses["cls_unmatched_nll"] = torch.tensor(cls_loss_no_match).to(nll.device)
        # Extract matched boxes
        iou_src_boxes = []
        iou_target_boxes = []
        for i, association in enumerate(associations):
            association = torch.as_tensor(association).to(src_boxes[i].device).long()
            permutation_association = association[
                0, association[0, :, 1] >= 0
            ]  # select all predictions associated with GT
            # Drop association entries that point past the Bernoulli set
            # (e.g. PPP-explained targets).
            permutation_association = permutation_association[
                permutation_association[:, 0] < src_boxes[i].shape[0]
            ]
            iou_src_boxes.append(src_boxes[i][permutation_association[:, 0], 0])
            iou_target_boxes.append(tgt_boxes[i][permutation_association[:, 1]])
        # Add iou loss
        losses = update_with_iou_loss(
            losses, torch.cat(iou_src_boxes), torch.cat(iou_target_boxes), num_boxes
        )
        return losses
    def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs):
        """Dispatch a loss name (as listed in ``self.losses``) to its method."""
        loss_map = {
            "labels": self.loss_labels,
            "labels_loss_attenuation": self.loss_labels_att,
            "cardinality": self.loss_cardinality,
            "boxes": self.loss_boxes,
            "boxes_negative_log_likelihood": self.loss_boxes_var_nll,
            "boxes_energy_loss": self.loss_boxes_energy,
            "boxes_second_moment_matching": self.loss_boxes_smm,
            "boxes_pmb_negative_log_likelihood": self.loss_pmb_nll,
            "masks": self.loss_masks,
        }
        assert loss in loss_map, f"do you really want to compute {loss} loss?"
        return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs)
def update_with_iou_loss(losses, src_boxes, target_boxes, num_boxes):
    """Add the generalized-IoU loss between matched box pairs to ``losses``.

    Boxes are expected in (cx, cy, w, h) format. The pairwise GIoU matrix is
    computed and only its diagonal (the matched pairs) contributes; the sum is
    normalized by ``num_boxes``. Returns the updated ``losses`` dict.
    """
    src_xyxy = box_ops.box_cxcywh_to_xyxy(src_boxes)
    tgt_xyxy = box_ops.box_cxcywh_to_xyxy(target_boxes)
    giou_matrix = box_ops.generalized_box_iou(src_xyxy, tgt_xyxy)
    losses["loss_giou"] = (1 - torch.diag(giou_matrix)).sum() / num_boxes
    return losses
| 31,909 | 38.541512 | 135 | py |
pmb-nll | pmb-nll-main/src/detr/main.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import datetime
import json
import random
import time
from pathlib import Path
import numpy as np
import torch
from torch.utils.data import DataLoader, DistributedSampler
import datasets
import util.misc as utils
from datasets import build_dataset, get_coco_api_from_dataset
from engine import evaluate, train_one_epoch
from models import build_model
def get_args_parser():
    """Build the argument parser holding every DETR training/eval option.

    Returned with add_help=False so it can be used as a parent parser by the
    script-level parser in __main__.
    """
    parser = argparse.ArgumentParser('Set transformer detector', add_help=False)
    add = parser.add_argument

    # Optimization
    add('--lr', default=1e-4, type=float)
    add('--lr_backbone', default=1e-5, type=float)
    add('--batch_size', default=2, type=int)
    add('--weight_decay', default=1e-4, type=float)
    add('--epochs', default=300, type=int)
    add('--lr_drop', default=200, type=int)
    add('--clip_max_norm', default=0.1, type=float,
        help='gradient clipping max norm')

    # Model parameters
    add('--frozen_weights', type=str, default=None,
        help="Path to the pretrained model. If set, only the mask head will be trained")
    # * Backbone
    add('--backbone', default='resnet50', type=str,
        help="Name of the convolutional backbone to use")
    add('--dilation', action='store_true',
        help="If true, we replace stride with dilation in the last convolutional block (DC5)")
    add('--position_embedding', default='sine', type=str, choices=('sine', 'learned'),
        help="Type of positional embedding to use on top of the image features")
    # * Transformer
    add('--enc_layers', default=6, type=int,
        help="Number of encoding layers in the transformer")
    add('--dec_layers', default=6, type=int,
        help="Number of decoding layers in the transformer")
    add('--dim_feedforward', default=2048, type=int,
        help="Intermediate size of the feedforward layers in the transformer blocks")
    add('--hidden_dim', default=256, type=int,
        help="Size of the embeddings (dimension of the transformer)")
    add('--dropout', default=0.1, type=float,
        help="Dropout applied in the transformer")
    add('--nheads', default=8, type=int,
        help="Number of attention heads inside the transformer's attentions")
    add('--num_queries', default=100, type=int,
        help="Number of query slots")
    add('--pre_norm', action='store_true')
    # * Segmentation
    add('--masks', action='store_true',
        help="Train segmentation head if the flag is provided")

    # Loss (note: the flag is the negation; args.aux_loss defaults to True)
    add('--no_aux_loss', dest='aux_loss', action='store_false',
        help="Disables auxiliary decoding losses (loss at each layer)")
    # * Matcher
    add('--set_cost_class', default=1, type=float,
        help="Class coefficient in the matching cost")
    add('--set_cost_bbox', default=5, type=float,
        help="L1 box coefficient in the matching cost")
    add('--set_cost_giou', default=2, type=float,
        help="giou box coefficient in the matching cost")
    # * Loss coefficients
    add('--mask_loss_coef', default=1, type=float)
    add('--dice_loss_coef', default=1, type=float)
    add('--bbox_loss_coef', default=5, type=float)
    add('--giou_loss_coef', default=2, type=float)
    add('--eos_coef', default=0.1, type=float,
        help="Relative classification weight of the no-object class")

    # Dataset parameters
    add('--dataset_file', default='coco')
    add('--coco_path', type=str)
    add('--coco_panoptic_path', type=str)
    add('--remove_difficult', action='store_true')

    # Runtime / bookkeeping
    add('--output_dir', default='',
        help='path where to save, empty for no saving')
    add('--device', default='cuda',
        help='device to use for training / testing')
    add('--seed', default=42, type=int)
    add('--resume', default='', help='resume from checkpoint')
    add('--start_epoch', default=0, type=int, metavar='N',
        help='start epoch')
    add('--eval', action='store_true')
    add('--num_workers', default=2, type=int)

    # Distributed training parameters
    add('--world_size', default=1, type=int,
        help='number of distributed processes')
    add('--dist_url', default='env://', help='url used to set up distributed training')
    return parser
def main(args):
    """End-to-end DETR entry point: build model and data, then train or evaluate.

    `args` is the namespace produced by get_args_parser() (extended by the
    distributed launcher); it is mutated in place (e.g. args.start_epoch when
    resuming from a checkpoint).
    """
    # Set up (or no-op) torch.distributed; also populates args with rank info.
    utils.init_distributed_mode(args)
    print("git:\n {}\n".format(utils.get_sha()))
    if args.frozen_weights is not None:
        assert args.masks, "Frozen training is meant for segmentation only"
    print(args)
    device = torch.device(args.device)
    # fix the seed for reproducibility; offset by rank so workers draw
    # different random streams
    seed = args.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    model, criterion, postprocessors = build_model(args)
    model.to(device)
    # Keep a handle on the unwrapped module so checkpoints never contain a
    # DDP wrapper's "module." prefix.
    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module
    n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print('number of params:', n_parameters)
    # Two parameter groups: backbone gets its own (usually smaller) lr.
    param_dicts = [
        {"params": [p for n, p in model_without_ddp.named_parameters() if "backbone" not in n and p.requires_grad]},
        {
            "params": [p for n, p in model_without_ddp.named_parameters() if "backbone" in n and p.requires_grad],
            "lr": args.lr_backbone,
        },
    ]
    optimizer = torch.optim.AdamW(param_dicts, lr=args.lr,
                                  weight_decay=args.weight_decay)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_drop)
    dataset_train = build_dataset(image_set='train', args=args)
    dataset_val = build_dataset(image_set='val', args=args)
    if args.distributed:
        sampler_train = DistributedSampler(dataset_train)
        sampler_val = DistributedSampler(dataset_val, shuffle=False)
    else:
        sampler_train = torch.utils.data.RandomSampler(dataset_train)
        sampler_val = torch.utils.data.SequentialSampler(dataset_val)
    batch_sampler_train = torch.utils.data.BatchSampler(
        sampler_train, args.batch_size, drop_last=True)
    data_loader_train = DataLoader(dataset_train, batch_sampler=batch_sampler_train,
                                   collate_fn=utils.collate_fn, num_workers=args.num_workers)
    data_loader_val = DataLoader(dataset_val, args.batch_size, sampler=sampler_val,
                                 drop_last=False, collate_fn=utils.collate_fn, num_workers=args.num_workers)
    if args.dataset_file == "coco_panoptic":
        # We also evaluate AP during panoptic training, on original coco DS
        coco_val = datasets.coco.build("val", args)
        base_ds = get_coco_api_from_dataset(coco_val)
    else:
        base_ds = get_coco_api_from_dataset(dataset_val)
    if args.frozen_weights is not None:
        # Mask-head-only training: load the frozen detector weights.
        checkpoint = torch.load(args.frozen_weights, map_location='cpu')
        model_without_ddp.detr.load_state_dict(checkpoint['model'])
    output_dir = Path(args.output_dir)
    if args.resume:
        # Resume from a URL or a local checkpoint file.
        if args.resume.startswith('https'):
            checkpoint = torch.hub.load_state_dict_from_url(
                args.resume, map_location='cpu', check_hash=True)
        else:
            checkpoint = torch.load(args.resume, map_location='cpu')
        model_without_ddp.load_state_dict(checkpoint['model'])
        # Restore optimizer/scheduler state only when continuing training.
        if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:
            optimizer.load_state_dict(checkpoint['optimizer'])
            lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
            args.start_epoch = checkpoint['epoch'] + 1
    if args.eval:
        # Evaluation-only mode: one pass over the val set, then exit.
        test_stats, coco_evaluator = evaluate(model, criterion, postprocessors,
                                              data_loader_val, base_ds, device, args.output_dir)
        if args.output_dir:
            utils.save_on_master(coco_evaluator.coco_eval["bbox"].eval, output_dir / "eval.pth")
        return
    print("Start training")
    start_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            # Reshuffle shards so each epoch sees a different per-rank order.
            sampler_train.set_epoch(epoch)
        train_stats = train_one_epoch(
            model, criterion, data_loader_train, optimizer, device, epoch,
            args.clip_max_norm)
        lr_scheduler.step()
        if args.output_dir:
            checkpoint_paths = [output_dir / 'checkpoint.pth']
            # extra checkpoint before LR drop and every 100 epochs
            if (epoch + 1) % args.lr_drop == 0 or (epoch + 1) % 100 == 0:
                checkpoint_paths.append(output_dir / f'checkpoint{epoch:04}.pth')
            for checkpoint_path in checkpoint_paths:
                utils.save_on_master({
                    'model': model_without_ddp.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'lr_scheduler': lr_scheduler.state_dict(),
                    'epoch': epoch,
                    'args': args,
                }, checkpoint_path)
        test_stats, coco_evaluator = evaluate(
            model, criterion, postprocessors, data_loader_val, base_ds, device, args.output_dir
        )
        log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                     **{f'test_{k}': v for k, v in test_stats.items()},
                     'epoch': epoch,
                     'n_parameters': n_parameters}
        if args.output_dir and utils.is_main_process():
            # Append one JSON line per epoch (master process only).
            with (output_dir / "log.txt").open("a") as f:
                f.write(json.dumps(log_stats) + "\n")
            # for evaluation logs
            if coco_evaluator is not None:
                (output_dir / 'eval').mkdir(exist_ok=True)
                if "bbox" in coco_evaluator.coco_eval:
                    filenames = ['latest.pth']
                    if epoch % 50 == 0:
                        filenames.append(f'{epoch:03}.pth')
                    for name in filenames:
                        torch.save(coco_evaluator.coco_eval["bbox"].eval,
                                   output_dir / "eval" / name)
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
if __name__ == '__main__':
    # Script entry point: compose the option parser (get_args_parser is a
    # help-less parent parser), ensure the output directory exists, then run.
    parser = argparse.ArgumentParser('DETR training and evaluation script', parents=[get_args_parser()])
    args = parser.parse_args()
    if args.output_dir:
        Path(args.output_dir).mkdir(parents=True, exist_ok=True)
    main(args)
| 11,532 | 45.317269 | 116 | py |
pmb-nll | pmb-nll-main/src/detr/engine.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Train and eval functions used in main.py
"""
import math
import os
import sys
from typing import Iterable
import torch
import util.misc as utils
from datasets.coco_eval import CocoEvaluator
from datasets.panoptic_eval import PanopticEvaluator
def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module,
                    data_loader: Iterable, optimizer: torch.optim.Optimizer,
                    device: torch.device, epoch: int, max_norm: float = 0):
    """Run one training epoch and return the epoch-averaged metrics dict.

    max_norm > 0 enables gradient clipping at that norm. Exits the process
    with status 1 if the loss becomes non-finite.
    """
    model.train()
    criterion.train()
    metric_logger = utils.MetricLogger(delimiter="  ")
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 10
    for samples, targets in metric_logger.log_every(data_loader, print_freq, header):
        samples = samples.to(device)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
        outputs = model(samples)
        loss_dict = criterion(outputs, targets)
        weight_dict = criterion.weight_dict
        # Total loss = weighted sum of only the terms listed in weight_dict.
        losses = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict)
        # reduce losses over all GPUs for logging purposes
        loss_dict_reduced = utils.reduce_dict(loss_dict)
        loss_dict_reduced_unscaled = {f'{k}_unscaled': v
                                      for k, v in loss_dict_reduced.items()}
        loss_dict_reduced_scaled = {k: v * weight_dict[k]
                                    for k, v in loss_dict_reduced.items() if k in weight_dict}
        losses_reduced_scaled = sum(loss_dict_reduced_scaled.values())
        loss_value = losses_reduced_scaled.item()
        # Abort on NaN/inf loss rather than continuing on garbage gradients.
        if not math.isfinite(loss_value):
            print("Loss is {}, stopping training".format(loss_value))
            print(loss_dict_reduced)
            sys.exit(1)
        optimizer.zero_grad()
        losses.backward()
        if max_norm > 0:
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
        optimizer.step()
        metric_logger.update(loss=loss_value, **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled)
        metric_logger.update(class_error=loss_dict_reduced['class_error'])
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
@torch.no_grad()
def evaluate(model, criterion, postprocessors, data_loader, base_ds, device, output_dir):
    """Run one evaluation pass; returns (stats dict, CocoEvaluator).

    Which evaluators run depends on which keys the postprocessors dict has
    ('bbox' is always used; 'segm' and 'panoptic' are optional).
    """
    model.eval()
    criterion.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))
    header = 'Test:'
    # Evaluate with whichever IoU types the postprocessors support.
    iou_types = tuple(k for k in ('segm', 'bbox') if k in postprocessors.keys())
    coco_evaluator = CocoEvaluator(base_ds, iou_types)
    # coco_evaluator.coco_eval[iou_types[0]].params.iouThrs = [0, 0.1, 0.5, 0.75]
    panoptic_evaluator = None
    if 'panoptic' in postprocessors.keys():
        panoptic_evaluator = PanopticEvaluator(
            data_loader.dataset.ann_file,
            data_loader.dataset.ann_folder,
            output_dir=os.path.join(output_dir, "panoptic_eval"),
        )
    for samples, targets in metric_logger.log_every(data_loader, 10, header):
        samples = samples.to(device)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
        outputs = model(samples)
        loss_dict = criterion(outputs, targets)
        weight_dict = criterion.weight_dict
        # reduce losses over all GPUs for logging purposes
        loss_dict_reduced = utils.reduce_dict(loss_dict)
        loss_dict_reduced_scaled = {k: v * weight_dict[k]
                                    for k, v in loss_dict_reduced.items() if k in weight_dict}
        loss_dict_reduced_unscaled = {f'{k}_unscaled': v
                                      for k, v in loss_dict_reduced.items()}
        metric_logger.update(loss=sum(loss_dict_reduced_scaled.values()),
                             **loss_dict_reduced_scaled,
                             **loss_dict_reduced_unscaled)
        metric_logger.update(class_error=loss_dict_reduced['class_error'])
        # Convert model outputs to absolute boxes at the original image sizes.
        orig_target_sizes = torch.stack([t["orig_size"] for t in targets], dim=0)
        results = postprocessors['bbox'](outputs, orig_target_sizes)
        if 'segm' in postprocessors.keys():
            target_sizes = torch.stack([t["size"] for t in targets], dim=0)
            results = postprocessors['segm'](results, outputs, orig_target_sizes, target_sizes)
        res = {target['image_id'].item(): output for target, output in zip(targets, results)}
        if coco_evaluator is not None:
            coco_evaluator.update(res)
        if panoptic_evaluator is not None:
            # NOTE(review): target_sizes here is only bound when 'segm' is in
            # the postprocessors — panoptic evaluation appears to assume the
            # segmentation postprocessor is present as well.
            res_pano = postprocessors["panoptic"](outputs, target_sizes, orig_target_sizes)
            for i, target in enumerate(targets):
                image_id = target["image_id"].item()
                file_name = f"{image_id:012d}.png"
                res_pano[i]["image_id"] = image_id
                res_pano[i]["file_name"] = file_name
            panoptic_evaluator.update(res_pano)
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    if coco_evaluator is not None:
        coco_evaluator.synchronize_between_processes()
    if panoptic_evaluator is not None:
        panoptic_evaluator.synchronize_between_processes()
    # accumulate predictions from all images
    if coco_evaluator is not None:
        coco_evaluator.accumulate()
        coco_evaluator.summarize()
    panoptic_res = None
    if panoptic_evaluator is not None:
        panoptic_res = panoptic_evaluator.summarize()
    stats = {k: meter.global_avg for k, meter in metric_logger.meters.items()}
    if coco_evaluator is not None:
        if 'bbox' in postprocessors.keys():
            stats['coco_eval_bbox'] = coco_evaluator.coco_eval['bbox'].stats.tolist()
        if 'segm' in postprocessors.keys():
            stats['coco_eval_masks'] = coco_evaluator.coco_eval['segm'].stats.tolist()
    if panoptic_res is not None:
        stats['PQ_all'] = panoptic_res["All"]
        stats['PQ_th'] = panoptic_res["Things"]
        stats['PQ_st'] = panoptic_res["Stuff"]
    return stats, coco_evaluator
| 6,626 | 42.598684 | 103 | py |
pmb-nll | pmb-nll-main/src/detr/hubconf.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
from models.backbone import Backbone, Joiner
from models.detr import DETR, PostProcess
from models.position_encoding import PositionEmbeddingSine
from models.segmentation import DETRsegm, PostProcessPanoptic
from models.transformer import Transformer
dependencies = ["torch", "torchvision"]
def _make_detr(backbone_name: str, dilation=False, num_classes=91, mask=False):
    """Assemble a DETR model (optionally wrapped with a segmentation head).

    backbone_name selects the torchvision ResNet; dilation turns the last
    ResNet block into the DC5 variant; mask=True also returns intermediate
    backbone layers and wraps the detector in DETRsegm.
    """
    hidden_dim = 256
    cnn = Backbone(backbone_name, train_backbone=True, return_interm_layers=mask, dilation=dilation)
    joined = Joiner(cnn, PositionEmbeddingSine(hidden_dim // 2, normalize=True))
    joined.num_channels = cnn.num_channels
    detr = DETR(
        joined,
        Transformer(d_model=hidden_dim, return_intermediate_dec=True),
        num_classes=num_classes,
        num_queries=100,
    )
    return DETRsegm(detr) if mask else detr
def detr_resnet50(pretrained=False, num_classes=91, return_postprocessor=False):
    """
    DETR R50 with 6 encoder and 6 decoder layers.

    Achieves 42/62.4 AP/AP50 on COCO val5k.
    """
    model = _make_detr("resnet50", dilation=False, num_classes=num_classes)
    if pretrained:
        weights_url = "https://dl.fbaipublicfiles.com/detr/detr-r50-e632da11.pth"
        state = torch.hub.load_state_dict_from_url(url=weights_url, map_location="cpu", check_hash=True)
        model.load_state_dict(state["model"])
    return (model, PostProcess()) if return_postprocessor else model
def detr_resnet50_dc5(pretrained=False, num_classes=91, return_postprocessor=False):
    """
    DETR-DC5 R50 with 6 encoder and 6 decoder layers.

    The last block of ResNet-50 has dilation to increase
    output resolution.
    Achieves 43.3/63.1 AP/AP50 on COCO val5k.
    """
    model = _make_detr("resnet50", dilation=True, num_classes=num_classes)
    if pretrained:
        weights_url = "https://dl.fbaipublicfiles.com/detr/detr-r50-dc5-f0fb7ef5.pth"
        state = torch.hub.load_state_dict_from_url(url=weights_url, map_location="cpu", check_hash=True)
        model.load_state_dict(state["model"])
    return (model, PostProcess()) if return_postprocessor else model
def detr_resnet101(pretrained=False, num_classes=91, return_postprocessor=False):
    """
    DETR R101 with 6 encoder and 6 decoder layers.

    Achieves 43.5/63.8 AP/AP50 on COCO val5k.
    """
    # Fix: the previous docstring mislabelled this as "DETR-DC5 R101" —
    # dilation=False builds the plain R101 model; the DC5 variant is
    # detr_resnet101_dc5 below.
    model = _make_detr("resnet101", dilation=False, num_classes=num_classes)
    if pretrained:
        checkpoint = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/detr/detr-r101-2c7b67e5.pth", map_location="cpu", check_hash=True
        )
        model.load_state_dict(checkpoint["model"])
    if return_postprocessor:
        return model, PostProcess()
    return model
def detr_resnet101_dc5(pretrained=False, num_classes=91, return_postprocessor=False):
    """
    DETR-DC5 R101 with 6 encoder and 6 decoder layers.

    The last block of ResNet-101 has dilation to increase
    output resolution.
    Achieves 44.9/64.7 AP/AP50 on COCO val5k.
    """
    model = _make_detr("resnet101", dilation=True, num_classes=num_classes)
    if pretrained:
        weights_url = "https://dl.fbaipublicfiles.com/detr/detr-r101-dc5-a2e86def.pth"
        state = torch.hub.load_state_dict_from_url(url=weights_url, map_location="cpu", check_hash=True)
        model.load_state_dict(state["model"])
    return (model, PostProcess()) if return_postprocessor else model
def detr_resnet50_panoptic(
    pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False
):
    """
    DETR R50 with 6 encoder and 6 decoder layers.

    Achieves 43.4 PQ on COCO val5k.

    threshold is the minimum confidence required for keeping segments in the prediction
    """
    model = _make_detr("resnet50", dilation=False, num_classes=num_classes, mask=True)
    # COCO panoptic convention: ids <= 90 are "thing" categories.
    is_thing_map = {i: i <= 90 for i in range(250)}
    if pretrained:
        weights_url = "https://dl.fbaipublicfiles.com/detr/detr-r50-panoptic-00ce5173.pth"
        state = torch.hub.load_state_dict_from_url(url=weights_url, map_location="cpu", check_hash=True)
        model.load_state_dict(state["model"])
    if return_postprocessor:
        return model, PostProcessPanoptic(is_thing_map, threshold=threshold)
    return model
def detr_resnet50_dc5_panoptic(
    pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False
):
    """
    DETR-DC5 R50 with 6 encoder and 6 decoder layers.

    The last block of ResNet-50 has dilation to increase
    output resolution.
    Achieves 44.6 on COCO val5k.

    threshold is the minimum confidence required for keeping segments in the prediction
    """
    model = _make_detr("resnet50", dilation=True, num_classes=num_classes, mask=True)
    # COCO panoptic convention: ids <= 90 are "thing" categories.
    is_thing_map = {i: i <= 90 for i in range(250)}
    if pretrained:
        weights_url = "https://dl.fbaipublicfiles.com/detr/detr-r50-dc5-panoptic-da08f1b1.pth"
        state = torch.hub.load_state_dict_from_url(url=weights_url, map_location="cpu", check_hash=True)
        model.load_state_dict(state["model"])
    if return_postprocessor:
        return model, PostProcessPanoptic(is_thing_map, threshold=threshold)
    return model
def detr_resnet101_panoptic(
    pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False
):
    """
    DETR R101 with 6 encoder and 6 decoder layers.

    Achieves 45.1 PQ on COCO val5k.

    threshold is the minimum confidence required for keeping segments in the prediction
    """
    # Fix: the previous docstring mislabelled this as "DETR-DC5 R101" —
    # dilation=False builds the plain R101 panoptic model.
    model = _make_detr("resnet101", dilation=False, num_classes=num_classes, mask=True)
    # COCO panoptic convention: ids <= 90 are "thing" categories.
    is_thing_map = {i: i <= 90 for i in range(250)}
    if pretrained:
        checkpoint = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/detr/detr-r101-panoptic-40021d53.pth",
            map_location="cpu",
            check_hash=True,
        )
        model.load_state_dict(checkpoint["model"])
    if return_postprocessor:
        return model, PostProcessPanoptic(is_thing_map, threshold=threshold)
    return model
| 6,265 | 36.076923 | 117 | py |
pmb-nll | pmb-nll-main/src/detr/test_all.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import io
import unittest
import torch
from torch import nn, Tensor
from typing import List
from models.matcher import HungarianMatcher
from models.position_encoding import PositionEmbeddingSine, PositionEmbeddingLearned
from models.backbone import Backbone, Joiner, BackboneBase
from util import box_ops
from util.misc import nested_tensor_from_tensor_list
from hubconf import detr_resnet50, detr_resnet50_panoptic
# onnxruntime requires python 3.5 or above
try:
import onnxruntime
except ImportError:
onnxruntime = None
class Tester(unittest.TestCase):
    """Unit tests for box utilities, the Hungarian matcher and TorchScript export."""

    def test_box_cxcywh_to_xyxy(self):
        # Round-tripping through both conversions should be (near-)identity.
        t = torch.rand(10, 4)
        r = box_ops.box_xyxy_to_cxcywh(box_ops.box_cxcywh_to_xyxy(t))
        self.assertLess((t - r).abs().max(), 1e-5)

    @staticmethod
    def indices_torch2python(indices):
        # Convert matcher output (pairs of index tensors) into plain lists
        # so assertEqual can compare them.
        return [(i.tolist(), j.tolist()) for i, j in indices]

    def test_hungarian(self):
        n_queries, n_targets, n_classes = 100, 15, 91
        logits = torch.rand(1, n_queries, n_classes + 1)
        boxes = torch.rand(1, n_queries, 4)
        tgt_labels = torch.randint(high=n_classes, size=(n_targets,))
        tgt_boxes = torch.rand(n_targets, 4)
        matcher = HungarianMatcher()
        targets = [{'labels': tgt_labels, 'boxes': tgt_boxes}]
        indices_single = matcher({'pred_logits': logits, 'pred_boxes': boxes}, targets)
        indices_batched = matcher({'pred_logits': logits.repeat(2, 1, 1),
                                   'pred_boxes': boxes.repeat(2, 1, 1)}, targets * 2)
        self.assertEqual(len(indices_single[0][0]), n_targets)
        self.assertEqual(len(indices_single[0][1]), n_targets)
        # Batching identical inputs must reproduce the single-image matching.
        self.assertEqual(self.indices_torch2python(indices_single),
                         self.indices_torch2python([indices_batched[0]]))
        self.assertEqual(self.indices_torch2python(indices_single),
                         self.indices_torch2python([indices_batched[1]]))
        # test with empty targets
        tgt_labels_empty = torch.randint(high=n_classes, size=(0,))
        tgt_boxes_empty = torch.rand(0, 4)
        targets_empty = [{'labels': tgt_labels_empty, 'boxes': tgt_boxes_empty}]
        indices = matcher({'pred_logits': logits.repeat(2, 1, 1),
                           'pred_boxes': boxes.repeat(2, 1, 1)}, targets + targets_empty)
        self.assertEqual(len(indices[1][0]), 0)
        indices = matcher({'pred_logits': logits.repeat(2, 1, 1),
                           'pred_boxes': boxes.repeat(2, 1, 1)}, targets_empty * 2)
        self.assertEqual(len(indices[0][0]), 0)

    def test_position_encoding_script(self):
        # Both positional-encoding variants must be TorchScript-compatible.
        m1, m2 = PositionEmbeddingSine(), PositionEmbeddingLearned()
        mm1, mm2 = torch.jit.script(m1), torch.jit.script(m2)  # noqa

    def test_backbone_script(self):
        backbone = Backbone('resnet50', True, False, False)
        torch.jit.script(backbone)  # noqa

    def test_model_script_detection(self):
        # Scripted model must match eager outputs exactly on the same input.
        model = detr_resnet50(pretrained=False).eval()
        scripted_model = torch.jit.script(model)
        x = nested_tensor_from_tensor_list([torch.rand(3, 200, 200), torch.rand(3, 200, 250)])
        out = model(x)
        out_script = scripted_model(x)
        self.assertTrue(out["pred_logits"].equal(out_script["pred_logits"]))
        self.assertTrue(out["pred_boxes"].equal(out_script["pred_boxes"]))

    def test_model_script_panoptic(self):
        # Same as above but also checking the mask head's output.
        model = detr_resnet50_panoptic(pretrained=False).eval()
        scripted_model = torch.jit.script(model)
        x = nested_tensor_from_tensor_list([torch.rand(3, 200, 200), torch.rand(3, 200, 250)])
        out = model(x)
        out_script = scripted_model(x)
        self.assertTrue(out["pred_logits"].equal(out_script["pred_logits"]))
        self.assertTrue(out["pred_boxes"].equal(out_script["pred_boxes"]))
        self.assertTrue(out["pred_masks"].equal(out_script["pred_masks"]))

    def test_model_detection_different_inputs(self):
        model = detr_resnet50(pretrained=False).eval()
        # support NestedTensor
        x = nested_tensor_from_tensor_list([torch.rand(3, 200, 200), torch.rand(3, 200, 250)])
        out = model(x)
        self.assertIn('pred_logits', out)
        # and 4d Tensor
        x = torch.rand(1, 3, 200, 200)
        out = model(x)
        self.assertIn('pred_logits', out)
        # and List[Tensor[C, H, W]]
        x = torch.rand(3, 200, 200)
        out = model([x])
        self.assertIn('pred_logits', out)

    def test_warpped_model_script_detection(self):
        # A wrapper that accepts a raw tensor list must also be scriptable
        # and agree with the eager wrapper.
        class WrappedDETR(nn.Module):
            def __init__(self, model):
                super().__init__()
                self.model = model

            def forward(self, inputs: List[Tensor]):
                sample = nested_tensor_from_tensor_list(inputs)
                return self.model(sample)

        model = detr_resnet50(pretrained=False)
        wrapped_model = WrappedDETR(model)
        wrapped_model.eval()
        scripted_model = torch.jit.script(wrapped_model)
        x = [torch.rand(3, 200, 200), torch.rand(3, 200, 250)]
        out = wrapped_model(x)
        out_script = scripted_model(x)
        self.assertTrue(out["pred_logits"].equal(out_script["pred_logits"]))
        self.assertTrue(out["pred_boxes"].equal(out_script["pred_boxes"]))
@unittest.skipIf(onnxruntime is None, 'ONNX Runtime unavailable')
class ONNXExporterTester(unittest.TestCase):
    """Export DETR to ONNX and check onnxruntime agrees with PyTorch eager outputs."""

    @classmethod
    def setUpClass(cls):
        # Deterministic weights/inputs across the whole test class.
        torch.manual_seed(123)

    def run_model(self, model, inputs_list, tolerate_small_mismatch=False, do_constant_folding=True, dynamic_axes=None,
                  output_names=None, input_names=None):
        """Export `model` with the first input, then validate on every input in the list."""
        model.eval()
        onnx_io = io.BytesIO()
        # export to onnx with the first input
        torch.onnx.export(model, inputs_list[0], onnx_io,
                          do_constant_folding=do_constant_folding, opset_version=12,
                          dynamic_axes=dynamic_axes, input_names=input_names, output_names=output_names)
        # validate the exported model with onnx runtime
        for test_inputs in inputs_list:
            with torch.no_grad():
                if isinstance(test_inputs, torch.Tensor) or isinstance(test_inputs, list):
                    test_inputs = (nested_tensor_from_tensor_list(test_inputs),)
                test_ouputs = model(*test_inputs)
                if isinstance(test_ouputs, torch.Tensor):
                    test_ouputs = (test_ouputs,)
            self.ort_validate(onnx_io, test_inputs, test_ouputs, tolerate_small_mismatch)

    def ort_validate(self, onnx_io, inputs, outputs, tolerate_small_mismatch=False):
        """Run the serialized ONNX model and compare against the eager `outputs`."""
        inputs, _ = torch.jit._flatten(inputs)
        outputs, _ = torch.jit._flatten(outputs)

        def to_numpy(tensor):
            if tensor.requires_grad:
                return tensor.detach().cpu().numpy()
            else:
                return tensor.cpu().numpy()

        inputs = list(map(to_numpy, inputs))
        outputs = list(map(to_numpy, outputs))
        ort_session = onnxruntime.InferenceSession(onnx_io.getvalue())
        # compute onnxruntime output prediction
        ort_inputs = dict((ort_session.get_inputs()[i].name, inpt) for i, inpt in enumerate(inputs))
        ort_outs = ort_session.run(None, ort_inputs)
        for i in range(0, len(outputs)):
            try:
                torch.testing.assert_allclose(outputs[i], ort_outs[i], rtol=1e-03, atol=1e-05)
            except AssertionError as error:
                if tolerate_small_mismatch:
                    # Accept only mismatches torch reports as affecting 0.00%
                    # of elements; anything larger re-raises.
                    self.assertIn("(0.00%)", str(error), str(error))
                else:
                    raise

    def test_model_onnx_detection(self):
        model = detr_resnet50(pretrained=False).eval()
        dummy_image = torch.ones(1, 3, 800, 800) * 0.3
        model(dummy_image)
        # Test exported model on images of different size, or dummy input
        self.run_model(
            model,
            [(torch.rand(1, 3, 750, 800),)],
            input_names=["inputs"],
            output_names=["pred_logits", "pred_boxes"],
            tolerate_small_mismatch=True,
        )

    @unittest.skip("CI doesn't have enough memory")
    def test_model_onnx_detection_panoptic(self):
        model = detr_resnet50_panoptic(pretrained=False).eval()
        dummy_image = torch.ones(1, 3, 800, 800) * 0.3
        model(dummy_image)
        # Test exported model on images of different size, or dummy input
        self.run_model(
            model,
            [(torch.rand(1, 3, 750, 800),)],
            input_names=["inputs"],
            output_names=["pred_logits", "pred_boxes", "pred_masks"],
            tolerate_small_mismatch=True,
        )
if __name__ == '__main__':
    # Run the whole test suite when this file is invoked directly.
    unittest.main()
| 8,804 | 40.928571 | 119 | py |
pmb-nll | pmb-nll-main/src/detr/models/detr.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
DETR model and criterion classes.
"""
import torch
import torch.nn.functional as F
from torch import nn
from util import box_ops
from util.misc import (NestedTensor, nested_tensor_from_tensor_list,
accuracy, get_world_size, interpolate,
is_dist_avail_and_initialized)
from .backbone import build_backbone
from .matcher import build_matcher
from .segmentation import (DETRsegm, PostProcessPanoptic, PostProcessSegm,
dice_loss, sigmoid_focal_loss)
from .transformer import build_transformer
class DETR(nn.Module):
    """ This is the DETR module that performs object detection """
    def __init__(self, backbone, transformer, num_classes, num_queries, aux_loss=False):
        """ Initializes the model.
        Parameters:
            backbone: torch module of the backbone to be used. See backbone.py
            transformer: torch module of the transformer architecture. See transformer.py
            num_classes: number of object classes
            num_queries: number of object queries, ie detection slot. This is the maximal number of objects
                         DETR can detect in a single image. For COCO, we recommend 100 queries.
            aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used.
        """
        super().__init__()
        self.num_queries = num_queries
        self.transformer = transformer
        hidden_dim = transformer.d_model
        # +1 output for the special "no object" class.
        self.class_embed = nn.Linear(hidden_dim, num_classes + 1)
        # 3-layer MLP regressing the 4 normalized box coordinates.
        self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)
        # Learned query embeddings, one per detection slot.
        self.query_embed = nn.Embedding(num_queries, hidden_dim)
        # 1x1 conv projecting backbone channels down to the transformer width.
        self.input_proj = nn.Conv2d(backbone.num_channels, hidden_dim, kernel_size=1)
        self.backbone = backbone
        self.aux_loss = aux_loss

    def forward(self, samples: NestedTensor):
        """ The forward expects a NestedTensor, which consists of:
               - samples.tensor: batched images, of shape [batch_size x 3 x H x W]
               - samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels
            It returns a dict with the following elements:
               - "pred_logits": the classification logits (including no-object) for all queries.
                                Shape= [batch_size x num_queries x (num_classes + 1)]
               - "pred_boxes": The normalized boxes coordinates for all queries, represented as
                               (center_x, center_y, height, width). These values are normalized in [0, 1],
                               relative to the size of each individual image (disregarding possible padding).
                               See PostProcess for information on how to retrieve the unnormalized bounding box.
               - "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of
                                dictionnaries containing the two above keys for each decoder layer.
        """
        if isinstance(samples, (list, torch.Tensor)):
            samples = nested_tensor_from_tensor_list(samples)
        features, pos = self.backbone(samples)
        # Only the last backbone feature level feeds the transformer.
        src, mask = features[-1].decompose()
        assert mask is not None
        # hs stacks the decoder output of every layer; [-1] below selects the last.
        hs = self.transformer(self.input_proj(src), mask, self.query_embed.weight, pos[-1])[0]
        outputs_class = self.class_embed(hs)
        outputs_coord = self.bbox_embed(hs).sigmoid()
        out = {'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coord[-1]}
        if self.aux_loss:
            out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord)
        return out

    @torch.jit.unused
    def _set_aux_loss(self, outputs_class, outputs_coord):
        # this is a workaround to make torchscript happy, as torchscript
        # doesn't support dictionary with non-homogeneous values, such
        # as a dict having both a Tensor and a list.
        return [{'pred_logits': a, 'pred_boxes': b}
                for a, b in zip(outputs_class[:-1], outputs_coord[:-1])]
class SetCriterion(nn.Module):
""" This class computes the loss for DETR.
The process happens in two steps:
1) we compute hungarian assignment between ground truth boxes and the outputs of the model
2) we supervise each pair of matched ground-truth / prediction (supervise class and box)
"""
    def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses):
        """ Create the criterion.
        Parameters:
            num_classes: number of object categories, omitting the special no-object category
            matcher: module able to compute a matching between targets and proposals
            weight_dict: dict containing as key the names of the losses and as values their relative weight.
            eos_coef: relative classification weight applied to the no-object category
            losses: list of all the losses to be applied. See get_loss for list of available losses.
        """
        super().__init__()
        self.num_classes = num_classes
        self.matcher = matcher
        self.weight_dict = weight_dict
        self.eos_coef = eos_coef
        self.losses = losses
        # Per-class weights for cross-entropy: 1 for every real class and
        # eos_coef for the trailing no-object class. Registered as a buffer so
        # it moves with the module across devices without being a parameter.
        empty_weight = torch.ones(self.num_classes + 1)
        empty_weight[-1] = self.eos_coef
        self.register_buffer('empty_weight', empty_weight)
def loss_labels(self, outputs, targets, indices, num_boxes, log=True):
"""Classification loss (NLL)
targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
"""
assert 'pred_logits' in outputs
src_logits = outputs['pred_logits']
idx = self._get_src_permutation_idx(indices)
target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)])
target_classes = torch.full(src_logits.shape[:2], self.num_classes,
dtype=torch.int64, device=src_logits.device)
target_classes[idx] = target_classes_o
loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight)
losses = {'loss_ce': loss_ce}
if log:
# TODO this should probably be a separate loss, not hacked in this one here
losses['class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0]
return losses
@torch.no_grad()
def loss_cardinality(self, outputs, targets, indices, num_boxes):
""" Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes
This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients
"""
pred_logits = outputs['pred_logits']
device = pred_logits.device
tgt_lengths = torch.as_tensor([len(v["labels"]) for v in targets], device=device)
# Count the number of predictions that are NOT "no-object" (which is the last class)
card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1)
card_err = F.l1_loss(card_pred.float(), tgt_lengths.float())
losses = {'cardinality_error': card_err}
return losses
def loss_boxes(self, outputs, targets, indices, num_boxes):
"""Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss
targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]
The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size.
"""
assert 'pred_boxes' in outputs
idx = self._get_src_permutation_idx(indices)
src_boxes = outputs['pred_boxes'][idx]
target_boxes = torch.cat([t['boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0)
loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none')
losses = {}
losses['loss_bbox'] = loss_bbox.sum() / num_boxes
loss_giou = 1 - torch.diag(box_ops.generalized_box_iou(
box_ops.box_cxcywh_to_xyxy(src_boxes),
box_ops.box_cxcywh_to_xyxy(target_boxes)))
losses['loss_giou'] = loss_giou.sum() / num_boxes
return losses
def loss_masks(self, outputs, targets, indices, num_boxes):
"""Compute the losses related to the masks: the focal loss and the dice loss.
targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w]
"""
assert "pred_masks" in outputs
src_idx = self._get_src_permutation_idx(indices)
tgt_idx = self._get_tgt_permutation_idx(indices)
src_masks = outputs["pred_masks"]
src_masks = src_masks[src_idx]
masks = [t["masks"] for t in targets]
# TODO use valid to mask invalid areas due to padding in loss
target_masks, valid = nested_tensor_from_tensor_list(masks).decompose()
target_masks = target_masks.to(src_masks)
target_masks = target_masks[tgt_idx]
# upsample predictions to the target size
src_masks = interpolate(src_masks[:, None], size=target_masks.shape[-2:],
mode="bilinear", align_corners=False)
src_masks = src_masks[:, 0].flatten(1)
target_masks = target_masks.flatten(1)
target_masks = target_masks.view(src_masks.shape)
losses = {
"loss_mask": sigmoid_focal_loss(src_masks, target_masks, num_boxes),
"loss_dice": dice_loss(src_masks, target_masks, num_boxes),
}
return losses
def _get_src_permutation_idx(self, indices):
# permute predictions following indices
batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])
src_idx = torch.cat([src for (src, _) in indices])
return batch_idx, src_idx
def _get_tgt_permutation_idx(self, indices):
# permute targets following indices
batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])
tgt_idx = torch.cat([tgt for (_, tgt) in indices])
return batch_idx, tgt_idx
def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs):
loss_map = {
'labels': self.loss_labels,
'cardinality': self.loss_cardinality,
'boxes': self.loss_boxes,
'masks': self.loss_masks
}
assert loss in loss_map, f'do you really want to compute {loss} loss?'
return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs)
def forward(self, outputs, targets):
""" This performs the loss computation.
Parameters:
outputs: dict of tensors, see the output specification of the model for the format
targets: list of dicts, such that len(targets) == batch_size.
The expected keys in each dict depends on the losses applied, see each loss' doc
"""
outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'}
# Retrieve the matching between the outputs of the last layer and the targets
indices = self.matcher(outputs_without_aux, targets)
# Compute the average number of target boxes accross all nodes, for normalization purposes
num_boxes = sum(len(t["labels"]) for t in targets)
num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device)
if is_dist_avail_and_initialized():
torch.distributed.all_reduce(num_boxes)
num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item()
# Compute all the requested losses
losses = {}
for loss in self.losses:
losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))
# In case of auxiliary losses, we repeat this process with the output of each intermediate layer.
if 'aux_outputs' in outputs:
for i, aux_outputs in enumerate(outputs['aux_outputs']):
indices = self.matcher(aux_outputs, targets)
for loss in self.losses:
if loss == 'masks':
# Intermediate masks losses are too costly to compute, we ignore them.
continue
kwargs = {}
if loss == 'labels':
# Logging is enabled only for the last layer
kwargs = {'log': False}
l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs)
l_dict = {k + f'_{i}': v for k, v in l_dict.items()}
losses.update(l_dict)
return losses
class PostProcess(nn.Module):
    """ This module converts the model's output into the format expected by the coco api"""
    @torch.no_grad()
    def forward(self, outputs, target_sizes):
        """ Perform the computation
        Parameters:
            outputs: raw outputs of the model
            target_sizes: tensor of dimension [batch_size x 2] containing the size of each images of the batch
                          For evaluation, this must be the original image size (before any data augmentation)
                          For visualization, this should be the image size after data augment, but before padding
        """
        logits = outputs['pred_logits']
        pred_boxes = outputs['pred_boxes']
        assert len(logits) == len(target_sizes)
        assert target_sizes.shape[1] == 2
        # Per-query class distribution; the last channel is the no-object
        # class and is excluded when picking the best label.
        class_probs = F.softmax(logits, -1)
        scores, labels = class_probs[..., :-1].max(-1)
        # convert (cx, cy, w, h) to [x0, y0, x1, y1] format
        boxes = box_ops.box_cxcywh_to_xyxy(pred_boxes)
        # rescale relative [0, 1] coordinates to absolute pixel coordinates
        img_h, img_w = target_sizes.unbind(1)
        scale = torch.stack([img_w, img_h, img_w, img_h], dim=1)
        boxes = boxes * scale[:, None, :]
        return [{'scores': s, 'labels': l, 'boxes': b}
                for s, l, b in zip(scores, labels, boxes)]
class MLP(nn.Module):
    """Very simple multi-layer perceptron (also called FFN).

    A stack of `num_layers` nn.Linear layers with ReLU between consecutive
    layers; the final layer has no activation.
    """
    def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
        super().__init__()
        self.num_layers = num_layers
        # input -> hidden x (num_layers - 1) -> output
        in_dims = [input_dim] + [hidden_dim] * (num_layers - 1)
        out_dims = [hidden_dim] * (num_layers - 1) + [output_dim]
        self.layers = nn.ModuleList(nn.Linear(a, b) for a, b in zip(in_dims, out_dims))
    def forward(self, x):
        last = len(self.layers) - 1
        for idx, layer in enumerate(self.layers):
            x = layer(x)
            if idx < last:
                x = F.relu(x)
        return x
def build(args):
    """Build the DETR model, its criterion and post-processors from parsed args.

    Returns:
        (model, criterion, postprocessors) where postprocessors maps a task
        name ('bbox', and optionally 'segm' / 'panoptic') to its module.
    """
    # the `num_classes` naming here is somewhat misleading.
    # it indeed corresponds to `max_obj_id + 1`, where max_obj_id
    # is the maximum id for a class in your dataset. For example,
    # COCO has a max_obj_id of 90, so we pass `num_classes` to be 91.
    # As another example, for a dataset that has a single class with id 1,
    # you should pass `num_classes` to be 2 (max_obj_id + 1).
    # For more details on this, check the following discussion
    # https://github.com/facebookresearch/detr/issues/108#issuecomment-650269223
    num_classes = 20 if args.dataset_file != 'coco' else 91
    if args.dataset_file == "coco_panoptic":
        # for panoptic, we just add a num_classes that is large enough to hold
        # max_obj_id + 1, but the exact value doesn't really matter
        num_classes = 250
    device = torch.device(args.device)
    backbone = build_backbone(args)
    transformer = build_transformer(args)
    model = DETR(
        backbone,
        transformer,
        num_classes=num_classes,
        num_queries=args.num_queries,
        aux_loss=args.aux_loss,
    )
    if args.masks:
        # Wrap the detector with the segmentation head; the base detector is
        # frozen when fine-tuning from pre-trained frozen weights.
        model = DETRsegm(model, freeze_detr=(args.frozen_weights is not None))
    matcher = build_matcher(args)
    weight_dict = {'loss_ce': 1, 'loss_bbox': args.bbox_loss_coef}
    weight_dict['loss_giou'] = args.giou_loss_coef
    if args.masks:
        weight_dict["loss_mask"] = args.mask_loss_coef
        weight_dict["loss_dice"] = args.dice_loss_coef
    # TODO this is a hack
    if args.aux_loss:
        # Replicate every loss weight for each intermediate decoder layer,
        # suffixed with the layer index (matches SetCriterion's aux loss keys).
        aux_weight_dict = {}
        for i in range(args.dec_layers - 1):
            aux_weight_dict.update({k + f'_{i}': v for k, v in weight_dict.items()})
        weight_dict.update(aux_weight_dict)
    losses = ['labels', 'boxes', 'cardinality']
    if args.masks:
        losses += ["masks"]
    criterion = SetCriterion(num_classes, matcher=matcher, weight_dict=weight_dict,
                             eos_coef=args.eos_coef, losses=losses)
    criterion.to(device)
    postprocessors = {'bbox': PostProcess()}
    if args.masks:
        postprocessors['segm'] = PostProcessSegm()
        if args.dataset_file == "coco_panoptic":
            # COCO panoptic: category ids <= 90 are "thing" classes.
            is_thing_map = {i: i <= 90 for i in range(201)}
            postprocessors["panoptic"] = PostProcessPanoptic(is_thing_map, threshold=0.85)
    return model, criterion, postprocessors
| 17,088 | 46.469444 | 113 | py |
pmb-nll | pmb-nll-main/src/detr/models/matcher.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Modules to compute the matching cost and solve the corresponding LSAP.
"""
import torch
from scipy.optimize import linear_sum_assignment
from torch import nn
from util.box_ops import box_cxcywh_to_xyxy, generalized_box_iou
class HungarianMatcher(nn.Module):
    """This class computes an assignment between the targets and the predictions of the network
    For efficiency reasons, the targets don't include the no_object. Because of this, in general,
    there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,
    while the others are un-matched (and thus treated as non-objects).
    """
    def __init__(self, cost_class: float = 1, cost_bbox: float = 1, cost_giou: float = 1):
        """Creates the matcher
        Params:
            cost_class: This is the relative weight of the classification error in the matching cost
            cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost
            cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost
        """
        super().__init__()
        self.cost_class = cost_class
        self.cost_bbox = cost_bbox
        self.cost_giou = cost_giou
        assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, "all costs cant be 0"
    @torch.no_grad()
    def forward(self, outputs, targets):
        """ Performs the matching
        Params:
            outputs: This is a dict that contains at least these entries:
                 "pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits
                 "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates
            targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:
                 "labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth
                           objects in the target) containing the class labels
                 "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates
        Returns:
            A list of size batch_size, containing tuples of (index_i, index_j) where:
                - index_i is the indices of the selected predictions (in order)
                - index_j is the indices of the corresponding selected targets (in order)
            For each batch element, it holds:
                len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
        """
        bs, num_queries = outputs["pred_logits"].shape[:2]
        # We flatten to compute the cost matrices in a batch
        out_prob = outputs["pred_logits"].flatten(0, 1).softmax(-1)  # [batch_size * num_queries, num_classes]
        out_bbox = outputs["pred_boxes"].flatten(0, 1)  # [batch_size * num_queries, 4]
        # Also concat the target labels and boxes
        tgt_ids = torch.cat([v["labels"] for v in targets])
        tgt_bbox = torch.cat([v["boxes"] for v in targets])
        # Compute the classification cost. Contrary to the loss, we don't use the NLL,
        # but approximate it in 1 - proba[target class].
        # The 1 is a constant that doesn't change the matching, it can be omitted.
        cost_class = -out_prob[:, tgt_ids]
        # Compute the L1 cost between boxes
        cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1)
        # Compute the giou cost between boxes
        cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox))
        # Final cost matrix: weighted sum of the three terms, reshaped back to
        # one [num_queries, total_targets] block per batch element (on CPU,
        # since scipy's solver needs numpy).
        C = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou
        C = C.view(bs, num_queries, -1).cpu()
        # Split the concatenated target dimension per image and run the
        # Hungarian algorithm independently on each image's cost block.
        sizes = [len(v["boxes"]) for v in targets]
        indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))]
        return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
def build_matcher(args):
    """Instantiate a HungarianMatcher from the command-line arguments.

    Reads args.set_cost_class / set_cost_bbox / set_cost_giou as the relative
    weights of the three matching-cost terms.
    """
    class_weight = args.set_cost_class
    bbox_weight = args.set_cost_bbox
    giou_weight = args.set_cost_giou
    return HungarianMatcher(cost_class=class_weight, cost_bbox=bbox_weight, cost_giou=giou_weight)
| 4,250 | 47.862069 | 119 | py |
pmb-nll | pmb-nll-main/src/detr/models/segmentation.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
This file provides the definition of the convolutional heads used to predict masks, as well as the losses
"""
import io
from collections import defaultdict
from typing import List, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from PIL import Image
import util.box_ops as box_ops
from util.misc import NestedTensor, interpolate, nested_tensor_from_tensor_list
try:
from panopticapi.utils import id2rgb, rgb2id
except ImportError:
pass
class DETRsegm(nn.Module):
    """DETR extended with a segmentation head.

    Wraps a base DETR detector and adds a per-query attention map over the
    encoder memory plus an FPN-style convolutional mask head.
    """
    def __init__(self, detr, freeze_detr=False):
        super().__init__()
        self.detr = detr
        if freeze_detr:
            # At this point only the wrapped detector's parameters exist, so
            # this freezes the base DETR while the segmentation modules
            # created below remain trainable.
            for p in self.parameters():
                p.requires_grad_(False)
        hidden_dim, nheads = detr.transformer.d_model, detr.transformer.nhead
        self.bbox_attention = MHAttentionMap(hidden_dim, hidden_dim, nheads, dropout=0.0)
        # Mask head input channels = projected features + one attention map per head.
        self.mask_head = MaskHeadSmallConv(hidden_dim + nheads, [1024, 512, 256], hidden_dim)
    def forward(self, samples: NestedTensor):
        if isinstance(samples, (list, torch.Tensor)):
            samples = nested_tensor_from_tensor_list(samples)
        features, pos = self.detr.backbone(samples)
        bs = features[-1].tensors.shape[0]
        src, mask = features[-1].decompose()
        assert mask is not None
        src_proj = self.detr.input_proj(src)
        hs, memory = self.detr.transformer(src_proj, mask, self.detr.query_embed.weight, pos[-1])
        # Standard DETR detection outputs (class logits + boxes per decoder layer).
        outputs_class = self.detr.class_embed(hs)
        outputs_coord = self.detr.bbox_embed(hs).sigmoid()
        out = {"pred_logits": outputs_class[-1], "pred_boxes": outputs_coord[-1]}
        if self.detr.aux_loss:
            out['aux_outputs'] = self.detr._set_aux_loss(outputs_class, outputs_coord)
        # FIXME h_boxes takes the last one computed, keep this in mind
        # Attention maps between the last decoder layer's queries and the
        # encoder memory seed the mask head, together with the FPN features
        # (passed coarsest-first: features[2], features[1], features[0]).
        bbox_mask = self.bbox_attention(hs[-1], memory, mask=mask)
        seg_masks = self.mask_head(src_proj, bbox_mask, [features[2].tensors, features[1].tensors, features[0].tensors])
        outputs_seg_masks = seg_masks.view(bs, self.detr.num_queries, seg_masks.shape[-2], seg_masks.shape[-1])
        out["pred_masks"] = outputs_seg_masks
        return out
def _expand(tensor, length: int):
    """Duplicate a (N, C, H, W) tensor `length` times per batch element,
    returning shape (N * length, C, H, W)."""
    expanded = tensor.unsqueeze(1)
    expanded = expanded.repeat(1, int(length), 1, 1, 1)
    return expanded.flatten(0, 1)
class MaskHeadSmallConv(nn.Module):
    """
    Simple convolutional head, using group norm.
    Upsampling is done using a FPN approach
    """
    def __init__(self, dim, fpn_dims, context_dim):
        super().__init__()
        # Channel widths shrink at each upsampling stage, ending in a
        # single-channel mask logit.
        inter_dims = [dim, context_dim // 2, context_dim // 4, context_dim // 8, context_dim // 16, context_dim // 64]
        self.lay1 = torch.nn.Conv2d(dim, dim, 3, padding=1)
        self.gn1 = torch.nn.GroupNorm(8, dim)
        self.lay2 = torch.nn.Conv2d(dim, inter_dims[1], 3, padding=1)
        self.gn2 = torch.nn.GroupNorm(8, inter_dims[1])
        self.lay3 = torch.nn.Conv2d(inter_dims[1], inter_dims[2], 3, padding=1)
        self.gn3 = torch.nn.GroupNorm(8, inter_dims[2])
        self.lay4 = torch.nn.Conv2d(inter_dims[2], inter_dims[3], 3, padding=1)
        self.gn4 = torch.nn.GroupNorm(8, inter_dims[3])
        self.lay5 = torch.nn.Conv2d(inter_dims[3], inter_dims[4], 3, padding=1)
        self.gn5 = torch.nn.GroupNorm(8, inter_dims[4])
        self.out_lay = torch.nn.Conv2d(inter_dims[4], 1, 3, padding=1)
        self.dim = dim
        # 1x1 lateral convs adapting each FPN feature map to its stage's width.
        self.adapter1 = torch.nn.Conv2d(fpn_dims[0], inter_dims[1], 1)
        self.adapter2 = torch.nn.Conv2d(fpn_dims[1], inter_dims[2], 1)
        self.adapter3 = torch.nn.Conv2d(fpn_dims[2], inter_dims[3], 1)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_uniform_(m.weight, a=1)
                nn.init.constant_(m.bias, 0)
    def forward(self, x: Tensor, bbox_mask: Tensor, fpns: List[Tensor]):
        # Concatenate the projected features (repeated once per query) with the
        # per-query attention maps along the channel dimension.
        x = torch.cat([_expand(x, bbox_mask.shape[1]), bbox_mask.flatten(0, 1)], 1)
        x = self.lay1(x)
        x = self.gn1(x)
        x = F.relu(x)
        x = self.lay2(x)
        x = self.gn2(x)
        x = F.relu(x)
        # FPN stage 1: upsample x to the lateral feature's resolution and add.
        # The _expand branch replicates the lateral feature per query when the
        # batch dims differ.
        cur_fpn = self.adapter1(fpns[0])
        if cur_fpn.size(0) != x.size(0):
            cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0))
        x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest")
        x = self.lay3(x)
        x = self.gn3(x)
        x = F.relu(x)
        # FPN stage 2.
        cur_fpn = self.adapter2(fpns[1])
        if cur_fpn.size(0) != x.size(0):
            cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0))
        x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest")
        x = self.lay4(x)
        x = self.gn4(x)
        x = F.relu(x)
        # FPN stage 3 (finest resolution).
        cur_fpn = self.adapter3(fpns[2])
        if cur_fpn.size(0) != x.size(0):
            cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0))
        x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest")
        x = self.lay5(x)
        x = self.gn5(x)
        x = F.relu(x)
        x = self.out_lay(x)
        return x
class MHAttentionMap(nn.Module):
    """This is a 2D attention module, which only returns the attention softmax (no multiplication by value)"""
    def __init__(self, query_dim, hidden_dim, num_heads, dropout=0.0, bias=True):
        super().__init__()
        self.num_heads = num_heads
        self.hidden_dim = hidden_dim
        self.dropout = nn.Dropout(dropout)
        self.q_linear = nn.Linear(query_dim, hidden_dim, bias=bias)
        self.k_linear = nn.Linear(query_dim, hidden_dim, bias=bias)
        nn.init.zeros_(self.k_linear.bias)
        nn.init.zeros_(self.q_linear.bias)
        nn.init.xavier_uniform_(self.k_linear.weight)
        nn.init.xavier_uniform_(self.q_linear.weight)
        # 1 / sqrt(head_dim) scaling, precomputed once.
        self.normalize_fact = float(hidden_dim / self.num_heads) ** -0.5
    def forward(self, q, k, mask: Optional[Tensor] = None):
        """Return attention weights of shape (b, num_queries, num_heads, h, w)."""
        queries = self.q_linear(q)
        # Apply the key projection to the 2D feature map as a 1x1 convolution.
        keys = F.conv2d(k, self.k_linear.weight.unsqueeze(-1).unsqueeze(-1), self.k_linear.bias)
        n_heads = self.num_heads
        head_dim = self.hidden_dim // n_heads
        qh = queries.view(queries.shape[0], queries.shape[1], n_heads, head_dim)
        kh = keys.view(keys.shape[0], n_heads, head_dim, keys.shape[-2], keys.shape[-1])
        weights = torch.einsum("bqnc,bnchw->bqnhw", qh * self.normalize_fact, kh)
        if mask is not None:
            # Padded pixels get -inf so the softmax assigns them zero weight.
            weights.masked_fill_(mask.unsqueeze(1).unsqueeze(1), float("-inf"))
        # Softmax jointly over heads and spatial positions, then restore shape.
        weights = F.softmax(weights.flatten(2), dim=-1).view(weights.size())
        return self.dropout(weights)
def dice_loss(inputs, targets, num_boxes):
    """
    Compute the DICE loss, similar to generalized IOU for masks
    Args:
        inputs: A float tensor of arbitrary shape.
                The predictions for each example.
        targets: A float tensor with the same shape as inputs. Stores the binary
                 classification label for each element in inputs
                (0 for the negative class and 1 for the positive class).
        num_boxes: normalization factor (number of boxes in the batch).
    """
    probs = inputs.sigmoid().flatten(1)
    intersection = (probs * targets).sum(1)
    union = probs.sum(-1) + targets.sum(-1)
    # +1 smoothing in numerator and denominator avoids division by zero
    # for empty masks.
    per_mask = 1 - (2 * intersection + 1) / (union + 1)
    return per_mask.sum() / num_boxes
def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2):
    """
    Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.
    Args:
        inputs: A float tensor of arbitrary shape.
                The predictions for each example.
        targets: A float tensor with the same shape as inputs. Stores the binary
                 classification label for each element in inputs
                (0 for the negative class and 1 for the positive class).
        num_boxes: normalization factor (number of boxes in the batch).
        alpha: (optional) Weighting factor in range (0,1) to balance
                positive vs negative examples. Defaults to 0.25; pass a
                negative value to disable the alpha weighting.
        gamma: Exponent of the modulating factor (1 - p_t) to
               balance easy vs hard examples.
    Returns:
        Loss tensor
    """
    prob = inputs.sigmoid()
    ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
    # p_t is the model's probability of the true class for each element.
    p_t = prob * targets + (1 - prob) * (1 - targets)
    # Down-weight easy examples (p_t close to 1) by the modulating factor.
    loss = ce_loss * ((1 - p_t) ** gamma)
    if alpha >= 0:
        alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
        loss = alpha_t * loss
    # Average over each prediction's elements, then normalize by box count so
    # the loss is comparable across batch compositions.
    return loss.mean(1).sum() / num_boxes
class PostProcessSegm(nn.Module):
    """Converts raw mask logits into binary instance masks at original image size."""
    def __init__(self, threshold=0.5):
        super().__init__()
        # probability threshold applied after the sigmoid to binarize masks
        self.threshold = threshold
    @torch.no_grad()
    def forward(self, results, outputs, orig_target_sizes, max_target_sizes):
        """Add a "masks" entry to each per-image result dict (mutated in place).

        Parameters:
            results: list of per-image result dicts, one per batch element
            outputs: raw model outputs; only "pred_masks" is used here
            orig_target_sizes: [batch, 2] original (h, w) of each image
            max_target_sizes: [batch, 2] (h, w) each image was resized to before padding
        """
        assert len(orig_target_sizes) == len(max_target_sizes)
        max_h, max_w = max_target_sizes.max(0)[0].tolist()
        outputs_masks = outputs["pred_masks"].squeeze(2)
        outputs_masks = F.interpolate(outputs_masks, size=(max_h, max_w), mode="bilinear", align_corners=False)
        outputs_masks = (outputs_masks.sigmoid() > self.threshold).cpu()
        for i, (cur_mask, t, tt) in enumerate(zip(outputs_masks, max_target_sizes, orig_target_sizes)):
            img_h, img_w = t[0], t[1]
            # crop away the batch padding, then resize back to the original image size
            results[i]["masks"] = cur_mask[:, :img_h, :img_w].unsqueeze(1)
            results[i]["masks"] = F.interpolate(
                results[i]["masks"].float(), size=tuple(tt.tolist()), mode="nearest"
            ).byte()
        return results
class PostProcessPanoptic(nn.Module):
    """This class converts the output of the model to the final panoptic result, in the format expected by the
    coco panoptic API """
    def __init__(self, is_thing_map, threshold=0.85):
        """
        Parameters:
           is_thing_map: dict whose keys are the class ids, and the values a boolean indicating whether
                          the class is a thing (True) or a stuff (False) class
           threshold: confidence threshold: segments with confidence lower than this will be deleted
        """
        super().__init__()
        self.threshold = threshold
        self.is_thing_map = is_thing_map
    def forward(self, outputs, processed_sizes, target_sizes=None):
        """ This function computes the panoptic prediction from the model's predictions.
        Parameters:
            outputs: This is a dict coming directly from the model. See the model doc for the content.
            processed_sizes: This is a list of tuples (or torch tensors) of sizes of the images that were passed to the
                             model, ie the size after data augmentation but before batching.
            target_sizes: This is a list of tuples (or torch tensors) corresponding to the requested final size
                          of each prediction. If left to None, it will default to the processed_sizes
        """
        if target_sizes is None:
            target_sizes = processed_sizes
        assert len(processed_sizes) == len(target_sizes)
        out_logits, raw_masks, raw_boxes = outputs["pred_logits"], outputs["pred_masks"], outputs["pred_boxes"]
        assert len(out_logits) == len(raw_masks) == len(target_sizes)
        preds = []
        def to_tuple(tup):
            # accept both plain tuples and tensors for the size arguments
            if isinstance(tup, tuple):
                return tup
            return tuple(tup.cpu().tolist())
        for cur_logits, cur_masks, cur_boxes, size, target_size in zip(
            out_logits, raw_masks, raw_boxes, processed_sizes, target_sizes
        ):
            # we filter empty queries and detection below threshold
            scores, labels = cur_logits.softmax(-1).max(-1)
            keep = labels.ne(outputs["pred_logits"].shape[-1] - 1) & (scores > self.threshold)
            cur_scores, cur_classes = cur_logits.softmax(-1).max(-1)
            cur_scores = cur_scores[keep]
            cur_classes = cur_classes[keep]
            cur_masks = cur_masks[keep]
            cur_masks = interpolate(cur_masks[:, None], to_tuple(size), mode="bilinear").squeeze(1)
            cur_boxes = box_ops.box_cxcywh_to_xyxy(cur_boxes[keep])
            h, w = cur_masks.shape[-2:]
            assert len(cur_boxes) == len(cur_classes)
            # It may be that we have several predicted masks for the same stuff class.
            # In the following, we track the list of masks ids for each stuff class (they are merged later on)
            cur_masks = cur_masks.flatten(1)
            stuff_equiv_classes = defaultdict(lambda: [])
            for k, label in enumerate(cur_classes):
                if not self.is_thing_map[label.item()]:
                    stuff_equiv_classes[label.item()].append(k)
            def get_ids_area(masks, scores, dedup=False):
                # This helper function creates the final panoptic segmentation image
                # It also returns the area of the masks that appears on the image
                m_id = masks.transpose(0, 1).softmax(-1)
                if m_id.shape[-1] == 0:
                    # We didn't detect any mask :(
                    m_id = torch.zeros((h, w), dtype=torch.long, device=m_id.device)
                else:
                    # each pixel is assigned to its highest-scoring mask
                    m_id = m_id.argmax(-1).view(h, w)
                if dedup:
                    # Merge the masks corresponding to the same stuff class
                    for equiv in stuff_equiv_classes.values():
                        if len(equiv) > 1:
                            for eq_id in equiv:
                                m_id.masked_fill_(m_id.eq(eq_id), equiv[0])
                final_h, final_w = to_tuple(target_size)
                # Round-trip through a PNG-style RGB image to resize the id map
                # with nearest-neighbor interpolation at the requested size.
                seg_img = Image.fromarray(id2rgb(m_id.view(h, w).cpu().numpy()))
                seg_img = seg_img.resize(size=(final_w, final_h), resample=Image.NEAREST)
                np_seg_img = (
                    torch.ByteTensor(torch.ByteStorage.from_buffer(seg_img.tobytes())).view(final_h, final_w, 3).numpy()
                )
                m_id = torch.from_numpy(rgb2id(np_seg_img))
                area = []
                for i in range(len(scores)):
                    area.append(m_id.eq(i).sum().item())
                return area, seg_img
            area, seg_img = get_ids_area(cur_masks, cur_scores, dedup=True)
            if cur_classes.numel() > 0:
                # We now filter empty masks as long as we find some
                while True:
                    filtered_small = torch.as_tensor(
                        [area[i] <= 4 for i, c in enumerate(cur_classes)], dtype=torch.bool, device=keep.device
                    )
                    if filtered_small.any().item():
                        cur_scores = cur_scores[~filtered_small]
                        cur_classes = cur_classes[~filtered_small]
                        cur_masks = cur_masks[~filtered_small]
                        area, seg_img = get_ids_area(cur_masks, cur_scores)
                    else:
                        break
            else:
                # no detections at all: emit a single dummy segment
                cur_classes = torch.ones(1, dtype=torch.long, device=cur_classes.device)
            segments_info = []
            for i, a in enumerate(area):
                cat = cur_classes[i].item()
                segments_info.append({"id": i, "isthing": self.is_thing_map[cat], "category_id": cat, "area": a})
            del cur_classes
            with io.BytesIO() as out:
                seg_img.save(out, format="PNG")
                predictions = {"png_string": out.getvalue(), "segments_info": segments_info}
            preds.append(predictions)
        return preds
| 15,573 | 41.785714 | 120 | py |
pmb-nll | pmb-nll-main/src/detr/models/position_encoding.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Various positional encodings for the transformer.
"""
import math
import torch
from torch import nn
from util.misc import NestedTensor
class PositionEmbeddingSine(nn.Module):
    """
    This is a more standard version of the position embedding, very similar to the one
    used by the Attention is all you need paper, generalized to work on images.
    """
    def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
        super().__init__()
        # Features per axis; the final embedding has 2 * num_pos_feats channels
        # (y-features concatenated with x-features).
        self.num_pos_feats = num_pos_feats
        self.temperature = temperature
        self.normalize = normalize
        if scale is not None and normalize is False:
            raise ValueError("normalize should be True if scale is passed")
        if scale is None:
            scale = 2 * math.pi
        self.scale = scale
    def forward(self, tensor_list: NestedTensor):
        x = tensor_list.tensors
        mask = tensor_list.mask
        assert mask is not None
        not_mask = ~mask
        # Cumulative sums over the valid (non-padded) area give each pixel its
        # 1-based (row, col) position, counting only real image content.
        y_embed = not_mask.cumsum(1, dtype=torch.float32)
        x_embed = not_mask.cumsum(2, dtype=torch.float32)
        if self.normalize:
            eps = 1e-6
            # scale positions to [0, self.scale] using each image's true extent
            y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
            x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
        dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
        # Geometric frequency progression, paired so sin/cos share a frequency.
        dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
        pos_x = x_embed[:, :, :, None] / dim_t
        pos_y = y_embed[:, :, :, None] / dim_t
        # Interleave sin on even channels and cos on odd channels.
        pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
        pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
        # (b, h, w, 2 * num_pos_feats) -> (b, 2 * num_pos_feats, h, w)
        pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
        return pos
class PositionEmbeddingLearned(nn.Module):
    """
    Absolute pos embedding, learned.

    One learned embedding per row index and per column index (up to 50 each);
    they are concatenated per pixel into a 2 * num_pos_feats channel map.
    """
    def __init__(self, num_pos_feats=256):
        super().__init__()
        self.row_embed = nn.Embedding(50, num_pos_feats)
        self.col_embed = nn.Embedding(50, num_pos_feats)
        self.reset_parameters()
    def reset_parameters(self):
        nn.init.uniform_(self.row_embed.weight)
        nn.init.uniform_(self.col_embed.weight)
    def forward(self, tensor_list: NestedTensor):
        feats = tensor_list.tensors
        h, w = feats.shape[-2:]
        col_idx = torch.arange(w, device=feats.device)
        row_idx = torch.arange(h, device=feats.device)
        x_emb = self.col_embed(col_idx)  # (w, num_pos_feats)
        y_emb = self.row_embed(row_idx)  # (h, num_pos_feats)
        # Broadcast both axes over the full grid and concatenate per pixel.
        grid = torch.cat([
            x_emb.unsqueeze(0).repeat(h, 1, 1),
            y_emb.unsqueeze(1).repeat(1, w, 1),
        ], dim=-1)                       # (h, w, 2 * num_pos_feats)
        pos = grid.permute(2, 0, 1).unsqueeze(0)
        return pos.repeat(feats.shape[0], 1, 1, 1)
def build_position_encoding(args):
    """Select the positional-encoding flavour from args.position_embedding."""
    N_steps = args.hidden_dim // 2
    kind = args.position_embedding
    if kind in ('v2', 'sine'):
        # TODO find a better way of exposing other arguments
        return PositionEmbeddingSine(N_steps, normalize=True)
    if kind in ('v3', 'learned'):
        return PositionEmbeddingLearned(N_steps)
    raise ValueError(f"not supported {args.position_embedding}")
| 3,336 | 36.077778 | 103 | py |
pmb-nll | pmb-nll-main/src/detr/models/backbone.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Backbone modules.
"""
from collections import OrderedDict
import torch
import torch.nn.functional as F
import torchvision
from torch import nn
from torchvision.models._utils import IntermediateLayerGetter
from typing import Dict, List
from util.misc import NestedTensor, is_main_process
from .position_encoding import build_position_encoding
class FrozenBatchNorm2d(torch.nn.Module):
    """
    BatchNorm2d where the batch statistics and the affine parameters are fixed.
    Copy-paste from torchvision.misc.ops with added eps before rqsrt,
    without which any other models than torchvision.models.resnet[18,34,50,101]
    produce nans.
    """
    def __init__(self, n):
        super(FrozenBatchNorm2d, self).__init__()
        # All statistics live in buffers (not parameters): they are part of
        # the state dict but never receive gradients.
        self.register_buffer("weight", torch.ones(n))
        self.register_buffer("bias", torch.zeros(n))
        self.register_buffer("running_mean", torch.zeros(n))
        self.register_buffer("running_var", torch.ones(n))
    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        # Regular BatchNorm checkpoints carry a num_batches_tracked counter;
        # this frozen variant has no such buffer, so drop it before loading.
        tracked_key = prefix + 'num_batches_tracked'
        state_dict.pop(tracked_key, None)
        super(FrozenBatchNorm2d, self)._load_from_state_dict(
            state_dict, prefix, local_metadata, strict,
            missing_keys, unexpected_keys, error_msgs)
    def forward(self, x):
        # move reshapes to the beginning
        # to make it fuser-friendly
        shape = (1, -1, 1, 1)
        weight = self.weight.reshape(shape)
        bias = self.bias.reshape(shape)
        running_var = self.running_var.reshape(shape)
        running_mean = self.running_mean.reshape(shape)
        eps = 1e-5
        # y = weight * (x - mean) / sqrt(var + eps) + bias, folded into one
        # multiply-add per element.
        scale = weight * (running_var + eps).rsqrt()
        shift = bias - running_mean * scale
        return x * scale + shift
class BackboneBase(nn.Module):
    """Wraps a torchvision backbone and returns selected layers as NestedTensors."""
    def __init__(self, backbone: nn.Module, train_backbone: bool, num_channels: int, return_interm_layers: bool):
        super().__init__()
        # Freeze everything outside layer2-4; freeze those too unless the
        # backbone is being trained (stem and layer1 are always frozen).
        for name, parameter in backbone.named_parameters():
            if not train_backbone or 'layer2' not in name and 'layer3' not in name and 'layer4' not in name:
                parameter.requires_grad_(False)
        if return_interm_layers:
            # segmentation needs all four stages; detection only the last
            return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"}
        else:
            return_layers = {'layer4': "0"}
        self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
        self.num_channels = num_channels
    def forward(self, tensor_list: NestedTensor):
        xs = self.body(tensor_list.tensors)
        out: Dict[str, NestedTensor] = {}
        for name, x in xs.items():
            m = tensor_list.mask
            assert m is not None
            # downsample the padding mask to each feature map's resolution
            mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0]
            out[name] = NestedTensor(x, mask)
        return out
class Backbone(BackboneBase):
    """ResNet backbone with frozen BatchNorm."""
    def __init__(self, name: str,
                 train_backbone: bool,
                 return_interm_layers: bool,
                 dilation: bool):
        # Instantiate the torchvision resnet by name; pretrained weights are
        # downloaded on the main process only to avoid races in distributed runs.
        resnet = getattr(torchvision.models, name)(
            replace_stride_with_dilation=[False, False, dilation],
            pretrained=is_main_process(), norm_layer=FrozenBatchNorm2d)
        if name in ('resnet18', 'resnet34'):
            num_channels = 512
        else:
            num_channels = 2048
        super().__init__(resnet, train_backbone, num_channels, return_interm_layers)
class Joiner(nn.Sequential):
    """Run the backbone and attach a position encoding to every feature map.

    Slot 0 of the Sequential holds the backbone, slot 1 the position-embedding
    module; forward returns the feature list and the matching encodings.
    """

    def __init__(self, backbone, position_embedding):
        super().__init__(backbone, position_embedding)

    def forward(self, tensor_list: NestedTensor):
        features = self[0](tensor_list)
        out: List[NestedTensor] = []
        pos = []
        for _, feature in features.items():
            out.append(feature)
            # position encoding, cast to the feature tensor's dtype
            pos.append(self[1](feature).to(feature.tensors.dtype))
        return out, pos
def build_backbone(args):
    """Assemble the backbone + positional-encoding pipeline from parsed args."""
    position_embedding = build_position_encoding(args)
    backbone = Backbone(args.backbone,
                        train_backbone=args.lr_backbone > 0,
                        return_interm_layers=args.masks,
                        dilation=args.dilation)
    model = Joiner(backbone, position_embedding)
    model.num_channels = backbone.num_channels
    return model
| 4,437 | 35.983333 | 113 | py |
pmb-nll | pmb-nll-main/src/detr/models/transformer.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
DETR Transformer class.
Copy-paste from torch.nn.Transformer with modifications:
* positional encodings are passed in MHattention
* extra LN at the end of encoder is removed
* decoder returns a stack of activations from all decoding layers
"""
import copy
from typing import Optional, List
import torch
import torch.nn.functional as F
from torch import nn, Tensor
class Transformer(nn.Module):
    """Encoder-decoder transformer used by DETR.

    The decoder attends over encoder memory using learned object queries;
    with ``return_intermediate_dec=True`` every decoder layer's output is
    returned (used for deep supervision of auxiliary losses).
    """

    def __init__(self, d_model=512, nhead=8, num_encoder_layers=6,
                 num_decoder_layers=6, dim_feedforward=2048, dropout=0.1,
                 activation="relu", normalize_before=False,
                 return_intermediate_dec=False):
        super().__init__()

        encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward,
                                                dropout, activation, normalize_before)
        # Pre-norm variants need a final LayerNorm on the encoder output.
        encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
        self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)

        decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward,
                                                dropout, activation, normalize_before)
        decoder_norm = nn.LayerNorm(d_model)
        self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm,
                                          return_intermediate=return_intermediate_dec)

        self._reset_parameters()

        self.d_model = d_model
        self.nhead = nhead

    def _reset_parameters(self):
        # Xavier-initialize all weight matrices; 1-d params keep their defaults.
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    def forward(self, src, mask, query_embed, pos_embed):
        # flatten NxCxHxW to HWxNxC
        bs, c, h, w = src.shape
        src = src.flatten(2).permute(2, 0, 1)
        pos_embed = pos_embed.flatten(2).permute(2, 0, 1)
        # Broadcast the learned object queries across the batch dimension.
        query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1)
        mask = mask.flatten(1)

        # Decoder input starts as zeros; queries only enter via query_pos.
        tgt = torch.zeros_like(query_embed)
        memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed)
        hs = self.decoder(tgt, memory, memory_key_padding_mask=mask,
                          pos=pos_embed, query_pos=query_embed)
        # hs -> (layers, batch, queries, d_model); memory restored to NxCxHxW.
        return hs.transpose(1, 2), memory.permute(1, 2, 0).view(bs, c, h, w)
class TransformerEncoder(nn.Module):
    """A stack of identical encoder layers with an optional final LayerNorm."""

    def __init__(self, encoder_layer, num_layers, norm=None):
        super().__init__()
        self.layers = _get_clones(encoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm

    def forward(self, src,
                mask: Optional[Tensor] = None,
                src_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None):
        out = src
        # Feed the sequence through each layer in turn, threading the same
        # masks and positional encoding everywhere.
        for enc_layer in self.layers:
            out = enc_layer(out, src_mask=mask,
                            src_key_padding_mask=src_key_padding_mask, pos=pos)
        return out if self.norm is None else self.norm(out)
class TransformerDecoder(nn.Module):
    """A stack of decoder layers; can return every layer's (normalized) output."""

    def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
        super().__init__()
        self.layers = _get_clones(decoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm
        self.return_intermediate = return_intermediate

    def forward(self, tgt, memory,
                tgt_mask: Optional[Tensor] = None,
                memory_mask: Optional[Tensor] = None,
                tgt_key_padding_mask: Optional[Tensor] = None,
                memory_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None,
                query_pos: Optional[Tensor] = None):
        output = tgt

        intermediate = []

        for layer in self.layers:
            output = layer(output, memory, tgt_mask=tgt_mask,
                           memory_mask=memory_mask,
                           tgt_key_padding_mask=tgt_key_padding_mask,
                           memory_key_padding_mask=memory_key_padding_mask,
                           pos=pos, query_pos=query_pos)
            if self.return_intermediate:
                # Store a normalized snapshot of every layer's output
                # (assumes self.norm is set when return_intermediate is True —
                # true for the builder in this file; confirm for other callers).
                intermediate.append(self.norm(output))

        if self.norm is not None:
            output = self.norm(output)
            if self.return_intermediate:
                # The last snapshot was normalized before this final norm;
                # swap it for the freshly normalized final output.
                intermediate.pop()
                intermediate.append(output)

        if self.return_intermediate:
            return torch.stack(intermediate)

        # Keep a leading layer dimension so the two return shapes agree.
        return output.unsqueeze(0)
class TransformerEncoderLayer(nn.Module):
    """One encoder layer: self-attention then a feed-forward block, each with
    residual connection, dropout and LayerNorm (post-norm or pre-norm order)."""

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
                 activation="relu", normalize_before=False):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        # Implementation of Feedforward model
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)

        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)

        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        # Positional encodings are added to queries/keys only, never to values.
        return tensor if pos is None else tensor + pos

    def forward_post(self,
                     src,
                     src_mask: Optional[Tensor] = None,
                     src_key_padding_mask: Optional[Tensor] = None,
                     pos: Optional[Tensor] = None):
        # Post-norm order: sublayer -> residual add -> LayerNorm.
        q = k = self.with_pos_embed(src, pos)
        src2 = self.self_attn(q, k, value=src, attn_mask=src_mask,
                              key_padding_mask=src_key_padding_mask)[0]
        src = src + self.dropout1(src2)
        src = self.norm1(src)
        src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
        src = src + self.dropout2(src2)
        src = self.norm2(src)
        return src

    def forward_pre(self, src,
                    src_mask: Optional[Tensor] = None,
                    src_key_padding_mask: Optional[Tensor] = None,
                    pos: Optional[Tensor] = None):
        # Pre-norm order: LayerNorm -> sublayer -> residual add.
        src2 = self.norm1(src)
        q = k = self.with_pos_embed(src2, pos)
        src2 = self.self_attn(q, k, value=src2, attn_mask=src_mask,
                              key_padding_mask=src_key_padding_mask)[0]
        src = src + self.dropout1(src2)
        src2 = self.norm2(src)
        src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))
        src = src + self.dropout2(src2)
        return src

    def forward(self, src,
                src_mask: Optional[Tensor] = None,
                src_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None):
        if self.normalize_before:
            return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
        return self.forward_post(src, src_mask, src_key_padding_mask, pos)
class TransformerDecoderLayer(nn.Module):
    """One decoder layer: query self-attention, cross-attention over encoder
    memory, then a feed-forward block — each sublayer with residual, dropout
    and LayerNorm (post-norm or pre-norm order)."""

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
                 activation="relu", normalize_before=False):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        # Implementation of Feedforward model
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)

        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)

        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        # Positional encodings are added to queries/keys only, never to values.
        return tensor if pos is None else tensor + pos

    def forward_post(self, tgt, memory,
                     tgt_mask: Optional[Tensor] = None,
                     memory_mask: Optional[Tensor] = None,
                     tgt_key_padding_mask: Optional[Tensor] = None,
                     memory_key_padding_mask: Optional[Tensor] = None,
                     pos: Optional[Tensor] = None,
                     query_pos: Optional[Tensor] = None):
        # Post-norm order: sublayer -> residual add -> LayerNorm.
        # 1) self-attention among the object queries
        q = k = self.with_pos_embed(tgt, query_pos)
        tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask,
                              key_padding_mask=tgt_key_padding_mask)[0]
        tgt = tgt + self.dropout1(tgt2)
        tgt = self.norm1(tgt)
        # 2) cross-attention: queries attend over encoder memory
        tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),
                                   key=self.with_pos_embed(memory, pos),
                                   value=memory, attn_mask=memory_mask,
                                   key_padding_mask=memory_key_padding_mask)[0]
        tgt = tgt + self.dropout2(tgt2)
        tgt = self.norm2(tgt)
        # 3) position-wise feed-forward
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
        tgt = tgt + self.dropout3(tgt2)
        tgt = self.norm3(tgt)
        return tgt

    def forward_pre(self, tgt, memory,
                    tgt_mask: Optional[Tensor] = None,
                    memory_mask: Optional[Tensor] = None,
                    tgt_key_padding_mask: Optional[Tensor] = None,
                    memory_key_padding_mask: Optional[Tensor] = None,
                    pos: Optional[Tensor] = None,
                    query_pos: Optional[Tensor] = None):
        # Pre-norm order: LayerNorm -> sublayer -> residual add.
        tgt2 = self.norm1(tgt)
        q = k = self.with_pos_embed(tgt2, query_pos)
        tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask,
                              key_padding_mask=tgt_key_padding_mask)[0]
        tgt = tgt + self.dropout1(tgt2)
        tgt2 = self.norm2(tgt)
        tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos),
                                   key=self.with_pos_embed(memory, pos),
                                   value=memory, attn_mask=memory_mask,
                                   key_padding_mask=memory_key_padding_mask)[0]
        tgt = tgt + self.dropout2(tgt2)
        tgt2 = self.norm3(tgt)
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
        tgt = tgt + self.dropout3(tgt2)
        return tgt

    def forward(self, tgt, memory,
                tgt_mask: Optional[Tensor] = None,
                memory_mask: Optional[Tensor] = None,
                tgt_key_padding_mask: Optional[Tensor] = None,
                memory_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None,
                query_pos: Optional[Tensor] = None):
        if self.normalize_before:
            return self.forward_pre(tgt, memory, tgt_mask, memory_mask,
                                    tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
        return self.forward_post(tgt, memory, tgt_mask, memory_mask,
                                 tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def build_transformer(args):
    """Construct the DETR transformer from parsed command-line arguments.

    Intermediate decoder outputs are always returned so auxiliary losses can
    supervise every decoder layer.
    """
    kwargs = dict(
        d_model=args.hidden_dim,
        dropout=args.dropout,
        nhead=args.nheads,
        dim_feedforward=args.dim_feedforward,
        num_encoder_layers=args.enc_layers,
        num_decoder_layers=args.dec_layers,
        normalize_before=args.pre_norm,
        return_intermediate_dec=True,
    )
    return Transformer(**kwargs)
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(F"activation should be relu/gelu, not {activation}.")
| 12,162 | 39.815436 | 98 | py |
pmb-nll | pmb-nll-main/src/detr/d2/converter.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Helper script to convert models trained with the main version of DETR to be used with the Detectron2 version.
"""
import json
import argparse
import numpy as np
import torch
def parse_args():
    """Build and parse the command-line arguments for the model converter."""
    parser = argparse.ArgumentParser("D2 model converter")

    for flag, help_text in (
        ("--source_model", "Path or url to the DETR model to convert"),
        ("--output_model", "Path where to save the converted model"),
    ):
        parser.add_argument(flag, default="", type=str, help=help_text)
    return parser.parse_args()
def main():
    """Convert a DETR checkpoint's state dict to Detectron2 naming/shape."""
    args = parse_args()

    # D2 expects contiguous classes, so we need to remap the 92 classes from DETR
    # fmt: off
    coco_idx = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
                27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51,
                52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 67, 70, 72, 73, 74, 75, 76, 77,
                78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90, 91]
    # fmt: on

    coco_idx = np.array(coco_idx)

    if args.source_model.startswith("https"):
        # Remote checkpoint: download (and cache) via torch hub.
        checkpoint = torch.hub.load_state_dict_from_url(args.source_model, map_location="cpu", check_hash=True)
    else:
        checkpoint = torch.load(args.source_model, map_location="cpu")
    model_to_convert = checkpoint["model"]

    model_converted = {}
    for k in model_to_convert.keys():
        old_k = k
        if "backbone" in k:
            # Rename torchvision-style backbone keys to D2's ResNet naming:
            # layerN -> res{N+1}, bnN -> convN.norm, downsample -> shortcut.
            # NOTE: the replacement order matters (layer renames before bn renames).
            k = k.replace("backbone.0.body.", "")
            if "layer" not in k:
                k = "stem." + k
            for t in [1, 2, 3, 4]:
                k = k.replace(f"layer{t}", f"res{t + 1}")
            for t in [1, 2, 3]:
                k = k.replace(f"bn{t}", f"conv{t}.norm")
            k = k.replace("downsample.0", "shortcut")
            k = k.replace("downsample.1", "shortcut.norm")
            k = "backbone.0.backbone." + k
        k = "detr." + k
        print(old_k, "->", k)
        if "class_embed" in old_k:
            v = model_to_convert[old_k].detach()
            if v.shape[0] == 92:
                # Select only the rows for the contiguous D2 class mapping
                # (drops unused COCO category ids).
                shape_old = v.shape
                model_converted[k] = v[coco_idx]
                print("Head conversion: changing shape from {} to {}".format(shape_old, model_converted[k].shape))
                continue
        model_converted[k] = model_to_convert[old_k].detach()

    model_to_save = {"model": model_converted}
    torch.save(model_to_save, args.output_model)


if __name__ == "__main__":
    main()
| 2,590 | 36.014286 | 114 | py |
pmb-nll | pmb-nll-main/src/detr/d2/train_net.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
DETR Training Script.
This script is a simplified version of the training script in detectron2/tools.
"""
import os
import sys
import itertools
# fmt: off
sys.path.insert(1, os.path.join(sys.path[0], '..'))
# fmt: on
import time
from typing import Any, Dict, List, Set
import torch
import detectron2.utils.comm as comm
from d2.detr import DetrDatasetMapper, add_detr_config
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog, build_detection_train_loader
from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch
from detectron2.evaluation import COCOEvaluator, verify_results
from detectron2.solver.build import maybe_add_gradient_clipping
class Trainer(DefaultTrainer):
    """
    Extension of the Trainer class adapted to DETR.
    """

    @classmethod
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        """
        Create evaluator(s) for a given dataset.
        This uses the special metadata "evaluator_type" associated with each builtin dataset.
        For your own dataset, you can simply create an evaluator manually in your
        script and do not have to worry about the hacky if-else logic here.
        """
        if output_folder is None:
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
        return COCOEvaluator(dataset_name, cfg, True, output_folder)

    @classmethod
    def build_train_loader(cls, cfg):
        # DETR uses its own dataset mapper (crop/resize augmentation scheme);
        # any other meta-architecture falls back to the default mapper.
        if "Detr" == cfg.MODEL.META_ARCHITECTURE:
            mapper = DetrDatasetMapper(cfg, True)
        else:
            mapper = None
        return build_detection_train_loader(cfg, mapper=mapper)

    @classmethod
    def build_optimizer(cls, cfg, model):
        # Build one param group per parameter so the backbone can use a
        # scaled learning rate (BACKBONE_MULTIPLIER).
        params: List[Dict[str, Any]] = []
        memo: Set[torch.nn.parameter.Parameter] = set()
        for key, value in model.named_parameters(recurse=True):
            if not value.requires_grad:
                continue
            # Avoid duplicating parameters
            if value in memo:
                continue
            memo.add(value)
            lr = cfg.SOLVER.BASE_LR
            weight_decay = cfg.SOLVER.WEIGHT_DECAY
            if "backbone" in key:
                lr = lr * cfg.SOLVER.BACKBONE_MULTIPLIER
            params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}]

        def maybe_add_full_model_gradient_clipping(optim):  # optim: the optimizer class
            # detectron2 doesn't have full model gradient clipping now
            clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE
            enable = (
                cfg.SOLVER.CLIP_GRADIENTS.ENABLED
                and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model"
                and clip_norm_val > 0.0
            )

            class FullModelGradientClippingOptimizer(optim):
                def step(self, closure=None):
                    # Clip the gradient norm over ALL groups jointly, then step.
                    all_params = itertools.chain(*[x["params"] for x in self.param_groups])
                    torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val)
                    super().step(closure=closure)

            return FullModelGradientClippingOptimizer if enable else optim

        optimizer_type = cfg.SOLVER.OPTIMIZER
        if optimizer_type == "SGD":
            optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)(
                params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM
            )
        elif optimizer_type == "ADAMW":
            optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)(
                params, cfg.SOLVER.BASE_LR
            )
        else:
            raise NotImplementedError(f"no optimizer type {optimizer_type}")
        if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model":
            # Standard detectron2 per-parameter clipping path.
            optimizer = maybe_add_gradient_clipping(cfg, optimizer)
        return optimizer
def setup(args):
    """
    Create configs and perform basic setups.
    """
    cfg = get_cfg()
    add_detr_config(cfg)  # register DETR-specific config keys before merging
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)  # command-line overrides take precedence
    cfg.freeze()
    default_setup(cfg, args)  # logging, seed, output dir, environment dump
    return cfg
def main(args):
    """Entry point for one (possibly distributed) training/eval process."""
    cfg = setup(args)

    if args.eval_only:
        # Evaluation-only: build the model, load weights, run the test set.
        model = Trainer.build_model(cfg)
        DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(cfg.MODEL.WEIGHTS, resume=args.resume)
        res = Trainer.test(cfg, model)
        if comm.is_main_process():
            verify_results(cfg, res)
        return res

    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    return trainer.train()


if __name__ == "__main__":
    args = default_argument_parser().parse_args()
    print("Command Line Args:", args)
    # Spawns one process per GPU (and per machine) for distributed runs.
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )
| 4,999 | 33.246575 | 115 | py |
pmb-nll | pmb-nll-main/src/detr/d2/detr/detr.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import math
from typing import List
import numpy as np
import torch
import torch.distributed as dist
import torch.nn.functional as F
from scipy.optimize import linear_sum_assignment
from torch import nn
from detectron2.layers import ShapeSpec
from detectron2.modeling import META_ARCH_REGISTRY, build_backbone, detector_postprocess
from detectron2.structures import Boxes, ImageList, Instances, BitMasks, PolygonMasks
from detectron2.utils.logger import log_first_n
from fvcore.nn import giou_loss, smooth_l1_loss
from models.backbone import Joiner
from models.detr import DETR, SetCriterion
from models.matcher import HungarianMatcher
from models.position_encoding import PositionEmbeddingSine
from models.transformer import Transformer
from models.segmentation import DETRsegm, PostProcessPanoptic, PostProcessSegm
from util.box_ops import box_cxcywh_to_xyxy, box_xyxy_to_cxcywh
from util.misc import NestedTensor
from datasets.coco import convert_coco_poly_to_mask
__all__ = ["Detr"]
class MaskedBackbone(nn.Module):
    """ This is a thin wrapper around D2's backbone to provide padding masking"""

    def __init__(self, cfg):
        super().__init__()
        self.backbone = build_backbone(cfg)
        backbone_shape = self.backbone.output_shape()
        # Stride (in input pixels) of each feature level, in output order.
        self.feature_strides = [backbone_shape[f].stride for f in backbone_shape.keys()]
        self.num_channels = backbone_shape[list(backbone_shape.keys())[-1]].channels

    def forward(self, images):
        features = self.backbone(images.tensor)
        masks = self.mask_out_padding(
            [features_per_level.shape for features_per_level in features.values()],
            images.image_sizes,
            images.tensor.device,
        )
        assert len(features) == len(masks)
        for i, k in enumerate(features.keys()):
            # Pair each feature map with its padding mask (True = padded pixel).
            features[k] = NestedTensor(features[k], masks[i])
        return features

    def mask_out_padding(self, feature_shapes, image_sizes, device):
        """Build per-level boolean masks marking padded regions of the batch.

        Args:
            feature_shapes: list of (N, C, H, W) shapes, one per feature level.
            image_sizes: per-image (h, w) sizes before batch padding.
            device: device on which to allocate the masks.

        Returns:
            list of (N, H, W) bool tensors, True where a pixel is padding.
        """
        masks = []
        assert len(feature_shapes) == len(self.feature_strides)
        for idx, shape in enumerate(feature_shapes):
            N, _, H, W = shape
            masks_per_feature_level = torch.ones((N, H, W), dtype=torch.bool, device=device)
            for img_idx, (h, w) in enumerate(image_sizes):
                # Clear (False) the valid region, rounded up to the feature
                # stride; everything outside stays True (= padding).
                masks_per_feature_level[
                    img_idx,
                    : int(np.ceil(float(h) / self.feature_strides[idx])),
                    : int(np.ceil(float(w) / self.feature_strides[idx])),
                ] = 0
            masks.append(masks_per_feature_level)
        return masks
@META_ARCH_REGISTRY.register()
class Detr(nn.Module):
    """
    Implement Detr
    """

    def __init__(self, cfg):
        super().__init__()

        self.device = torch.device(cfg.MODEL.DEVICE)

        self.num_classes = cfg.MODEL.DETR.NUM_CLASSES
        self.mask_on = cfg.MODEL.MASK_ON
        hidden_dim = cfg.MODEL.DETR.HIDDEN_DIM
        num_queries = cfg.MODEL.DETR.NUM_OBJECT_QUERIES
        # Transformer parameters:
        nheads = cfg.MODEL.DETR.NHEADS
        dropout = cfg.MODEL.DETR.DROPOUT
        dim_feedforward = cfg.MODEL.DETR.DIM_FEEDFORWARD
        enc_layers = cfg.MODEL.DETR.ENC_LAYERS
        dec_layers = cfg.MODEL.DETR.DEC_LAYERS
        pre_norm = cfg.MODEL.DETR.PRE_NORM

        # Loss parameters:
        giou_weight = cfg.MODEL.DETR.GIOU_WEIGHT
        l1_weight = cfg.MODEL.DETR.L1_WEIGHT
        deep_supervision = cfg.MODEL.DETR.DEEP_SUPERVISION
        no_object_weight = cfg.MODEL.DETR.NO_OBJECT_WEIGHT

        # Half of hidden_dim per spatial axis (sine position encoding).
        N_steps = hidden_dim // 2
        d2_backbone = MaskedBackbone(cfg)
        backbone = Joiner(d2_backbone, PositionEmbeddingSine(N_steps, normalize=True))
        backbone.num_channels = d2_backbone.num_channels

        transformer = Transformer(
            d_model=hidden_dim,
            dropout=dropout,
            nhead=nheads,
            dim_feedforward=dim_feedforward,
            num_encoder_layers=enc_layers,
            num_decoder_layers=dec_layers,
            normalize_before=pre_norm,
            return_intermediate_dec=deep_supervision,
        )

        self.detr = DETR(
            backbone, transformer, num_classes=self.num_classes, num_queries=num_queries, aux_loss=deep_supervision
        )
        if self.mask_on:
            frozen_weights = cfg.MODEL.DETR.FROZEN_WEIGHTS
            if frozen_weights != '':
                print("LOAD pre-trained weights")
                weight = torch.load(frozen_weights, map_location=lambda storage, loc: storage)['model']
                # Strip the "detr." prefix so keys match the bare DETR module.
                new_weight = {}
                for k, v in weight.items():
                    if 'detr.' in k:
                        new_weight[k.replace('detr.', '')] = v
                    else:
                        print(f"Skipping loading weight {k} from frozen model")
                del weight
                self.detr.load_state_dict(new_weight)
                del new_weight
            self.detr = DETRsegm(self.detr, freeze_detr=(frozen_weights != ''))
            self.seg_postprocess = PostProcessSegm

        self.detr.to(self.device)

        # building criterion
        matcher = HungarianMatcher(cost_class=1, cost_bbox=l1_weight, cost_giou=giou_weight)
        weight_dict = {"loss_ce": 1, "loss_bbox": l1_weight}
        weight_dict["loss_giou"] = giou_weight
        if deep_supervision:
            # One weighted copy of each loss per auxiliary decoder layer.
            aux_weight_dict = {}
            for i in range(dec_layers - 1):
                aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()})
            weight_dict.update(aux_weight_dict)
        losses = ["labels", "boxes", "cardinality"]
        if self.mask_on:
            losses += ["masks"]
        self.criterion = SetCriterion(
            self.num_classes, matcher=matcher, weight_dict=weight_dict, eos_coef=no_object_weight, losses=losses,
        )
        self.criterion.to(self.device)

        pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(3, 1, 1)
        pixel_std = torch.Tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(3, 1, 1)
        self.normalizer = lambda x: (x - pixel_mean) / pixel_std
        self.to(self.device)

    def forward(self, batched_inputs):
        """
        Args:
            batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
                Each item in the list contains the inputs for one image.
                For now, each item in the list is a dict that contains:

                * image: Tensor, image in (C, H, W) format.
                * instances: Instances

                Other information that's included in the original dicts, such as:

                * "height", "width" (int): the output resolution of the model, used in inference.
                  See :meth:`postprocess` for details.
        Returns:
            dict[str: Tensor]:
                mapping from a named loss to a tensor storing the loss. Used during training only.
        """
        images = self.preprocess_image(batched_inputs)
        output = self.detr(images)

        if self.training:
            gt_instances = [x["instances"].to(self.device) for x in batched_inputs]

            targets = self.prepare_targets(gt_instances)
            loss_dict = self.criterion(output, targets)
            weight_dict = self.criterion.weight_dict
            # Scale each loss by its configured weight before returning.
            for k in loss_dict.keys():
                if k in weight_dict:
                    loss_dict[k] *= weight_dict[k]
            return loss_dict
        else:
            box_cls = output["pred_logits"]
            box_pred = output["pred_boxes"]
            mask_pred = output["pred_masks"] if self.mask_on else None
            results = self.inference(box_cls, box_pred, mask_pred, images.image_sizes)
            processed_results = []
            for results_per_image, input_per_image, image_size in zip(results, batched_inputs, images.image_sizes):
                # Rescale predictions to the caller-requested output resolution.
                height = input_per_image.get("height", image_size[0])
                width = input_per_image.get("width", image_size[1])
                r = detector_postprocess(results_per_image, height, width)
                processed_results.append({"instances": r})
            return processed_results

    def prepare_targets(self, targets):
        # Convert D2 Instances to DETR targets: normalized cxcywh boxes + labels.
        new_targets = []
        for targets_per_image in targets:
            h, w = targets_per_image.image_size
            image_size_xyxy = torch.as_tensor([w, h, w, h], dtype=torch.float, device=self.device)
            gt_classes = targets_per_image.gt_classes
            gt_boxes = targets_per_image.gt_boxes.tensor / image_size_xyxy
            gt_boxes = box_xyxy_to_cxcywh(gt_boxes)
            new_targets.append({"labels": gt_classes, "boxes": gt_boxes})
            if self.mask_on and hasattr(targets_per_image, 'gt_masks'):
                gt_masks = targets_per_image.gt_masks
                gt_masks = convert_coco_poly_to_mask(gt_masks.polygons, h, w)
                new_targets[-1].update({'masks': gt_masks})
        return new_targets

    def inference(self, box_cls, box_pred, mask_pred, image_sizes):
        """
        Arguments:
            box_cls (Tensor): tensor of shape (batch_size, num_queries, K).
                The tensor predicts the classification probability for each query.
            box_pred (Tensor): tensors of shape (batch_size, num_queries, 4).
                The tensor predicts 4-vector (x,y,w,h) box
                regression values for every queryx
            image_sizes (List[torch.Size]): the input image sizes

        Returns:
            results (List[Instances]): a list of #images elements.
        """
        assert len(box_cls) == len(image_sizes)
        results = []

        # For each box we assign the best class or the second best if the best on is `no_object`.
        # (The last logit is the no-object class and is excluded by the slice.)
        scores, labels = F.softmax(box_cls, dim=-1)[:, :, :-1].max(-1)

        for i, (scores_per_image, labels_per_image, box_pred_per_image, image_size) in enumerate(zip(
            scores, labels, box_pred, image_sizes
        )):
            result = Instances(image_size)
            # Boxes come out normalized cxcywh; convert and scale to pixels.
            result.pred_boxes = Boxes(box_cxcywh_to_xyxy(box_pred_per_image))

            result.pred_boxes.scale(scale_x=image_size[1], scale_y=image_size[0])
            if self.mask_on:
                mask = F.interpolate(mask_pred[i].unsqueeze(0), size=image_size, mode='bilinear', align_corners=False)
                mask = mask[0].sigmoid() > 0.5
                B, N, H, W = mask_pred.shape
                mask = BitMasks(mask.cpu()).crop_and_resize(result.pred_boxes.tensor.cpu(), 32)
                result.pred_masks = mask.unsqueeze(1).to(mask_pred[0].device)

            result.scores = scores_per_image
            result.pred_classes = labels_per_image
            results.append(result)
        return results

    def preprocess_image(self, batched_inputs):
        """
        Normalize, pad and batch the input images.
        """
        images = [self.normalizer(x["image"].to(self.device)) for x in batched_inputs]
        images = ImageList.from_tensors(images)
        return images
| 11,143 | 41.534351 | 118 | py |
pmb-nll | pmb-nll-main/src/detr/d2/detr/dataset_mapper.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import logging
import numpy as np
import torch
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from detectron2.data.transforms import TransformGen
__all__ = ["DetrDatasetMapper"]
def build_transform_gen(cfg, is_train):
    """
    Create a list of :class:`TransformGen` from config.

    Returns:
        list[TransformGen]
    """
    if is_train:
        min_size = cfg.INPUT.MIN_SIZE_TRAIN
        max_size = cfg.INPUT.MAX_SIZE_TRAIN
        sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
    else:
        min_size = cfg.INPUT.MIN_SIZE_TEST
        max_size = cfg.INPUT.MAX_SIZE_TEST
        sample_style = "choice"
    if sample_style == "range":
        assert len(min_size) == 2, "more than 2 ({}) min_size(s) are provided for ranges".format(len(min_size))

    logger = logging.getLogger(__name__)
    # Training additionally gets a random horizontal flip before the resize.
    tfm_gens = [T.RandomFlip()] if is_train else []
    tfm_gens.append(T.ResizeShortestEdge(min_size, max_size, sample_style))
    if is_train:
        logger.info("TransformGens used in training: " + str(tfm_gens))
    return tfm_gens
class DetrDatasetMapper:
    """
    A callable which takes a dataset dict in Detectron2 Dataset format,
    and map it into a format used by DETR.

    The callable currently does the following:

    1. Read the image from "file_name"
    2. Applies geometric transforms to the image and annotation
    3. Find and applies suitable cropping to the image and annotation
    4. Prepare image and annotation to Tensors
    """

    def __init__(self, cfg, is_train=True):
        if cfg.INPUT.CROP.ENABLED and is_train:
            # Crop pipeline used with 50% probability during training:
            # resize to a mid-range scale, then take a random crop.
            self.crop_gen = [
                T.ResizeShortestEdge([400, 500, 600], sample_style="choice"),
                T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE),
            ]
        else:
            self.crop_gen = None

        self.mask_on = cfg.MODEL.MASK_ON
        self.tfm_gens = build_transform_gen(cfg, is_train)
        logging.getLogger(__name__).info(
            "Full TransformGens used in training: {}, crop: {}".format(str(self.tfm_gens), str(self.crop_gen))
        )

        self.img_format = cfg.INPUT.FORMAT
        self.is_train = is_train

    def __call__(self, dataset_dict):
        """
        Args:
            dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.

        Returns:
            dict: a format that builtin models in detectron2 accept
        """
        dataset_dict = copy.deepcopy(dataset_dict)  # it will be modified by code below
        image = utils.read_image(dataset_dict["file_name"], format=self.img_format)
        utils.check_image_size(dataset_dict, image)

        if self.crop_gen is None:
            image, transforms = T.apply_transform_gens(self.tfm_gens, image)
        else:
            # 50/50: plain augmentation vs. crop pipeline inserted before the
            # final resize (last element of tfm_gens).
            if np.random.rand() > 0.5:
                image, transforms = T.apply_transform_gens(self.tfm_gens, image)
            else:
                image, transforms = T.apply_transform_gens(
                    self.tfm_gens[:-1] + self.crop_gen + self.tfm_gens[-1:], image
                )

        image_shape = image.shape[:2]  # h, w

        # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
        # but not efficient on large generic data structures due to the use of pickle & mp.Queue.
        # Therefore it's important to use torch.Tensor.
        dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))

        if not self.is_train:
            # USER: Modify this if you want to keep them for some reason.
            dataset_dict.pop("annotations", None)
            return dataset_dict

        if "annotations" in dataset_dict:
            # USER: Modify this if you want to keep them for some reason.
            for anno in dataset_dict["annotations"]:
                if not self.mask_on:
                    anno.pop("segmentation", None)
                anno.pop("keypoints", None)

            # USER: Implement additional transformations if you have other types of data
            annos = [
                utils.transform_instance_annotations(obj, transforms, image_shape)
                for obj in dataset_dict.pop("annotations")
                if obj.get("iscrowd", 0) == 0
            ]
            instances = utils.annotations_to_instances(annos, image_shape)
            dataset_dict["instances"] = utils.filter_empty_instances(instances)
        return dataset_dict
| 4,570 | 36.162602 | 111 | py |
pmb-nll | pmb-nll-main/src/detr/util/plot_utils.py | """
Plotting utilities to visualize training logs.
"""
import torch
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from pathlib import Path, PurePath
def plot_logs(logs, fields=('class_error', 'loss_bbox_unscaled', 'mAP'), ewm_col=0, log_name='log.txt'):
    '''
    Function to plot specific fields from training log(s). Plots both training and test results.

    :: Inputs - logs = list containing Path objects, each pointing to individual dir with a log file
              - fields = which results to plot from each log file - plots both training and test for each field.
              - ewm_col = optional, which column to use as the exponential weighted smoothing of the plots
              - log_name = optional, name of log file if different than default 'log.txt'.

    :: Outputs - matplotlib plots of results in fields, color coded for each log file.
               - solid lines are training results, dashed lines are test results.

    NOTE(review): with a single entry in `fields`, plt.subplots returns a bare
    Axes and `axs[j]` below would fail — callers should pass >= 2 fields.
    '''
    func_name = "plot_utils.py::plot_logs"
    # verify logs is a list of Paths (list[Paths]) or single Pathlib object Path,
    # convert single Path to list to avoid 'not iterable' error

    if not isinstance(logs, list):
        if isinstance(logs, PurePath):
            logs = [logs]
            print(f"{func_name} info: logs param expects a list argument, converted to list[Path].")
        else:
            raise ValueError(f"{func_name} - invalid argument for logs parameter.\n \
            Expect list[Path] or single Path obj, received {type(logs)}")

    # Quality checks - verify valid dir(s), that every item in list is Path object, and that log_name exists in each dir
    for i, dir in enumerate(logs):
        if not isinstance(dir, PurePath):
            raise ValueError(f"{func_name} - non-Path object in logs argument of {type(dir)}: \n{dir}")
        if not dir.exists():
            raise ValueError(f"{func_name} - invalid directory in logs argument:\n{dir}")
        # verify log_name exists
        fn = Path(dir / log_name)
        if not fn.exists():
            print(f"-> missing {log_name}.  Have you gotten to Epoch 1 in training?")
            print(f"--> full path of missing log file: {fn}")
            return

    # load log file(s) and plot
    dfs = [pd.read_json(Path(p) / log_name, lines=True) for p in logs]

    fig, axs = plt.subplots(ncols=len(fields), figsize=(16, 5))

    for df, color in zip(dfs, sns.color_palette(n_colors=len(logs))):
        for j, field in enumerate(fields):
            if field == 'mAP':
                # Column 1 of the stacked coco_eval array holds the mAP value.
                coco_eval = pd.DataFrame(
                    np.stack(df.test_coco_eval_bbox.dropna().values)[:, 1]
                ).ewm(com=ewm_col).mean()
                axs[j].plot(coco_eval, c=color)
            else:
                df.interpolate().ewm(com=ewm_col).mean().plot(
                    y=[f'train_{field}', f'test_{field}'],
                    ax=axs[j],
                    color=[color] * 2,
                    style=['-', '--']
                )
    for ax, field in zip(axs, fields):
        ax.legend([Path(p).name for p in logs])
        ax.set_title(field)
def plot_precision_recall(files, naming_scheme='iter'):
    """Plot precision/recall and score/recall curves from saved COCO eval files.

    Args:
        files: list of Path objects pointing to torch-saved eval dicts
            (keys used: 'precision', 'scores', 'recall', 'params').
        naming_scheme: 'exp_id' takes the 3rd-from-last path component as the
            curve label; 'iter' uses the file stem.

    Returns:
        (fig, axs): the matplotlib figure and its two axes.
    """
    if naming_scheme == 'exp_id':
        # name becomes exp_id
        names = [f.parts[-3] for f in files]
    elif naming_scheme == 'iter':
        names = [f.stem for f in files]
    else:
        raise ValueError(f'not supported {naming_scheme}')
    fig, axs = plt.subplots(ncols=2, figsize=(16, 5))
    for f, color, name in zip(files, sns.color_palette("Blues", n_colors=len(files)), names):
        data = torch.load(f)
        # precision is n_iou, n_points, n_cat, n_area, max_det
        precision = data['precision']
        recall = data['params'].recThrs
        scores = data['scores']
        # take precision for all classes, all areas and 100 detections
        # (index 0 on the IoU axis selects IoU=0.5, hence mAP@50 below)
        precision = precision[0, :, :, 0, -1].mean(1)
        scores = scores[0, :, :, 0, -1].mean(1)
        prec = precision.mean()
        rec = data['recall'][0, :, 0, -1].mean()
        print(f'{naming_scheme} {name}: mAP@50={prec * 100: 05.1f}, ' +
              f'score={scores.mean():0.3f}, ' +
              f'f1={2 * prec * rec / (prec + rec + 1e-8):0.3f}'
              )
        axs[0].plot(recall, precision, c=color)
        axs[1].plot(recall, scores, c=color)
    axs[0].set_title('Precision / Recall')
    axs[0].legend(names)
    axs[1].set_title('Scores / Recall')
    axs[1].legend(names)
    return fig, axs
| 4,514 | 40.805556 | 120 | py |
pmb-nll | pmb-nll-main/src/detr/util/misc.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Misc functions, including distributed helpers.
Mostly copy-paste from torchvision references.
"""
import os
import subprocess
import time
from collections import defaultdict, deque
import datetime
import pickle
from typing import Optional, List
import torch
import torch.distributed as dist
from torch import Tensor
# needed due to empty tensor bug in pytorch and torchvision 0.5
import torchvision
if float(torchvision.__version__.split(".")[1]) < 7.0:
from torchvision.ops import _new_empty_tensor
from torchvision.ops.misc import _output_size
class SmoothedValue(object):
    """Track a stream of scalar values.

    Keeps the most recent ``window_size`` raw values for windowed statistics
    (``median``/``avg``/``max``/``value``) plus a running total and count for
    the global average over every value ever recorded.
    """
    def __init__(self, window_size=20, fmt=None):
        # Default display: windowed median followed by the global average.
        self.deque = deque(maxlen=window_size)
        self.total = 0.0
        self.count = 0
        self.fmt = "{median:.4f} ({global_avg:.4f})" if fmt is None else fmt
    def update(self, value, n=1):
        """Record ``value``; it contributes ``n`` samples to the global stats."""
        self.deque.append(value)
        self.count += n
        self.total += value * n
    def synchronize_between_processes(self):
        """Sum ``count`` and ``total`` across all distributed workers.

        Warning: does not synchronize the deque!
        """
        if not is_dist_avail_and_initialized():
            return
        stats = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
        dist.barrier()
        dist.all_reduce(stats)
        synced = stats.tolist()
        self.count = int(synced[0])
        self.total = synced[1]
    @property
    def median(self):
        """Median of the values currently in the window."""
        return torch.tensor(list(self.deque)).median().item()
    @property
    def avg(self):
        """Mean of the values currently in the window."""
        return torch.tensor(list(self.deque), dtype=torch.float32).mean().item()
    @property
    def global_avg(self):
        """Average over every value ever recorded (weighted by ``n``)."""
        return self.total / self.count
    @property
    def max(self):
        """Largest value currently in the window."""
        return max(self.deque)
    @property
    def value(self):
        """Most recently recorded value."""
        return self.deque[-1]
    def __str__(self):
        return self.fmt.format(
            median=self.median,
            avg=self.avg,
            global_avg=self.global_avg,
            max=self.max,
            value=self.value)
def all_gather(data):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors)
    Args:
        data: any picklable object
    Returns:
        list[data]: list of data gathered from each rank
    """
    world_size = get_world_size()
    if world_size == 1:
        # single-process: nothing to gather
        return [data]
    # serialized to a Tensor (pickle is fine here: peers are our own trusted workers)
    buffer = pickle.dumps(data)
    storage = torch.ByteStorage.from_buffer(buffer)
    tensor = torch.ByteTensor(storage).to("cuda")
    # obtain Tensor size of each rank
    local_size = torch.tensor([tensor.numel()], device="cuda")
    size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)]
    dist.all_gather(size_list, local_size)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)
    # receiving Tensor from all ranks
    # we pad the tensor because torch all_gather does not support
    # gathering tensors of different shapes
    tensor_list = []
    for _ in size_list:
        tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda"))
    if local_size != max_size:
        padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda")
        tensor = torch.cat((tensor, padding), dim=0)
    dist.all_gather(tensor_list, tensor)
    # unpickle each rank's payload, truncating the padding first
    data_list = []
    for size, tensor in zip(size_list, tensor_list):
        buffer = tensor.cpu().numpy().tobytes()[:size]
        data_list.append(pickle.loads(buffer))
    return data_list
def reduce_dict(input_dict, average=True):
    """
    Args:
        input_dict (dict): all the values will be reduced
        average (bool): whether to do average or sum
    Reduce the values in the dictionary from all processes so that all processes
    have the averaged results. Returns a dict with the same fields as
    input_dict, after reduction.
    """
    world_size = get_world_size()
    if world_size < 2:
        # single process: the input is already the reduced result
        return input_dict
    with torch.no_grad():
        names = []
        values = []
        # sort the keys so that they are consistent across processes
        for k in sorted(input_dict.keys()):
            names.append(k)
            values.append(input_dict[k])
        # stack into one tensor so a single all_reduce covers every entry
        values = torch.stack(values, dim=0)
        dist.all_reduce(values)
        if average:
            values /= world_size
        reduced_dict = {k: v for k, v in zip(names, values)}
    return reduced_dict
class MetricLogger(object):
    """Collects named SmoothedValue meters and prints periodic progress lines.

    Meters are created lazily on first `update(name=value)`; `__getattr__`
    exposes each meter as an attribute of the logger.
    """
    def __init__(self, delimiter="\t"):
        # defaultdict: unknown metric names get a fresh SmoothedValue automatically
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter
    def update(self, **kwargs):
        """Record one scalar per keyword; tensors are converted via .item()."""
        for k, v in kwargs.items():
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)
    def __getattr__(self, attr):
        # Only called when normal attribute lookup fails: fall back to meters,
        # then to instance __dict__, then raise like a normal missing attribute.
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(
            type(self).__name__, attr))
    def __str__(self):
        # "name: <formatted meter>" joined by the configured delimiter
        loss_str = []
        for name, meter in self.meters.items():
            loss_str.append(
                "{}: {}".format(name, str(meter))
            )
        return self.delimiter.join(loss_str)
    def synchronize_between_processes(self):
        """Reduce every meter's global stats across distributed workers."""
        for meter in self.meters.values():
            meter.synchronize_between_processes()
    def add_meter(self, name, meter):
        """Register a pre-configured meter (e.g. with a custom fmt)."""
        self.meters[name] = meter
    def log_every(self, iterable, print_freq, header=None):
        """Generator wrapping `iterable`: yields its items while printing
        progress (ETA, meters, per-iter timing, CUDA memory) every
        `print_freq` iterations. Requires `len(iterable)`.
        """
        i = 0
        if not header:
            header = ''
        start_time = time.time()
        end = time.time()
        # windowed timers for the compute step and the data-loading step
        iter_time = SmoothedValue(fmt='{avg:.4f}')
        data_time = SmoothedValue(fmt='{avg:.4f}')
        # pad the iteration counter to the width of len(iterable)
        space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
        if torch.cuda.is_available():
            log_msg = self.delimiter.join([
                header,
                '[{0' + space_fmt + '}/{1}]',
                'eta: {eta}',
                '{meters}',
                'time: {time}',
                'data: {data}',
                'max mem: {memory:.0f}'
            ])
        else:
            log_msg = self.delimiter.join([
                header,
                '[{0' + space_fmt + '}/{1}]',
                'eta: {eta}',
                '{meters}',
                'time: {time}',
                'data: {data}'
            ])
        MB = 1024.0 * 1024.0
        for obj in iterable:
            # time between yields ≈ data-loading time; time across the yield
            # (consumer's work) is folded into iter_time below
            data_time.update(time.time() - end)
            yield obj
            iter_time.update(time.time() - end)
            if i % print_freq == 0 or i == len(iterable) - 1:
                eta_seconds = iter_time.global_avg * (len(iterable) - i)
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time),
                        memory=torch.cuda.max_memory_allocated() / MB))
                else:
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time)))
            i += 1
            end = time.time()
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print('{} Total time: {} ({:.4f} s / it)'.format(
            header, total_time_str, total_time / len(iterable)))
def get_sha():
    """Return a one-line summary of the git state of this source tree.

    Format: ``sha: <hash>, status: <clean|has uncommited changes>, branch: <name>``.
    Falls back to placeholder values ('N/A' / "clean") when git is missing or
    this file is not inside a repository — any failure is swallowed on purpose.
    """
    repo_dir = os.path.dirname(os.path.abspath(__file__))
    def run_git(args):
        # Run a git command inside the repo and return its trimmed ascii output.
        return subprocess.check_output(args, cwd=repo_dir).decode('ascii').strip()
    sha, diff, branch = 'N/A', "clean", 'N/A'
    try:
        sha = run_git(['git', 'rev-parse', 'HEAD'])
        # refresh the index so diff-index below reflects the working tree
        subprocess.check_output(['git', 'diff'], cwd=repo_dir)
        dirty = run_git(['git', 'diff-index', 'HEAD'])
        diff = "has uncommited changes" if dirty else "clean"
        branch = run_git(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
    except Exception:
        # best effort only: keep the placeholder values on any failure
        pass
    return f"sha: {sha}, status: {diff}, branch: {branch}"
def collate_fn(batch):
    """DataLoader collate: pad images of a batch into one NestedTensor.

    `batch` is a list of (image, target) pairs; images become a padded
    NestedTensor, targets stay an untouched tuple of dicts.
    """
    batch = list(zip(*batch))
    batch[0] = nested_tensor_from_tensor_list(batch[0])
    return tuple(batch)
def _max_by_axis(the_list):
    # type: (List[List[int]]) -> List[int]
    """Element-wise maximum across a non-empty list of equal-length int lists.

    Used to find the largest image shape in a batch before padding.
    Raises IndexError on an empty outer list (same as before).
    """
    # Copy the first sublist: the original code aliased it and then wrote
    # into it, silently mutating the caller's data.
    maxes = list(the_list[0])
    for sublist in the_list[1:]:
        for index, item in enumerate(sublist):
            maxes[index] = max(maxes[index], item)
    return maxes
class NestedTensor(object):
    """A padded batch tensor bundled with its padding mask.

    ``tensors`` holds the padded batch; ``mask`` (when not None) marks the
    padded positions so downstream code can ignore them.
    """
    def __init__(self, tensors, mask: Optional[Tensor]):
        self.tensors = tensors
        self.mask = mask
    def to(self, device):
        # type: (Device) -> NestedTensor # noqa
        """Return a new NestedTensor with data (and mask, if any) on ``device``."""
        moved_tensors = self.tensors.to(device)
        current_mask = self.mask
        moved_mask = current_mask.to(device) if current_mask is not None else None
        return NestedTensor(moved_tensors, moved_mask)
    def decompose(self):
        """Split back into the underlying ``(tensors, mask)`` pair."""
        return self.tensors, self.mask
    def __repr__(self):
        return str(self.tensors)
def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):
    """Pad a list of CHW image tensors to a common shape and build a NestedTensor.

    The mask is True at padded positions, False where real image data lives.
    Only 3-D (C, H, W) tensors are supported.
    """
    # TODO make this more general
    if tensor_list[0].ndim == 3:
        if torchvision._is_tracing():
            # nested_tensor_from_tensor_list() does not export well to ONNX
            # call _onnx_nested_tensor_from_tensor_list() instead
            return _onnx_nested_tensor_from_tensor_list(tensor_list)
        # TODO make it support different-sized images
        max_size = _max_by_axis([list(img.shape) for img in tensor_list])
        # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))
        batch_shape = [len(tensor_list)] + max_size
        b, c, h, w = batch_shape
        dtype = tensor_list[0].dtype
        device = tensor_list[0].device
        # zero-filled padded batch plus an all-True mask ...
        tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
        mask = torch.ones((b, h, w), dtype=torch.bool, device=device)
        for img, pad_img, m in zip(tensor_list, tensor, mask):
            # ... then copy each image into the top-left corner and clear its mask region
            pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
            m[: img.shape[1], :img.shape[2]] = False
    else:
        raise ValueError('not supported')
    return NestedTensor(tensor, mask)
# _onnx_nested_tensor_from_tensor_list() is an implementation of
# nested_tensor_from_tensor_list() that is supported by ONNX tracing.
# _onnx_nested_tensor_from_tensor_list() is an implementation of
# nested_tensor_from_tensor_list() that is supported by ONNX tracing.
@torch.jit.unused
def _onnx_nested_tensor_from_tensor_list(tensor_list: List[Tensor]) -> NestedTensor:
    # per-axis maximum shape, computed with tensor ops so tracing records it
    max_size = []
    for i in range(tensor_list[0].dim()):
        max_size_i = torch.max(torch.stack([img.shape[i] for img in tensor_list]).to(torch.float32)).to(torch.int64)
        max_size.append(max_size_i)
    max_size = tuple(max_size)
    # work around for
    # pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
    # m[: img.shape[1], :img.shape[2]] = False
    # which is not yet supported in onnx
    padded_imgs = []
    padded_masks = []
    for img in tensor_list:
        # pad each image (and its mask) up to max_size with F.pad instead of slicing
        padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))]
        padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0]))
        padded_imgs.append(padded_img)
        # mask: 0 over the image, padded with 1s, then cast to bool (True = padding)
        m = torch.zeros_like(img[0], dtype=torch.int, device=img.device)
        padded_mask = torch.nn.functional.pad(m, (0, padding[2], 0, padding[1]), "constant", 1)
        padded_masks.append(padded_mask.to(torch.bool))
    tensor = torch.stack(padded_imgs)
    mask = torch.stack(padded_masks)
    return NestedTensor(tensor, mask=mask)
def setup_for_distributed(is_master):
    """
    This function disables printing when not in master process
    """
    import builtins as __builtin__
    builtin_print = __builtin__.print
    def print(*args, **kwargs):
        # 'force=True' lets any rank print despite the master-only policy
        force = kwargs.pop('force', False)
        if is_master or force:
            builtin_print(*args, **kwargs)
    # globally replace the builtin print for this process
    __builtin__.print = print
def is_dist_avail_and_initialized():
    """True iff torch.distributed is both compiled in and initialized."""
    return dist.is_available() and dist.is_initialized()
def get_world_size():
    """Number of distributed workers, or 1 when not running distributed."""
    return dist.get_world_size() if is_dist_avail_and_initialized() else 1
def get_rank():
    """Rank of this process in the group, or 0 when not running distributed."""
    return dist.get_rank() if is_dist_avail_and_initialized() else 0
def is_main_process():
    """True only on the rank-0 worker (always True when not distributed)."""
    return get_rank() == 0
def save_on_master(*args, **kwargs):
    """torch.save, executed only on the main process to avoid duplicate writes."""
    if is_main_process():
        torch.save(*args, **kwargs)
def init_distributed_mode(args):
    """Initialize torch.distributed from environment variables.

    Supports torchrun/launch-style env (RANK/WORLD_SIZE/LOCAL_RANK) and SLURM
    (SLURM_PROCID). Mutates `args` in place: sets rank, world_size, gpu,
    distributed, dist_backend. Falls back to non-distributed mode when no
    environment is detected.
    """
    if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
        args.rank = int(os.environ["RANK"])
        args.world_size = int(os.environ['WORLD_SIZE'])
        args.gpu = int(os.environ['LOCAL_RANK'])
    elif 'SLURM_PROCID' in os.environ:
        args.rank = int(os.environ['SLURM_PROCID'])
        # one GPU per local rank
        args.gpu = args.rank % torch.cuda.device_count()
    else:
        print('Not using distributed mode')
        args.distributed = False
        return
    args.distributed = True
    torch.cuda.set_device(args.gpu)
    args.dist_backend = 'nccl'
    print('| distributed init (rank {}): {}'.format(
        args.rank, args.dist_url), flush=True)
    torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                         world_size=args.world_size, rank=args.rank)
    torch.distributed.barrier()
    # silence print() on all ranks except rank 0
    setup_for_distributed(args.rank == 0)
@torch.no_grad()
def accuracy(output, target, topk=(1,)):
    """Compute precision@k for each k in ``topk``.

    Args:
        output: (batch, num_classes) scores/logits.
        target: (batch,) integer class labels.
        topk: tuple of k values to evaluate.

    Returns:
        list of 0-dim tensors, one percentage per k (a single zero tensor
        when ``target`` is empty).
    """
    if target.numel() == 0:
        return [torch.zeros([], device=output.device)]
    num_samples = target.size(0)
    # indices of the max(topk) highest-scoring classes per sample
    _, top_pred = output.topk(max(topk), 1, True, True)
    top_pred = top_pred.t()  # (maxk, batch): row r holds each sample's r-th guess
    hits = top_pred.eq(target.view(1, -1).expand_as(top_pred))
    # for each k: fraction of samples whose label is among the first k guesses
    return [hits[:k].reshape(-1).float().sum(0).mul_(100.0 / num_samples)
            for k in topk]
def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None):
    # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor
    """
    Equivalent to nn.functional.interpolate, but with support for empty batch sizes.
    This will eventually be supported natively by PyTorch, and this
    class can go away.
    """
    # NOTE(review): this parses only the *minor* version component as a float,
    # so it misbehaves for torchvision >= 1.0 — confirm against the pinned version
    if float(torchvision.__version__.split(".")[1]) < 7.0:
        if input.numel() > 0:
            return torch.nn.functional.interpolate(
                input, size, scale_factor, mode, align_corners
            )
        # empty batch: compute the would-be output shape and return an empty tensor
        output_shape = _output_size(2, input, size, scale_factor)
        output_shape = list(input.shape[:-2]) + list(output_shape)
        return _new_empty_tensor(input, output_shape)
    else:
        # torchvision >= 0.7 handles empty batches itself
        return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners)
| 15,304 | 31.702991 | 116 | py |
pmb-nll | pmb-nll-main/src/detr/util/box_ops.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Utilities for bounding box manipulation and GIoU.
"""
import torch
from torchvision.ops.boxes import box_area
def box_cxcywh_to_xyxy(x):
    """Convert boxes from (cx, cy, w, h) to (x0, y0, x1, y1) along the last dim."""
    center_x, center_y, width, height = x.unbind(-1)
    half_w = 0.5 * width
    half_h = 0.5 * height
    corners = [center_x - half_w, center_y - half_h,
               center_x + half_w, center_y + half_h]
    return torch.stack(corners, dim=-1)
def box_xyxy_to_cxcywh(x):
    """Convert boxes from (x0, y0, x1, y1) to (cx, cy, w, h) along the last dim."""
    x0, y0, x1, y1 = x.unbind(-1)
    components = [(x0 + x1) / 2,
                  (y0 + y1) / 2,
                  x1 - x0,
                  y1 - y0]
    return torch.stack(components, dim=-1)
# modified from torchvision to also return the union
# modified from torchvision to also return the union
def box_iou(boxes1, boxes2):
    """Pairwise IoU between two sets of (x0, y0, x1, y1) boxes.

    Returns:
        (iou, union): two [N, M] tensors, where N = len(boxes1), M = len(boxes2).
    """
    area1 = box_area(boxes1)
    area2 = box_area(boxes2)
    # intersection rectangle: max of top-lefts, min of bottom-rights
    lt = torch.max(boxes1[:, None, :2], boxes2[:, :2])  # [N,M,2]
    rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:])  # [N,M,2]
    wh = (rb - lt).clamp(min=0)  # [N,M,2], clamp handles non-overlapping pairs
    inter = wh[:, :, 0] * wh[:, :, 1]  # [N,M]
    union = area1[:, None] + area2 - inter
    iou = inter / union
    return iou, union
def generalized_box_iou(boxes1, boxes2):
    """
    Generalized IoU from https://giou.stanford.edu/
    The boxes should be in [x0, y0, x1, y1] format
    Returns a [N, M] pairwise matrix, where N = len(boxes1)
    and M = len(boxes2)
    """
    # degenerate boxes gives inf / nan results
    # so do an early check
    assert (boxes1[:, 2:] >= boxes1[:, :2]).all()
    assert (boxes2[:, 2:] >= boxes2[:, :2]).all()
    iou, union = box_iou(boxes1, boxes2)
    # smallest enclosing box of each pair
    lt = torch.min(boxes1[:, None, :2], boxes2[:, :2])
    rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
    wh = (rb - lt).clamp(min=0)  # [N,M,2]
    area = wh[:, :, 0] * wh[:, :, 1]
    # GIoU = IoU - (enclosing_area - union) / enclosing_area
    return iou - (area - union) / area
def masks_to_boxes(masks):
    """Compute the bounding boxes around the provided masks

    The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions.
    Returns a [N, 4] tensors, with the boxes in xyxy format
    """
    if masks.numel() == 0:
        return torch.zeros((0, 4), device=masks.device)
    height, width = masks.shape[-2:]
    row_idx = torch.arange(0, height, dtype=torch.float)
    col_idx = torch.arange(0, width, dtype=torch.float)
    grid_y, grid_x = torch.meshgrid(row_idx, col_idx)
    # max of coordinates weighted by the mask gives the far edge; for the near
    # edge, fill non-mask positions with a huge value and take the min
    weighted_x = masks * grid_x.unsqueeze(0)
    x_max = weighted_x.flatten(1).max(-1)[0]
    x_min = weighted_x.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
    weighted_y = masks * grid_y.unsqueeze(0)
    y_max = weighted_y.flatten(1).max(-1)[0]
    y_min = weighted_y.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
    return torch.stack([x_min, y_min, x_max, y_max], 1)
| 2,561 | 27.786517 | 110 | py |
pmb-nll | pmb-nll-main/src/detr/datasets/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch.utils.data
import torchvision
from .coco import build as build_coco
def get_coco_api_from_dataset(dataset):
    """Unwrap up to 10 levels of torch Subset wrappers and return the
    underlying pycocotools COCO object of a CocoDetection dataset.

    NOTE(review): returns None implicitly when the innermost dataset is not a
    CocoDetection — confirm callers handle that.
    """
    for _ in range(10):
        # if isinstance(dataset, torchvision.datasets.CocoDetection):
        #     break
        if isinstance(dataset, torch.utils.data.Subset):
            dataset = dataset.dataset
    if isinstance(dataset, torchvision.datasets.CocoDetection):
        return dataset.coco
def build_dataset(image_set, args):
    """Build the dataset named by args.dataset_file ('coco' or 'coco_panoptic')
    for the given split ('train' / 'val')."""
    if args.dataset_file == 'coco':
        return build_coco(image_set, args)
    if args.dataset_file == 'coco_panoptic':
        # to avoid making panopticapi required for coco
        from .coco_panoptic import build as build_coco_panoptic
        return build_coco_panoptic(image_set, args)
    raise ValueError(f'dataset {args.dataset_file} not supported')
| 897 | 33.538462 | 70 | py |
pmb-nll | pmb-nll-main/src/detr/datasets/coco_eval.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
COCO evaluator that works in distributed mode.
Mostly copy-paste from https://github.com/pytorch/vision/blob/edfd5a7/references/detection/coco_eval.py
The difference is that there is less copy-pasting from pycocotools
in the end of the file, as python3 can suppress prints with contextlib
"""
import os
import contextlib
import copy
import numpy as np
import torch
from pycocotools.cocoeval import COCOeval
from pycocotools.coco import COCO
import pycocotools.mask as mask_util
from util.misc import all_gather
class CocoEvaluator(object):
    """Distributed-friendly wrapper around pycocotools' COCOeval.

    Accumulates per-image eval results across `update` calls (and across
    processes via `synchronize_between_processes`), then runs the standard
    COCO accumulate/summarize for each requested iou_type.
    """
    def __init__(self, coco_gt, iou_types):
        assert isinstance(iou_types, (list, tuple))
        # deep-copy so evaluation never mutates the caller's ground-truth API
        coco_gt = copy.deepcopy(coco_gt)
        self.coco_gt = coco_gt
        self.iou_types = iou_types
        self.coco_eval = {}
        for iou_type in iou_types:
            self.coco_eval[iou_type] = COCOeval(coco_gt, iouType=iou_type)
        self.img_ids = []
        self.eval_imgs = {k: [] for k in iou_types}
    def update(self, predictions):
        """Feed a {image_id: prediction dict} batch through COCOeval."""
        img_ids = list(np.unique(list(predictions.keys())))
        self.img_ids.extend(img_ids)
        for iou_type in self.iou_types:
            results = self.prepare(predictions, iou_type)
            # suppress pycocotools prints
            with open(os.devnull, 'w') as devnull:
                with contextlib.redirect_stdout(devnull):
                    coco_dt = COCO.loadRes(self.coco_gt, results) if results else COCO()
            coco_eval = self.coco_eval[iou_type]
            coco_eval.cocoDt = coco_dt
            coco_eval.params.imgIds = list(img_ids)
            # module-level `evaluate` (below): per-image eval without prints
            img_ids, eval_imgs = evaluate(coco_eval)
            self.eval_imgs[iou_type].append(eval_imgs)
    def synchronize_between_processes(self):
        """Gather per-image results from all workers into each COCOeval."""
        for iou_type in self.iou_types:
            self.eval_imgs[iou_type] = np.concatenate(self.eval_imgs[iou_type], 2)
            create_common_coco_eval(self.coco_eval[iou_type], self.img_ids, self.eval_imgs[iou_type])
    def accumulate(self):
        for coco_eval in self.coco_eval.values():
            coco_eval.accumulate()
    def summarize(self):
        for iou_type, coco_eval in self.coco_eval.items():
            print("IoU metric: {}".format(iou_type))
            coco_eval.summarize()
    def prepare(self, predictions, iou_type):
        """Convert model predictions to COCO results format for one iou_type."""
        if iou_type == "bbox":
            return self.prepare_for_coco_detection(predictions)
        elif iou_type == "segm":
            return self.prepare_for_coco_segmentation(predictions)
        elif iou_type == "keypoints":
            return self.prepare_for_coco_keypoint(predictions)
        else:
            raise ValueError("Unknown iou type {}".format(iou_type))
    def prepare_for_coco_detection(self, predictions):
        """Flatten box predictions into COCO result dicts (xywh boxes)."""
        coco_results = []
        for original_id, prediction in predictions.items():
            if len(prediction) == 0:
                continue
            boxes = prediction["boxes"]
            boxes = convert_to_xywh(boxes).tolist()
            scores = prediction["scores"].tolist()
            labels = prediction["labels"].tolist()
            coco_results.extend(
                [
                    {
                        "image_id": original_id,
                        "category_id": labels[k],
                        "bbox": box,
                        "score": scores[k],
                    }
                    for k, box in enumerate(boxes)
                ]
            )
        return coco_results
    def prepare_for_coco_segmentation(self, predictions):
        """Flatten mask predictions into COCO RLE result dicts."""
        coco_results = []
        for original_id, prediction in predictions.items():
            if len(prediction) == 0:
                continue
            scores = prediction["scores"]
            labels = prediction["labels"]
            masks = prediction["masks"]
            # binarize soft masks before RLE-encoding
            masks = masks > 0.5
            scores = prediction["scores"].tolist()
            labels = prediction["labels"].tolist()
            rles = [
                mask_util.encode(np.array(mask[0, :, :, np.newaxis], dtype=np.uint8, order="F"))[0]
                for mask in masks
            ]
            for rle in rles:
                # json-serializable counts
                rle["counts"] = rle["counts"].decode("utf-8")
            coco_results.extend(
                [
                    {
                        "image_id": original_id,
                        "category_id": labels[k],
                        "segmentation": rle,
                        "score": scores[k],
                    }
                    for k, rle in enumerate(rles)
                ]
            )
        return coco_results
    def prepare_for_coco_keypoint(self, predictions):
        """Flatten keypoint predictions into COCO result dicts."""
        coco_results = []
        for original_id, prediction in predictions.items():
            if len(prediction) == 0:
                continue
            boxes = prediction["boxes"]
            boxes = convert_to_xywh(boxes).tolist()
            scores = prediction["scores"].tolist()
            labels = prediction["labels"].tolist()
            keypoints = prediction["keypoints"]
            keypoints = keypoints.flatten(start_dim=1).tolist()
            coco_results.extend(
                [
                    {
                        "image_id": original_id,
                        "category_id": labels[k],
                        'keypoints': keypoint,
                        "score": scores[k],
                    }
                    for k, keypoint in enumerate(keypoints)
                ]
            )
        return coco_results
def convert_to_xywh(boxes):
    """Convert (xmin, ymin, xmax, ymax) boxes to COCO's (x, y, width, height)."""
    x0, y0, x1, y1 = boxes.unbind(1)
    return torch.stack((x0, y0, x1 - x0, y1 - y0), dim=1)
def merge(img_ids, eval_imgs):
    """Gather per-process image ids and eval arrays from all workers and merge
    them, deduplicating overlapping image ids (sorted order).

    Returns (merged_img_ids, merged_eval_imgs) where the eval array's last
    axis is aligned with the ids.
    """
    all_img_ids = all_gather(img_ids)
    all_eval_imgs = all_gather(eval_imgs)
    merged_img_ids = []
    for p in all_img_ids:
        merged_img_ids.extend(p)
    merged_eval_imgs = []
    for p in all_eval_imgs:
        merged_eval_imgs.append(p)
    merged_img_ids = np.array(merged_img_ids)
    # axis 2 is the per-image axis produced by evaluate()
    merged_eval_imgs = np.concatenate(merged_eval_imgs, 2)
    # keep only unique (and in sorted order) images
    merged_img_ids, idx = np.unique(merged_img_ids, return_index=True)
    merged_eval_imgs = merged_eval_imgs[..., idx]
    return merged_img_ids, merged_eval_imgs
def create_common_coco_eval(coco_eval, img_ids, eval_imgs):
    """Install the merged cross-process results into a COCOeval instance so
    that accumulate()/summarize() see the full dataset."""
    img_ids, eval_imgs = merge(img_ids, eval_imgs)
    img_ids = list(img_ids)
    eval_imgs = list(eval_imgs.flatten())
    coco_eval.evalImgs = eval_imgs
    coco_eval.params.imgIds = img_ids
    # accumulate() reads _paramsEval, so snapshot the params it should use
    coco_eval._paramsEval = copy.deepcopy(coco_eval.params)
#################################################################
# From pycocotools, just removed the prints and fixed
# a Python3 bug about unicode not defined
#################################################################
def evaluate(self):
    '''
    Run per image evaluation on given images and store results (a list of dict) in self.evalImgs
    :return: None
    '''
    # NOTE: straight copy of pycocotools COCOeval.evaluate with prints removed;
    # `self` is a COCOeval instance, and unlike upstream this returns
    # (imgIds, evalImgs) reshaped for cross-process merging.
    # tic = time.time()
    # print('Running per image evaluation...')
    p = self.params
    # add backward compatibility if useSegm is specified in params
    if p.useSegm is not None:
        p.iouType = 'segm' if p.useSegm == 1 else 'bbox'
        print('useSegm (deprecated) is not None. Running {} evaluation'.format(p.iouType))
    # print('Evaluate annotation type *{}*'.format(p.iouType))
    p.imgIds = list(np.unique(p.imgIds))
    if p.useCats:
        p.catIds = list(np.unique(p.catIds))
    p.maxDets = sorted(p.maxDets)
    self.params = p
    self._prepare()
    # loop through images, area range, max detection number
    catIds = p.catIds if p.useCats else [-1]
    if p.iouType == 'segm' or p.iouType == 'bbox':
        computeIoU = self.computeIoU
    elif p.iouType == 'keypoints':
        computeIoU = self.computeOks
    self.ious = {
        (imgId, catId): computeIoU(imgId, catId)
        for imgId in p.imgIds
        for catId in catIds}
    evaluateImg = self.evaluateImg
    maxDet = p.maxDets[-1]
    evalImgs = [
        evaluateImg(imgId, catId, areaRng, maxDet)
        for catId in catIds
        for areaRng in p.areaRng
        for imgId in p.imgIds
    ]
    # this is NOT in the pycocotools code, but could be done outside
    evalImgs = np.asarray(evalImgs).reshape(len(catIds), len(p.areaRng), len(p.imgIds))
    self._paramsEval = copy.deepcopy(self.params)
    # toc = time.time()
    # print('DONE (t={:0.2f}s).'.format(toc-tic))
    return p.imgIds, evalImgs
#################################################################
# end of straight copy from pycocotools, just removing the prints
#################################################################
| 8,735 | 32.860465 | 103 | py |
pmb-nll | pmb-nll-main/src/detr/datasets/coco_panoptic.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import json
from pathlib import Path
import numpy as np
import torch
from PIL import Image
from panopticapi.utils import rgb2id
from util.box_ops import masks_to_boxes
from .coco import make_coco_transforms
class CocoPanoptic:
    """COCO panoptic dataset: images plus per-segment masks/labels decoded
    from the panoptic PNG annotations via panopticapi's rgb2id."""
    def __init__(self, img_folder, ann_folder, ann_file, transforms=None, return_masks=True):
        with open(ann_file, 'r') as f:
            self.coco = json.load(f)
        # sort 'images' field so that they are aligned with 'annotations'
        # i.e., in alphabetical order
        self.coco['images'] = sorted(self.coco['images'], key=lambda x: x['id'])
        # sanity check
        if "annotations" in self.coco:
            for img, ann in zip(self.coco['images'], self.coco['annotations']):
                assert img['file_name'][:-4] == ann['file_name'][:-4]
        self.img_folder = img_folder
        self.ann_folder = ann_folder
        self.ann_file = ann_file
        self.transforms = transforms
        self.return_masks = return_masks
    def __getitem__(self, idx):
        ann_info = self.coco['annotations'][idx] if "annotations" in self.coco else self.coco['images'][idx]
        # annotations reference .png names; the RGB images are .jpg
        img_path = Path(self.img_folder) / ann_info['file_name'].replace('.png', '.jpg')
        ann_path = Path(self.ann_folder) / ann_info['file_name']
        img = Image.open(img_path).convert('RGB')
        w, h = img.size
        if "segments_info" in ann_info:
            # decode the panoptic PNG into per-segment binary masks
            masks = np.asarray(Image.open(ann_path), dtype=np.uint32)
            masks = rgb2id(masks)
            ids = np.array([ann['id'] for ann in ann_info['segments_info']])
            masks = masks == ids[:, None, None]
            masks = torch.as_tensor(masks, dtype=torch.uint8)
            labels = torch.tensor([ann['category_id'] for ann in ann_info['segments_info']], dtype=torch.int64)
        target = {}
        target['image_id'] = torch.tensor([ann_info['image_id'] if "image_id" in ann_info else ann_info["id"]])
        if self.return_masks:
            target['masks'] = masks
        # NOTE(review): `labels`/`masks` are only bound when "segments_info" is
        # present — the lines below raise NameError otherwise; confirm this
        # path is never hit with bare image entries
        target['labels'] = labels
        target["boxes"] = masks_to_boxes(masks)
        target['size'] = torch.as_tensor([int(h), int(w)])
        target['orig_size'] = torch.as_tensor([int(h), int(w)])
        if "segments_info" in ann_info:
            for name in ['iscrowd', 'area']:
                target[name] = torch.tensor([ann[name] for ann in ann_info['segments_info']])
        if self.transforms is not None:
            img, target = self.transforms(img, target)
        return img, target
    def __len__(self):
        return len(self.coco['images'])
    def get_height_and_width(self, idx):
        img_info = self.coco['images'][idx]
        height = img_info['height']
        width = img_info['width']
        return height, width
def build(image_set, args):
    """Build the CocoPanoptic dataset for the 'train' or 'val' split.

    Uses args.coco_path for RGB images and args.coco_panoptic_path for the
    panoptic annotations; transforms match the plain COCO pipeline.
    """
    img_folder_root = Path(args.coco_path)
    ann_folder_root = Path(args.coco_panoptic_path)
    assert img_folder_root.exists(), f'provided COCO path {img_folder_root} does not exist'
    assert ann_folder_root.exists(), f'provided COCO path {ann_folder_root} does not exist'
    mode = 'panoptic'
    PATHS = {
        "train": ("train2017", Path("annotations") / f'{mode}_train2017.json'),
        "val": ("val2017", Path("annotations") / f'{mode}_val2017.json'),
    }
    img_folder, ann_file = PATHS[image_set]
    img_folder_path = img_folder_root / img_folder
    # per-split folder of panoptic PNGs, e.g. panoptic_train2017
    ann_folder = ann_folder_root / f'{mode}_{img_folder}'
    ann_file = ann_folder_root / ann_file
    dataset = CocoPanoptic(img_folder_path, ann_folder, ann_file,
                           transforms=make_coco_transforms(image_set), return_masks=args.masks)
    return dataset
| 3,723 | 36.24 | 111 | py |
pmb-nll | pmb-nll-main/src/detr/datasets/coco.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
COCO dataset which returns image_id for evaluation.
Mostly copy-paste from https://github.com/pytorch/vision/blob/13b35ff/references/detection/coco_utils.py
"""
from pathlib import Path
import torch
import torch.utils.data
import torchvision
from pycocotools import mask as coco_mask
import datasets.transforms as T
class CocoDetection(torchvision.datasets.CocoDetection):
    """torchvision CocoDetection with DETR's target conversion and transforms.

    Each item is (image, target) where the raw COCO annotations have been
    converted to tensors by ConvertCocoPolysToMask and then passed through
    the joint image/target transforms.
    """
    def __init__(self, img_folder, ann_file, transforms, return_masks):
        super(CocoDetection, self).__init__(img_folder, ann_file)
        self._transforms = transforms
        self.prepare = ConvertCocoPolysToMask(return_masks)
    def __getitem__(self, idx):
        img, target = super(CocoDetection, self).__getitem__(idx)
        image_id = self.ids[idx]
        # wrap raw annotation list into the dict shape `prepare` expects
        target = {'image_id': image_id, 'annotations': target}
        img, target = self.prepare(img, target)
        if self._transforms is not None:
            img, target = self._transforms(img, target)
        return img, target
def convert_coco_poly_to_mask(segmentations, height, width):
    """Decode COCO polygon segmentations into a stacked [N, H, W] mask tensor.

    NOTE(review): non-empty output is bool (from .any), while the empty case
    returns uint8 — confirm downstream code tolerates both dtypes.
    """
    masks = []
    for polygons in segmentations:
        rles = coco_mask.frPyObjects(polygons, height, width)
        mask = coco_mask.decode(rles)
        if len(mask.shape) < 3:
            # single polygon decodes to [H, W]; add a parts axis
            mask = mask[..., None]
        mask = torch.as_tensor(mask, dtype=torch.uint8)
        # union over the polygon parts of one object
        mask = mask.any(dim=2)
        masks.append(mask)
    if masks:
        masks = torch.stack(masks, dim=0)
    else:
        masks = torch.zeros((0, height, width), dtype=torch.uint8)
    return masks
class ConvertCocoPolysToMask(object):
    """Convert raw COCO annotations into DETR's tensor target dict.

    Drops crowd annotations and degenerate boxes, converts boxes from xywh to
    clamped xyxy, and optionally decodes segmentation masks.
    """
    def __init__(self, return_masks=False):
        self.return_masks = return_masks
    def __call__(self, image, target):
        w, h = image.size
        image_id = target["image_id"]
        image_id = torch.tensor([image_id])
        anno = target["annotations"]
        # discard crowd regions
        anno = [obj for obj in anno if 'iscrowd' not in obj or obj['iscrowd'] == 0]
        boxes = [obj["bbox"] for obj in anno]
        # guard against no boxes via resizing
        boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4)
        # xywh -> xyxy, then clamp to the image
        boxes[:, 2:] += boxes[:, :2]
        boxes[:, 0::2].clamp_(min=0, max=w)
        boxes[:, 1::2].clamp_(min=0, max=h)
        classes = [obj["category_id"] for obj in anno]
        classes = torch.tensor(classes, dtype=torch.int64)
        if self.return_masks:
            segmentations = [obj["segmentation"] for obj in anno]
            masks = convert_coco_poly_to_mask(segmentations, h, w)
        keypoints = None
        if anno and "keypoints" in anno[0]:
            keypoints = [obj["keypoints"] for obj in anno]
            keypoints = torch.as_tensor(keypoints, dtype=torch.float32)
            num_keypoints = keypoints.shape[0]
            if num_keypoints:
                # [N, K, 3]: (x, y, visibility) per keypoint
                keypoints = keypoints.view(num_keypoints, -1, 3)
        # keep only boxes with positive width and height
        keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
        boxes = boxes[keep]
        classes = classes[keep]
        if self.return_masks:
            masks = masks[keep]
        if keypoints is not None:
            keypoints = keypoints[keep]
        target = {}
        target["boxes"] = boxes
        target["labels"] = classes
        if self.return_masks:
            target["masks"] = masks
        target["image_id"] = image_id
        if keypoints is not None:
            target["keypoints"] = keypoints
        # for conversion to coco api
        area = torch.tensor([obj["area"] for obj in anno])
        iscrowd = torch.tensor([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in anno])
        target["area"] = area[keep]
        target["iscrowd"] = iscrowd[keep]
        target["orig_size"] = torch.as_tensor([int(h), int(w)])
        target["size"] = torch.as_tensor([int(h), int(w)])
        return image, target
def make_coco_transforms(image_set):
    """Build the DETR augmentation pipeline for a split.

    'train': random flip plus either a plain multi-scale resize or a
    resize/crop/resize combo, then normalization.
    'val': single-scale resize (800, max side 1333) plus normalization.
    """
    # ImageNet mean/std normalization applied after ToTensor
    normalize = T.Compose([
        T.ToTensor(),
        T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    scales = [480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800]
    if image_set == 'train':
        return T.Compose([
            T.RandomHorizontalFlip(),
            T.RandomSelect(
                T.RandomResize(scales, max_size=1333),
                T.Compose([
                    T.RandomResize([400, 500, 600]),
                    T.RandomSizeCrop(384, 600),
                    T.RandomResize(scales, max_size=1333),
                ])
            ),
            normalize,
        ])
    if image_set == 'val':
        return T.Compose([
            T.RandomResize([800], max_size=1333),
            normalize,
        ])
    raise ValueError(f'unknown {image_set}')
def build(image_set, args):
    """Construct the COCO detection dataset for the requested split.

    Args:
        image_set: ``'train'`` or ``'val'``.
        args: namespace providing ``coco_path`` (dataset root) and ``masks``
            (whether to decode segmentation masks).

    Returns:
        A ``CocoDetection`` dataset wired with the split's transforms.
    """
    root = Path(args.coco_path)
    assert root.exists(), f'provided COCO path {root} does not exist'

    mode = 'instances'
    split_to_paths = {
        "train": (root / "train2017", root / "annotations" / f'{mode}_train2017.json'),
        "val": (root / "val2017", root / "annotations" / f'{mode}_val2017.json'),
    }
    img_folder, ann_file = split_to_paths[image_set]

    return CocoDetection(
        img_folder,
        ann_file,
        transforms=make_coco_transforms(image_set),
        return_masks=args.masks,
    )
| 5,253 | 32.044025 | 118 | py |
pmb-nll | pmb-nll-main/src/detr/datasets/transforms.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Transforms and data augmentation for both image + bbox.
"""
import random
import PIL
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as F
from util.box_ops import box_xyxy_to_cxcywh
from util.misc import interpolate
def crop(image, target, region):
    """Crop *image* to *region* = (top, left, height, width) and update *target*.

    Boxes are translated into the crop frame and clipped to its extent, masks
    are sliced, and instances whose box (or mask, if no boxes exist) collapses
    to zero area are dropped from every per-instance field.
    """
    top, left, height, width = region
    cropped_image = F.crop(image, *region)

    target = target.copy()
    # NOTE: the pre-crop size is intentionally not retained here.
    target["size"] = torch.tensor([height, width])

    fields = ["labels", "area", "iscrowd"]

    if "boxes" in target:
        crop_extent = torch.as_tensor([width, height], dtype=torch.float32)
        shifted = target["boxes"] - torch.as_tensor([left, top, left, top])
        # Clip each (x, y) corner pair to [0, crop_extent].
        clipped = torch.min(shifted.reshape(-1, 2, 2), crop_extent).clamp(min=0)
        target["area"] = (clipped[:, 1, :] - clipped[:, 0, :]).prod(dim=1)
        target["boxes"] = clipped.reshape(-1, 4)
        fields.append("boxes")

    if "masks" in target:
        # FIXME should we update the area here if there are no boxes?
        target["masks"] = target["masks"][:, top:top + height, left:left + width]
        fields.append("masks")

    # Remove instances whose box or mask ended up empty after cropping.
    if "boxes" in target or "masks" in target:
        # Favor the box criterion when boxes exist (matches the previous
        # implementation's behavior).
        if "boxes" in target:
            corners = target["boxes"].reshape(-1, 2, 2)
            keep = torch.all(corners[:, 1, :] > corners[:, 0, :], dim=1)
        else:
            keep = target["masks"].flatten(1).any(1)

        for field in fields:
            target[field] = target[field][keep]

    return cropped_image, target
def hflip(image, target):
    """Horizontally flip *image* and mirror the boxes/masks in *target*."""
    flipped_image = F.hflip(image)

    width, _ = image.size

    target = target.copy()
    if "boxes" in target:
        # Mirror x-coordinates (x' = width - x), swapping left/right corners
        # so boxes stay in (x1, y1, x2, y2) order.
        mirrored = target["boxes"][:, [2, 1, 0, 3]] * torch.as_tensor(
            [-1, 1, -1, 1]) + torch.as_tensor([width, 0, width, 0])
        target["boxes"] = mirrored

    if "masks" in target:
        target["masks"] = target["masks"].flip(-1)

    return flipped_image, target
def resize(image, target, size, max_size=None):
    """Resize *image* and rescale boxes/area/masks in *target* to match.

    ``size`` is either a scalar (target length of the shorter side) or a
    ``(w, h)`` tuple; ``max_size`` optionally bounds the longer side in the
    scalar case.
    """

    def _short_side_to_hw(image_size, short_side, max_size=None):
        # Keep aspect ratio; shrink the request if the long side would
        # exceed max_size.
        w, h = image_size
        if max_size is not None:
            min_original_size = float(min((w, h)))
            max_original_size = float(max((w, h)))
            if max_original_size / min_original_size * short_side > max_size:
                short_side = int(
                    round(max_size * min_original_size / max_original_size))

        # Already at the requested shorter side: no change.
        if (w <= h and w == short_side) or (h <= w and h == short_side):
            return (h, w)

        if w < h:
            ow = short_side
            oh = int(short_side * h / w)
        else:
            oh = short_side
            ow = int(short_side * w / h)

        return (oh, ow)

    if isinstance(size, (list, tuple)):
        size = size[::-1]  # (w, h) -> (h, w) expected by F.resize
    else:
        size = _short_side_to_hw(image.size, size, max_size)

    rescaled_image = F.resize(image, size)

    if target is None:
        return rescaled_image, None

    ratio_width, ratio_height = (
        float(new) / float(old)
        for new, old in zip(rescaled_image.size, image.size)
    )

    target = target.copy()
    if "boxes" in target:
        scale = torch.as_tensor(
            [ratio_width, ratio_height, ratio_width, ratio_height])
        target["boxes"] = target["boxes"] * scale

    if "area" in target:
        target["area"] = target["area"] * (ratio_width * ratio_height)

    h, w = size
    target["size"] = torch.tensor([h, w])

    if "masks" in target:
        target['masks'] = interpolate(
            target['masks'][:, None].float(), size, mode="nearest")[:, 0] > 0.5

    return rescaled_image, target
def pad(image, target, padding):
    """Pad *image* by ``padding = (pad_right, pad_bottom)`` (bottom/right only)."""
    pad_right, pad_bottom = padding[0], padding[1]
    padded_image = F.pad(image, (0, 0, pad_right, pad_bottom))

    if target is None:
        return padded_image, None

    target = target.copy()
    # Record the new (h, w); the pre-pad size is not preserved.
    target["size"] = torch.tensor(padded_image.size[::-1])

    if "masks" in target:
        target['masks'] = torch.nn.functional.pad(
            target['masks'], (0, pad_right, 0, pad_bottom))

    return padded_image, target
class RandomCrop(object):
    """Crop a random region of fixed ``size`` from the image, updating the target."""

    def __init__(self, size):
        self.size = size

    def __call__(self, img, target):
        # Delegate region sampling to torchvision, cropping via our `crop`.
        return crop(img, target, T.RandomCrop.get_params(img, self.size))
class RandomSizeCrop(object):
def __init__(self, min_size: int, max_size: int):
self.min_size = min_size
self.max_size = max_size
def __call__(self, img: PIL.Image.Image, target: dict):
w = random.randint(self.min_size, min(img.width, self.max_size))
h = random.randint(self.min_size, min(img.height, self.max_size))
region = T.RandomCrop.get_params(img, [h, w])
return crop(img, target, region)
class CenterCrop(object):
    """Deterministically crop the central ``size = (h, w)`` region."""

    def __init__(self, size):
        self.size = size

    def __call__(self, img, target):
        img_w, img_h = img.size
        crop_h, crop_w = self.size
        top = int(round((img_h - crop_h) / 2.))
        left = int(round((img_w - crop_w) / 2.))
        return crop(img, target, (top, left, crop_h, crop_w))
class RandomHorizontalFlip(object):
    """Flip image and target horizontally with probability ``p``."""

    def __init__(self, p=0.5):
        self.p = p

    def __call__(self, img, target):
        # Guard clause: most of the time (1 - p) nothing changes.
        if random.random() >= self.p:
            return img, target
        return hflip(img, target)
class RandomResize(object):
    """Resize to a size picked uniformly at random from ``sizes``."""

    def __init__(self, sizes, max_size=None):
        assert isinstance(sizes, (list, tuple))
        self.sizes = sizes
        self.max_size = max_size

    def __call__(self, img, target=None):
        chosen = random.choice(self.sizes)
        return resize(img, target, chosen, self.max_size)
class RandomPad(object):
    """Pad bottom/right by independent random amounts in [0, max_pad]."""

    def __init__(self, max_pad):
        self.max_pad = max_pad

    def __call__(self, img, target):
        # Draw x-padding first, then y-padding (keeps the RNG draw order).
        amounts = (random.randint(0, self.max_pad),
                   random.randint(0, self.max_pad))
        return pad(img, target, amounts)
class RandomSelect(object):
    """Apply ``transforms1`` with probability ``p``, else ``transforms2``."""

    def __init__(self, transforms1, transforms2, p=0.5):
        self.transforms1 = transforms1
        self.transforms2 = transforms2
        self.p = p

    def __call__(self, img, target):
        chosen = self.transforms1 if random.random() < self.p else self.transforms2
        return chosen(img, target)
class ToTensor(object):
    """Convert the image to a tensor; the target passes through untouched."""

    def __call__(self, img, target):
        tensor = F.to_tensor(img)
        return tensor, target
class RandomErasing(object):
    """Apply ``torchvision.transforms.RandomErasing`` to the image only."""

    def __init__(self, *args, **kwargs):
        # Forward all arguments to the torchvision implementation.
        self.eraser = T.RandomErasing(*args, **kwargs)

    def __call__(self, img, target):
        return self.eraser(img), target
class Normalize(object):
    """Normalize the image and convert boxes to relative [cx, cy, w, h]."""

    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, image, target=None):
        image = F.normalize(image, mean=self.mean, std=self.std)
        if target is None:
            return image, None

        target = target.copy()
        h, w = image.shape[-2:]
        if "boxes" in target:
            # xyxy -> cxcywh, then divide by image size to get [0, 1] coords.
            relative = box_xyxy_to_cxcywh(target["boxes"])
            target["boxes"] = relative / torch.tensor(
                [w, h, w, h], dtype=torch.float32)
        return image, target
class Compose(object):
    """Chain (image, target) transforms, threading both values through each step."""

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, image, target):
        for transform in self.transforms:
            image, target = transform(image, target)
        return image, target

    def __repr__(self):
        # One line per transform, indented, wrapped in "Compose( ... )".
        lines = [self.__class__.__name__ + "("]
        lines += ["    {0}".format(t) for t in self.transforms]
        return "\n".join(lines) + "\n)"
| 8,524 | 29.776173 | 104 | py |
pmb-nll | pmb-nll-main/src/offline_evaluation/compute_probabilistic_metrics.py | import json
import os
import pickle
from collections import defaultdict
import numpy as np
import torch
import torch.distributions as distributions
import tqdm
# Project imports
from core.evaluation_tools import evaluation_utils, scoring_rules
from core.evaluation_tools.evaluation_utils import (
calculate_iou,
get_test_thing_dataset_id_to_train_contiguous_id_dict,
)
from core.setup import setup_arg_parser, setup_config
from detectron2.checkpoint import DetectionCheckpointer
# Detectron imports
from detectron2.data import MetadataCatalog
from detectron2.engine import launch
from detectron2.modeling import build_model
from matplotlib import image
from matplotlib import pyplot as plt
from matplotlib.pyplot import hist
from prettytable import PrettyTable
from probabilistic_inference.inference_utils import get_inference_output_dir
from probabilistic_modeling.losses import (
compute_negative_log_likelihood,
negative_log_likelihood,
)
from probabilistic_modeling.modeling_utils import (
PoissonPointProcessGMM,
PoissonPointProcessIntensityFunction,
PoissonPointProcessUniform,
PoissonPointUnion,
)
from scipy.spatial.distance import mahalanobis
# Evaluate on GPU when one is available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Pixel-area buckets used for per-size NLL breakdowns
# (COCO-style small/medium/large thresholds of 32^2 and 96^2 pixels).
AREA_LIMITS = {"small": [0, 1024], "medium": [1024, 9216], "large": [9216, np.inf]}
def try_squeeze(to_squeeze, dim: int):
    """Squeeze dimension ``dim`` only when the tensor has more than ``dim`` dims.

    Tensors with too few dimensions are returned unchanged instead of raising.
    """
    if len(to_squeeze.shape) > dim:
        return to_squeeze.squeeze(dim)
    return to_squeeze
def print_nll_results_by_size(
    out, gt_boxes, inference_output_dir, area_limits=AREA_LIMITS, prefix=""
):
    """Print and plot NLL decomposition statistics bucketed by GT box area.

    Args:
        out: mapping image id -> dict with "associations" and "decomposition"
            entries as produced by ``compute_pmb_nll``.
        gt_boxes: mapping image id -> ground-truth boxes; area is computed as
            (x2 - x1) * (y2 - y1), so boxes are assumed (x1, y1, x2, y2).
        inference_output_dir: directory where histogram SVGs are written.
        area_limits: mapping bucket name -> [min_area, max_area] in pixels.
        prefix: filename prefix for the saved plots.
    """
    # Plot titles keyed by decomposition key (singular and plural variants).
    title_dict = {
        "matched_bernoulli_clss": "Matched Bernoulli Classification",
        "matched_bernoulli_cls": "Matched Bernoulli Classification",
        "matched_bernoulli_reg": "Matched Bernoulli Regression",
        "matched_bernoulli_regs": "Matched Bernoulli Regression",
        "matched_bernoulli": "Matched Bernoulli",
        "matched_bernoullis": "Matched Bernoulli",
        "matched_ppp": "Matched PPP",
        "matched_ppps": "Matched PPP",
    }

    def plot_histogram(
        size_decomp, decomp_key, area_limits, filepath, max_limit=40, nbins=100
    ):
        """Overlay per-size histograms of one decomposition key and save as SVG."""
        plt.clf()
        for size in size_decomp.keys():
            # Clip values so outliers collapse into the last bin.
            hist(
                np.clip(size_decomp[size][decomp_key], 0, max_limit),
                nbins,
                alpha=0.33,
                label=size,
                ec=(0, 0, 0, 0),
                lw=0.0,
            )
        plt.title(title_dict[decomp_key])
        plt.legend()
        plt.xlim(0, max_limit)
        plt.savefig(
            os.path.join(filepath, f"{prefix}{decomp_key}.svg"),
            format="svg",
            transparent=True,
        )

    # size bucket name -> {decomposition key -> list of per-instance values}
    size_decomp = {size: defaultdict(list) for size in area_limits.keys()}
    for img_id, out_dict in out.items():
        boxes = gt_boxes[img_id].reshape(-1, 4)
        decomp = out_dict["decomposition"]
        # Remove unmatched detections and sort in gt-order instead
        association = np.array(out_dict["associations"][0])
        if not len(association):
            continue
        # Keep only (prediction, gt) pairs that were actually matched.
        association = association[association[:, 1] > -1]
        areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
        num_gts = len(areas)
        num_preds = (
            decomp["num_unmatched_bernoulli"][0] + decomp["num_matched_bernoulli"][0]
        )
        # Associations whose prediction index is past the Bernoulli block
        # belong to the PPP component.
        ppp_association = association[association[:, 0] >= num_preds]
        for size, limit in area_limits.items():
            # GT indices whose area falls strictly inside this size bucket.
            mask = torch.logical_and(limit[0] < areas, limit[1] > areas)
            gt_idx = mask.nonzero()
            # `assoc[1] in gt_idx` uses tensor __contains__, i.e. True when
            # any selected gt index equals assoc[1].
            matched_bernoulli_regs = [
                comp
                for assoc, comp in zip(association, decomp["matched_bernoulli_regs"][0])
                if assoc[1] in gt_idx
            ]
            size_decomp[size]["matched_bernoulli_regs"] += matched_bernoulli_regs
            size_decomp[size]["matched_bernoulli_reg"] += [sum(matched_bernoulli_regs)]
            matched_bernoulli_clss = [
                comp
                for assoc, comp in zip(association, decomp["matched_bernoulli_clss"][0])
                if assoc[1] in gt_idx
            ]
            size_decomp[size]["matched_bernoulli_clss"] += matched_bernoulli_clss
            size_decomp[size]["matched_bernoulli_cls"] += [sum(matched_bernoulli_clss)]
            # Per-instance total = classification part + regression part.
            size_decomp[size]["matched_bernoullis"] += [
                cls_part + reg_part
                for cls_part, reg_part in zip(
                    matched_bernoulli_clss, matched_bernoulli_regs
                )
            ]
            size_decomp[size]["matched_bernoulli"] += [
                sum(matched_bernoulli_regs) + sum(matched_bernoulli_clss)
            ]
            matched_ppps = [
                comp
                for assoc, comp in zip(ppp_association, decomp["matched_ppps"][0])
                if assoc[1] in gt_idx
            ]
            size_decomp[size]["matched_ppps"] += matched_ppps
            size_decomp[size]["matched_ppp"] += [sum(matched_ppps)]
    # For each component: print "per-image mean / per-instance mean".
    for size, limit in area_limits.items():
        print(f"******** Size: {size} ********")
        print(
            f"Mean matched Bernoulli: {np.mean(size_decomp[size]['matched_bernoulli']):.2f}/",
            end="",
        )
        print(f"{np.mean(size_decomp[size]['matched_bernoullis']):.2f}")
        print(
            f"Mean matched Bernoulli reg: {np.mean(size_decomp[size]['matched_bernoulli_reg']):.2f}/",
            end="",
        )
        print(f"{np.mean(size_decomp[size]['matched_bernoulli_regs']):.2f}")
        print(
            f"Mean matched Bernoulli cls: {np.mean(size_decomp[size]['matched_bernoulli_cls']):.2f}/",
            end="",
        )
        print(f"{np.mean(size_decomp[size]['matched_bernoulli_clss']):.2f}")
        print(
            f"Mean matched PPP: {np.mean(size_decomp[size]['matched_ppp']):.2f}/",
            end="",
        )
        print(f"{np.mean(size_decomp[size]['matched_ppps']):.2f}")
        print(f"**************************")
    for decomp_key in size_decomp[list(area_limits.keys())[0]]:
        plot_histogram(size_decomp, decomp_key, area_limits, inference_output_dir)
def print_nll_results(out):
    """Print aggregate PMB negative-log-likelihood statistics.

    Args:
        out: mapping image id -> dict with a scalar ``"nll"`` and a
            ``"decomposition"`` dict whose component values are stored as
            one-element lists (except the scalar ``"ppp_integral"``).
    """
    # Only strictly positive NLLs enter the summary statistics.
    nlls = torch.tensor([entry["nll"] for entry in out.values() if entry["nll"] > 0])
    print("*" * 40)
    print("*" * 12 + "PMB NLL results" + "*" * 13)
    print("*" * 40)
    print(f"Min NLL: {nlls.min().item()}")
    print(f"Mean NLL: {nlls.mean().item()}")
    print(f"Median NLL: {nlls.median().item()}")
    print(f"Max NLL: {nlls.max().item()}")
    print(f"Binned NLL: {torch.histc(nlls, bins=20).tolist()}")
    print("*" * 40)

    # Flatten the per-image decomposition terms into one array per key.
    keys = (
        "matched_bernoulli",
        "matched_bernoulli_reg",
        "matched_bernoulli_cls",
        "num_matched_bernoulli",
        "unmatched_bernoulli",
        "num_unmatched_bernoulli",
        "matched_ppp",
        "num_matched_ppp",
    )
    per_key = {key: [] for key in keys}
    ppp_integral = []
    for out_dict in out.values():
        decomp = out_dict["decomposition"]
        for key in keys:
            per_key[key].append(decomp[key][0])
        ppp_integral.append(decomp["ppp_integral"])
    per_key = {key: np.array(values) for key, values in per_key.items()}

    matched_bernoulli = per_key["matched_bernoulli"]
    matched_bernoulli_reg = per_key["matched_bernoulli_reg"]
    matched_bernoulli_cls = per_key["matched_bernoulli_cls"]
    num_matched_bernoulli = per_key["num_matched_bernoulli"]
    unmatched_bernoulli = per_key["unmatched_bernoulli"]
    num_unmatched_bernoulli = per_key["num_unmatched_bernoulli"]

    # Drop infinite matched-PPP contributions (and their counts) before
    # averaging.
    finite = per_key["matched_ppp"] < np.inf
    num_matched_ppp = per_key["num_matched_ppp"][finite]
    matched_ppp = per_key["matched_ppp"][finite]

    # Each line prints "per-image mean / per-instance (normalized) mean".
    bern_total = num_matched_bernoulli.sum()
    print(f"Mean matched Bernoulli: {np.mean(matched_bernoulli):.2f}/", end="")
    print(f"{matched_bernoulli.sum() / bern_total:.2f}")
    print(f"Mean matched Bernoulli reg: {np.mean(matched_bernoulli_reg):.2f}/", end="")
    print(f"{matched_bernoulli_reg.sum() / bern_total:.2f}")
    print(f"Mean matched Bernoulli cls: {np.mean(matched_bernoulli_cls):.2f}/", end="")
    print(f"{matched_bernoulli_cls.sum() / bern_total:.2f}")
    print(f"Mean unmatched Bernoulli: {np.mean(unmatched_bernoulli):.2f}/", end="")
    print(f"{unmatched_bernoulli.sum() / num_unmatched_bernoulli.sum():.2f}")
    print(f"Mean matched PPP: {np.mean(matched_ppp):.2f}/", end="")
    print(f"{matched_ppp.sum() / num_matched_ppp.sum():.2f}")
    print(f"Mean PPP integral: {np.mean(ppp_integral):.2f}")
    print("*" * 40)
def plot_nll_results(out, inference_output_dir, prefix=""):
    """Save histograms of the per-instance NLL decomposition components.

    Each component list is clipped to a fixed display range and written as an
    SVG under ``inference_output_dir`` using the given filename ``prefix``.
    """
    # Per-instance component lists flattened across all images.
    matched_bernoulli = []
    matched_bernoulli_reg = []
    matched_bernoulli_cls = []
    num_matched_bernoulli = []
    unmatched_bernoulli = []
    num_unmatched_bernoulli = []
    matched_ppp = []
    num_matched_ppp = []
    ppp_integral = []
    for img_id, out_dict in out.items():
        decomp = out_dict["decomposition"]
        # Per-instance total = regression part + classification part.
        matched_bernoulli += [
            reg + classification
            for reg, classification in zip(
                decomp["matched_bernoulli_regs"][0],
                decomp["matched_bernoulli_clss"][0],
            )
        ]
        matched_bernoulli_reg += decomp["matched_bernoulli_regs"][0]
        matched_bernoulli_cls += decomp["matched_bernoulli_clss"][0]
        num_matched_bernoulli.append(decomp["num_matched_bernoulli"][0])
        unmatched_bernoulli += decomp["unmatched_bernoullis"][0]
        num_unmatched_bernoulli.append(decomp["num_unmatched_bernoulli"][0])
        matched_ppp += decomp["matched_ppps"][0]
        num_matched_ppp.append(decomp["num_matched_ppp"][0])
        ppp_integral.append(decomp["ppp_integral"])
    # Values are clipped so outliers collapse into the last bin of each plot.
    plt.figure()
    plt.hist(np.clip(matched_bernoulli, 0, 40), 100, ec=(0, 0, 0, 0), lw=0.0)
    plt.xlim(0, 40)
    plt.title("Matched Bernoulli")
    plt.savefig(
        os.path.join(inference_output_dir, f"{prefix}matched_bernoulli_histogram.svg"),
        format="svg",
        transparent=True,
    )
    plt.clf()
    plt.hist(np.clip(matched_bernoulli_reg, 0, 40), 100, ec=(0, 0, 0, 0), lw=0.0)
    plt.xlim(0, 40)
    plt.title("Matched Bernoulli regression")
    plt.savefig(
        os.path.join(
            inference_output_dir, f"{prefix}matched_bernoulli_reg_histogram.svg"
        ),
        format="svg",
        transparent=True,
    )
    plt.clf()
    plt.hist(np.clip(matched_bernoulli_cls, 0, 5), 100, ec=(0, 0, 0, 0), lw=0.0)
    plt.xlim(0, 5)
    plt.title("Matched Bernoulli Classification")
    plt.savefig(
        os.path.join(
            inference_output_dir, f"{prefix}matched_bernoulli_cls_histogram.svg"
        ),
        format="svg",
        transparent=True,
    )
    plt.clf()
    plt.hist(np.clip(unmatched_bernoulli, 0, 10), 100, ec=(0, 0, 0, 0), lw=0.0)
    plt.xlim(0, 10)
    plt.title("Unmatched Bernoulli")
    plt.savefig(
        os.path.join(
            inference_output_dir, f"{prefix}unmatched_bernoulli_histogram.svg"
        ),
        format="svg",
        transparent=True,
    )
    plt.clf()
    plt.hist(np.clip(matched_ppp, 0, 40), 100, ec=(0, 0, 0, 0), lw=0.0)
    plt.xlim(0, 40)
    plt.title("Matched PPP")
    plt.savefig(
        os.path.join(inference_output_dir, f"{prefix}matched_ppp_histogram.svg"),
        format="svg",
        transparent=True,
    )
def compute_pmb_nll(
    cfg,
    inference_output_dir,
    cat_mapping_dict,
    min_allowed_score=0.0,
    print_results=True,
    plot_results=True,
    print_by_size=True,
    load_nll_results=True,
):
    """Compute (or load cached) per-image PMB negative log-likelihood results.

    For each image, the detections are split into a Bernoulli component and a
    Poisson point process (PPP) component, and ``negative_log_likelihood`` is
    evaluated against the ground truth.

    Args:
        cfg: detectron2-style config node.
        inference_output_dir: directory with preprocessed predictions; the
            result pickle is written here as well.
        cat_mapping_dict: test-dataset category id -> contiguous train id.
        min_allowed_score: detection-score threshold used when loading
            predictions (also part of the cache filename).
        print_results / plot_results / print_by_size: which summaries to emit.
        load_nll_results: when True, reuse an existing result pickle.

    Returns:
        Dict mapping image id -> {"nll", "associations", "decomposition"}.
    """
    # Cache file is keyed by the score threshold used to filter detections.
    results_file = os.path.join(
        inference_output_dir, f"nll_results_minallowedscore_{min_allowed_score}.pkl"
    )
    # Fast path: reuse a previous run's pickle instead of re-evaluating.
    if load_nll_results and os.path.isfile(results_file):
        with open(results_file, "rb") as f:
            out = pickle.load(f)
        if print_results:
            print_nll_results(out)
        if plot_results:
            plot_nll_results(out, inference_output_dir)
        if print_by_size:
            # Ground-truth boxes are needed for the per-size breakdown.
            (
                preprocessed_predicted_instances,
                preprocessed_gt_instances,
            ) = evaluation_utils.get_per_frame_preprocessed_instances(
                cfg, inference_output_dir, min_allowed_score
            )
            gt_boxes = preprocessed_gt_instances["gt_boxes"]
            print_nll_results_by_size(out, gt_boxes, inference_output_dir)
        return out
    with torch.no_grad():
        # Load predictions and GT
        (
            preprocessed_predicted_instances,
            preprocessed_gt_instances,
        ) = evaluation_utils.get_per_frame_preprocessed_instances(
            cfg, inference_output_dir, min_allowed_score
        )
        predicted_box_means = preprocessed_predicted_instances["predicted_boxes"]
        predicted_cls_probs = preprocessed_predicted_instances["predicted_cls_probs"]
        predicted_box_covariances = preprocessed_predicted_instances[
            "predicted_covar_mats"
        ]
        # PPP intensity may be stored under either key, or be absent entirely.
        if "ppp_weights" in preprocessed_predicted_instances:
            predicted_ppp = preprocessed_predicted_instances["ppp_weights"]
        elif "log_ppp_intensity" in preprocessed_predicted_instances:
            predicted_ppp = preprocessed_predicted_instances["log_ppp_intensity"]
        else:
            predicted_ppp = defaultdict(list)
        if cfg.PROBABILISTIC_INFERENCE.LOAD_PPP_FROM_MODEL:
            # Rebuild the model just to extract its PPP intensity function;
            # per-image PPP weights are then ignored (defaultdict(int)).
            model = build_model(cfg)
            DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
                cfg.MODEL.WEIGHTS, resume=True
            )
            ppp = model.get_ppp_intensity_function()
            ppp.set_normalization_of_bboxes(True)
            ppp.update_distribution()
            predicted_ppp = defaultdict(int)
        image_sizes = preprocessed_predicted_instances["image_size"]
        gt_box_means = preprocessed_gt_instances["gt_boxes"]
        gt_cat_idxs = preprocessed_gt_instances["gt_cat_idxs"]
        # Initialize results
        out = defaultdict(dict)
        print("[NLLOD] Started evaluating NLL for dataset.")
        with tqdm.tqdm(total=len(predicted_box_means)) as pbar:
            for image_id in predicted_box_means:
                # Union of PPP components contributing to this image.
                ppp_mix = PoissonPointUnion()
                pbar.update(1)
                image_size = image_sizes[image_id]
                ################ GT STUFF ###########################
                gt_boxes = gt_box_means[image_id]
                if len(gt_boxes.shape) < 2:
                    gt_boxes = gt_boxes.view(-1, 4)
                # Map dataset category ids to contiguous training ids.
                gt_classes = (
                    torch.as_tensor(
                        [
                            cat_mapping_dict[cat_id.item()]
                            for cat_id in gt_cat_idxs[image_id].long().view(-1, 1)
                        ]
                    )
                    .long()
                    .to(device)
                )
                ################# PREDICTION STUFF ####################
                # Clamp probabilities away from 0/1 for numerical stability.
                pred_cls_probs = predicted_cls_probs[image_id].clamp(1e-6, 1 - 1e-6)
                # RetinaNet has no explicit background column; RCNN/DETR do
                # (background stored in the last column).
                if cfg.MODEL.META_ARCHITECTURE == "ProbabilisticRetinaNet":
                    num_classes = pred_cls_probs.shape[-1]
                    scores_have_bg_cls = False
                else:
                    num_classes = pred_cls_probs.shape[-1] - 1
                    scores_have_bg_cls = True
                # Replicate each box mean/covariance once per class.
                pred_box_means = (
                    predicted_box_means[image_id].unsqueeze(1).repeat(1, num_classes, 1)
                )
                pred_box_covs = predicted_box_covariances[image_id]
                pred_box_covs = pred_box_covs.unsqueeze(1).repeat(1, num_classes, 1, 1)
                pred_ppp_weights = predicted_ppp[image_id]
                if not cfg.PROBABILISTIC_INFERENCE.TREAT_AS_MB:
                    if cfg.PROBABILISTIC_INFERENCE.PPP_CONFIDENCE_THRES > 0:
                        # Low-confidence detections are moved from the
                        # Bernoulli component into a GMM-style PPP component.
                        if scores_have_bg_cls:
                            max_conf = 1 - pred_cls_probs[..., -1]
                        else:
                            max_conf = pred_cls_probs[..., :num_classes].max(dim=1)[0]
                        ppp_preds_idx = (
                            max_conf <= cfg.PROBABILISTIC_INFERENCE.PPP_CONFIDENCE_THRES
                        )
                        if not ppp_preds_idx.any():
                            # No low-confidence detections: an empty PPP
                            # (log-intensity -inf) keeps the union well-formed.
                            ppp_preds = PoissonPointProcessIntensityFunction(
                                cfg, log_intensity=-np.inf, device=gt_boxes.device
                            )
                        else:
                            mixture_dict = {}
                            mixture_dict["weights"] = max_conf[ppp_preds_idx]
                            mixture_dict["means"] = pred_box_means[ppp_preds_idx, 0]
                            mixture_dict["covs"] = pred_box_covs[ppp_preds_idx, 0]
                            mixture_dict["cls_probs"] = pred_cls_probs[
                                ppp_preds_idx, :num_classes
                            ]
                            mixture_dict[
                                "reg_dist_type"
                            ] = (
                                cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.DISTRIBUTION_TYPE
                            )
                            if (
                                cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.DISTRIBUTION_TYPE
                                == "gaussian"
                            ):
                                mixture_dict[
                                    "reg_dist"
                                ] = distributions.multivariate_normal.MultivariateNormal
                                mixture_dict["reg_kwargs"] = {
                                    "covariance_matrix": mixture_dict["covs"]
                                }
                            elif (
                                cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.DISTRIBUTION_TYPE
                                == "laplacian"
                            ):
                                mixture_dict["reg_dist"] = distributions.laplace.Laplace
                                # Laplace scale b from variance: var = 2 b^2.
                                mixture_dict["reg_kwargs"] = {
                                    "scale": torch.sqrt(
                                        mixture_dict["covs"].diagonal(dim1=-2, dim2=-1)
                                        / 2
                                    )
                                }
                            ppp_preds = PoissonPointProcessIntensityFunction(
                                cfg, predictions=mixture_dict
                            )
                            # Keep only high-confidence detections in the
                            # Bernoulli component.
                            pred_box_means = pred_box_means[ppp_preds_idx.logical_not()]
                            pred_box_covs = pred_box_covs[ppp_preds_idx.logical_not()]
                            pred_cls_probs = pred_cls_probs[ppp_preds_idx.logical_not()]
                        ppp_mix.add_ppp(ppp_preds)
                    if cfg.PROBABILISTIC_INFERENCE.LOAD_PPP_FROM_MODEL:
                        # Deliberate no-op: keep the PPP extracted from the
                        # model checkpoint above.
                        ppp = ppp
                    elif isinstance(pred_ppp_weights, dict):
                        ppp = PoissonPointProcessIntensityFunction(
                            cfg, device=gt_boxes.device
                        )
                        ppp.load_weights(pred_ppp_weights)
                    elif isinstance(pred_ppp_weights, torch.Tensor):
                        ppp = PoissonPointProcessIntensityFunction(
                            cfg, log_intensity=pred_ppp_weights, device=gt_boxes.device
                        )
                    else:
                        print(
                            "[NLLOD] PPP intensity function not found in annotations, using config"
                        )
                        pred_ppp_weights = -np.inf
                        ppp = PoissonPointProcessIntensityFunction(
                            cfg, log_intensity=pred_ppp_weights, device=gt_boxes.device
                        )
                else:
                    # Treat the model as a pure multi-Bernoulli: empty PPP.
                    pred_ppp_weights = -np.inf
                    ppp = PoissonPointProcessIntensityFunction(
                        cfg, log_intensity=pred_ppp_weights
                    )
                ppp_mix.add_ppp(ppp)
                # Factory for the box-regression likelihood used in the NLL.
                if (
                    cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.DISTRIBUTION_TYPE
                    == "gaussian"
                ):
                    reg_distribution = lambda x, y: distributions.multivariate_normal.MultivariateNormal(
                        x, y
                    )
                elif (
                    cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.DISTRIBUTION_TYPE
                    == "laplacian"
                ):
                    reg_distribution = lambda x, y: distributions.laplace.Laplace(
                        loc=x, scale=torch.sqrt(y.diagonal(dim1=-2, dim2=-1) / 2)
                    )
                else:
                    raise Exception(
                        f"Bounding box uncertainty distribution {cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.DISTRIBUTION_TYPE} is not available."
                    )
                try:
                    nll, associations, decompositions = negative_log_likelihood(
                        pred_box_scores=[pred_cls_probs],
                        pred_box_regs=[pred_box_means],
                        pred_box_covars=[pred_box_covs],
                        gt_boxes=[gt_boxes],
                        gt_classes=[gt_classes],
                        image_sizes=[image_size],
                        reg_distribution=reg_distribution,
                        intensity_func=ppp_mix,
                        max_n_solutions=cfg.MODEL.PROBABILISTIC_MODELING.NLL_MAX_NUM_SOLUTIONS,
                        training=False,
                        scores_have_bg_cls=scores_have_bg_cls,
                    )
                    out[image_id] = {
                        "nll": nll.item(),
                        "associations": associations[0].tolist(),
                        "decomposition": decompositions[0],
                    }
                except Exception as e:
                    # Best-effort: skip images whose NLL evaluation fails so a
                    # single bad image does not abort the whole dataset pass.
                    print(
                        f"Image {image_id} raised error. Will not be used to calculate NLL."
                    )
                    print(e)
        # Persist results for the fast path above.
        with open(
            os.path.join(
                inference_output_dir,
                f"nll_results_minallowedscore_{min_allowed_score}.pkl",
            ),
            "wb",
        ) as f:
            pickle.dump(out, f)
        if print_results:
            print_nll_results(out)
        if plot_results:
            plot_nll_results(out, inference_output_dir)
        if print_by_size:
            gt_boxes = preprocessed_gt_instances["gt_boxes"]
            print_nll_results_by_size(out, gt_boxes, inference_output_dir)
        return out
def main(
args,
cfg=None,
iou_min=None,
iou_correct=None,
min_allowed_score=None,
print_results=True,
inference_output_dir="",
image_ids=[],
):
# Setup config
if cfg is None:
cfg = setup_config(args, random_seed=args.random_seed, is_testing=True)
cfg.defrost()
cfg.ACTUAL_TEST_DATASET = args.test_dataset
# Setup torch device and num_threads
torch.set_num_threads(cfg.DATALOADER.NUM_WORKERS)
# Build path to gt instances and inference output
if inference_output_dir == "":
inference_output_dir = get_inference_output_dir(
cfg["OUTPUT_DIR"],
args.test_dataset,
args.inference_config,
args.image_corruption_level,
)
# Get thresholds to perform evaluation on
if iou_min is None:
iou_min = args.iou_min
if iou_correct is None:
iou_correct = args.iou_correct
if min_allowed_score is None or min_allowed_score < 0:
# Check if F-1 Score has been previously computed ON THE ORIGINAL
# DATASET such as COCO even when evaluating on OpenImages.
try:
with open(os.path.join(inference_output_dir, "mAP_res.txt"), "r") as f:
min_allowed_score = f.read().strip("][\n").split(", ")[-1]
min_allowed_score = round(float(min_allowed_score), 4)
except FileNotFoundError:
# If not, process all detections. Not recommended as the results might be influenced by very low scoring
# detections that would normally be removed in robotics/vision
# applications.
min_allowed_score = 0.0
# Get category mapping dictionary:
train_thing_dataset_id_to_contiguous_id = MetadataCatalog.get(
cfg.DATASETS.TRAIN[0]
).thing_dataset_id_to_contiguous_id
test_thing_dataset_id_to_contiguous_id = MetadataCatalog.get(
args.test_dataset
).thing_dataset_id_to_contiguous_id
cat_mapping_dict = get_test_thing_dataset_id_to_train_contiguous_id_dict(
cfg,
args,
train_thing_dataset_id_to_contiguous_id,
test_thing_dataset_id_to_contiguous_id,
)
# Compute NLL results
load_nll_results = len(image_ids) == 0
nll_results = compute_pmb_nll(
cfg, inference_output_dir, cat_mapping_dict, min_allowed_score, print_results, load_nll_results=load_nll_results
)
# Get matched results by either generating them or loading from file.
with torch.no_grad():
matched_results = evaluation_utils.get_matched_results(
cfg,
inference_output_dir,
iou_min=iou_min,
iou_correct=iou_correct,
min_allowed_score=min_allowed_score,
)
# Build preliminary dicts required for computing classification scores.
for matched_results_key in matched_results.keys():
if "gt_cat_idxs" in matched_results[matched_results_key].keys():
# First we convert the written things indices to contiguous
# indices.
gt_converted_cat_idxs = matched_results[matched_results_key][
"gt_cat_idxs"
]
gt_converted_cat_idxs = try_squeeze(gt_converted_cat_idxs, 1)
gt_converted_cat_idxs = torch.as_tensor(
[
cat_mapping_dict[class_idx.cpu().tolist()]
for class_idx in gt_converted_cat_idxs
]
).to(device)
matched_results[matched_results_key][
"gt_converted_cat_idxs"
] = gt_converted_cat_idxs.to(device)
if "predicted_cls_probs" in matched_results[matched_results_key].keys():
predicted_cls_probs = matched_results[matched_results_key][
"predicted_cls_probs"
]
# This is required for evaluation of retinanet based
# detections.
matched_results[matched_results_key][
"predicted_score_of_gt_category"
] = torch.gather(
predicted_cls_probs, 1, gt_converted_cat_idxs.unsqueeze(1)
).squeeze(
1
)
matched_results[matched_results_key][
"gt_cat_idxs"
] = gt_converted_cat_idxs
else:
if cfg.MODEL.META_ARCHITECTURE == "ProbabilisticRetinaNet":
# For false positives, the correct category is background. For retinanet, since no explicit
# background category is available, this value is computed as 1.0 - score of the predicted
# category.
predicted_class_probs, predicted_class_idx = matched_results[
matched_results_key
]["predicted_cls_probs"].max(1)
matched_results[matched_results_key][
"predicted_score_of_gt_category"
] = (1.0 - predicted_class_probs)
matched_results[matched_results_key][
"predicted_cat_idxs"
] = predicted_class_idx
else:
# For RCNN/DETR based networks, a background category is
# explicitly available.
matched_results[matched_results_key][
"predicted_score_of_gt_category"
] = matched_results[matched_results_key]["predicted_cls_probs"][
:, -1
]
_, predicted_class_idx = matched_results[matched_results_key][
"predicted_cls_probs"
][:, :-1].max(1)
matched_results[matched_results_key][
"predicted_cat_idxs"
] = predicted_class_idx
# Load the different detection partitions
true_positives = matched_results["true_positives"]
duplicates = matched_results["duplicates"]
localization_errors = matched_results["localization_errors"]
false_negatives = matched_results["false_negatives"]
false_positives = matched_results["false_positives"]
# Get the number of elements in each partition
num_true_positives = true_positives["predicted_box_means"].shape[0]
num_duplicates = duplicates["predicted_box_means"].shape[0]
num_localization_errors = localization_errors["predicted_box_means"].shape[0]
num_false_negatives = false_negatives["gt_box_means"].shape[0]
num_false_positives = false_positives["predicted_box_means"].shape[0]
per_class_output_list = []
for class_idx in cat_mapping_dict.values():
true_positives_valid_idxs = (
true_positives["gt_converted_cat_idxs"] == class_idx
)
localization_errors_valid_idxs = (
localization_errors["gt_converted_cat_idxs"] == class_idx
)
duplicates_valid_idxs = duplicates["gt_converted_cat_idxs"] == class_idx
false_positives_valid_idxs = (
false_positives["predicted_cat_idxs"] == class_idx
)
if cfg.MODEL.META_ARCHITECTURE == "ProbabilisticRetinaNet":
# Compute classification metrics for every partition
true_positives_cls_analysis = scoring_rules.sigmoid_compute_cls_scores(
true_positives, true_positives_valid_idxs
)
localization_errors_cls_analysis = (
scoring_rules.sigmoid_compute_cls_scores(
localization_errors, localization_errors_valid_idxs
)
)
duplicates_cls_analysis = scoring_rules.sigmoid_compute_cls_scores(
duplicates, duplicates_valid_idxs
)
false_positives_cls_analysis = scoring_rules.sigmoid_compute_cls_scores(
false_positives, false_positives_valid_idxs
)
else:
# Compute classification metrics for every partition
true_positives_cls_analysis = scoring_rules.softmax_compute_cls_scores(
true_positives, true_positives_valid_idxs
)
localization_errors_cls_analysis = (
scoring_rules.softmax_compute_cls_scores(
localization_errors, localization_errors_valid_idxs
)
)
duplicates_cls_analysis = scoring_rules.softmax_compute_cls_scores(
duplicates, duplicates_valid_idxs
)
false_positives_cls_analysis = scoring_rules.softmax_compute_cls_scores(
false_positives, false_positives_valid_idxs
)
# Compute regression metrics for every partition
true_positives_reg_analysis = scoring_rules.compute_reg_scores(
true_positives, true_positives_valid_idxs
)
localization_errors_reg_analysis = scoring_rules.compute_reg_scores(
localization_errors, localization_errors_valid_idxs
)
duplicates_reg_analysis = scoring_rules.compute_reg_scores(
duplicates, duplicates_valid_idxs
)
false_positives_reg_analysis = scoring_rules.compute_reg_scores_fn(
false_positives, false_positives_valid_idxs
)
per_class_output_list.append(
{
"true_positives_cls_analysis": true_positives_cls_analysis,
"true_positives_reg_analysis": true_positives_reg_analysis,
"localization_errors_cls_analysis": localization_errors_cls_analysis,
"localization_errors_reg_analysis": localization_errors_reg_analysis,
"duplicates_cls_analysis": duplicates_cls_analysis,
"duplicates_reg_analysis": duplicates_reg_analysis,
"false_positives_cls_analysis": false_positives_cls_analysis,
"false_positives_reg_analysis": false_positives_reg_analysis,
}
)
final_accumulated_output_dict = dict()
final_average_output_dict = dict()
for key in per_class_output_list[0].keys():
average_output_dict = dict()
for inner_key in per_class_output_list[0][key].keys():
collected_values = [
per_class_output[key][inner_key]
if per_class_output[key][inner_key] is not None
else np.NaN
for per_class_output in per_class_output_list
]
collected_values = np.array(collected_values)
if key in average_output_dict.keys():
# Use nan mean since some classes do not have duplicates for
# instance or has one duplicate for instance. torch.std returns nan in that case
# so we handle those here. This should not have any effect on the final results, as
# it only affects inter-class variance which we do not
# report anyways.
average_output_dict[key].update(
{
inner_key: np.nanmean(collected_values),
inner_key + "_std": np.nanstd(collected_values, ddof=1),
}
)
final_accumulated_output_dict[key].update(
{inner_key: collected_values}
)
else:
average_output_dict.update(
{
key: {
inner_key: np.nanmean(collected_values),
inner_key + "_std": np.nanstd(collected_values, ddof=1),
}
}
)
final_accumulated_output_dict.update(
{key: {inner_key: collected_values}}
)
final_average_output_dict.update(average_output_dict)
final_accumulated_output_dict.update(
{
"num_instances": {
"num_true_positives": num_true_positives,
"num_duplicates": num_duplicates,
"num_localization_errors": num_localization_errors,
"num_false_positives": num_false_positives,
"num_false_negatives": num_false_negatives,
}
}
)
if print_results:
# Summarize and print all
table = PrettyTable()
table.field_names = [
"Output Type",
"Number of Instances",
"Cls Negative Log Likelihood",
"Cls Brier Score",
"Reg TP Negative Log Likelihood / FP Entropy",
"Reg Energy Score",
]
table.add_row(
[
"True Positives:",
num_true_positives,
"{:.4f} ± {:.4f}".format(
final_average_output_dict["true_positives_cls_analysis"][
"ignorance_score_mean"
],
final_average_output_dict["true_positives_cls_analysis"][
"ignorance_score_mean_std"
],
),
"{:.4f} ± {:.4f}".format(
final_average_output_dict["true_positives_cls_analysis"][
"brier_score_mean"
],
final_average_output_dict["true_positives_cls_analysis"][
"brier_score_mean_std"
],
),
"{:.4f} ± {:.4f}".format(
final_average_output_dict["true_positives_reg_analysis"][
"ignorance_score_mean"
],
final_average_output_dict["true_positives_reg_analysis"][
"ignorance_score_mean_std"
],
),
"{:.4f} ± {:.4f}".format(
final_average_output_dict["true_positives_reg_analysis"][
"energy_score_mean"
],
final_average_output_dict["true_positives_reg_analysis"][
"energy_score_mean_std"
],
),
]
)
table.add_row(
[
"Duplicates:",
num_duplicates,
"{:.4f} ± {:.4f}".format(
final_average_output_dict["duplicates_cls_analysis"][
"ignorance_score_mean"
],
final_average_output_dict["duplicates_cls_analysis"][
"ignorance_score_mean_std"
],
),
"{:.4f} ± {:.4f}".format(
final_average_output_dict["duplicates_cls_analysis"][
"brier_score_mean"
],
final_average_output_dict["duplicates_cls_analysis"][
"brier_score_mean_std"
],
),
"{:.4f} ± {:.4f}".format(
final_average_output_dict["duplicates_reg_analysis"][
"ignorance_score_mean"
],
final_average_output_dict["duplicates_reg_analysis"][
"ignorance_score_mean_std"
],
),
"{:.4f} ± {:.4f}".format(
final_average_output_dict["duplicates_reg_analysis"][
"energy_score_mean"
],
final_average_output_dict["duplicates_reg_analysis"][
"energy_score_mean_std"
],
),
]
)
table.add_row(
[
"Localization Errors:",
num_localization_errors,
"{:.4f} ± {:.4f}".format(
final_average_output_dict["localization_errors_cls_analysis"][
"ignorance_score_mean"
],
final_average_output_dict["localization_errors_cls_analysis"][
"ignorance_score_mean_std"
],
),
"{:.4f} ± {:.4f}".format(
final_average_output_dict["localization_errors_cls_analysis"][
"brier_score_mean"
],
final_average_output_dict["localization_errors_cls_analysis"][
"brier_score_mean_std"
],
),
"{:.4f} ± {:.4f}".format(
final_average_output_dict["localization_errors_reg_analysis"][
"ignorance_score_mean"
],
final_average_output_dict["localization_errors_reg_analysis"][
"ignorance_score_mean_std"
],
),
"{:.4f} ± {:.4f}".format(
final_average_output_dict["localization_errors_reg_analysis"][
"energy_score_mean"
],
final_average_output_dict["localization_errors_reg_analysis"][
"energy_score_mean_std"
],
),
]
)
table.add_row(
[
"False Positives:",
num_false_positives,
"{:.4f} ± {:.4f}".format(
final_average_output_dict["false_positives_cls_analysis"][
"ignorance_score_mean"
],
final_average_output_dict["false_positives_cls_analysis"][
"ignorance_score_mean_std"
],
),
"{:.4f} ± {:.4f}".format(
final_average_output_dict["false_positives_cls_analysis"][
"brier_score_mean"
],
final_average_output_dict["false_positives_cls_analysis"][
"brier_score_mean_std"
],
),
"{:.4f} ± {:.4f}".format(
final_average_output_dict["false_positives_reg_analysis"][
"total_entropy_mean"
],
final_average_output_dict["false_positives_reg_analysis"][
"total_entropy_mean_std"
],
),
"-",
]
)
table.add_row(["False Negatives:", num_false_negatives, "-", "-", "-", "-"])
print(table)
text_file_name = os.path.join(
inference_output_dir,
"probabilistic_scoring_res_{}_{}_{}.txt".format(
iou_min, iou_correct, min_allowed_score
),
)
with open(text_file_name, "w") as text_file:
print(table, file=text_file)
dictionary_file_name = os.path.join(
inference_output_dir,
"probabilistic_scoring_res_{}_{}_{}.pkl".format(
iou_min, iou_correct, min_allowed_score
),
)
with open(dictionary_file_name, "wb") as pickle_file:
pickle.dump(final_accumulated_output_dict, pickle_file)
if __name__ == "__main__":
    # Parse CLI arguments and hand off to detectron2's (possibly distributed) launcher.
    parser = setup_arg_parser()
    cli_args = parser.parse_args()
    print("Command Line Args:", cli_args)
    launch(
        main,
        cli_args.num_gpus,
        num_machines=cli_args.num_machines,
        machine_rank=cli_args.machine_rank,
        dist_url=cli_args.dist_url,
        args=(cli_args,))
| 44,542 | 40.785178 | 149 | py |
pmb-nll | pmb-nll-main/src/offline_evaluation/compute_ood_probabilistic_metrics.py | import itertools
import os
import torch
import ujson as json
import pickle
from prettytable import PrettyTable
# Detectron imports
from detectron2.engine import launch
# Project imports
from core.evaluation_tools import scoring_rules
from core.evaluation_tools.evaluation_utils import eval_predictions_preprocess
from core.setup import setup_config, setup_arg_parser
from probabilistic_inference.inference_utils import get_inference_output_dir
# Run all tensor ops on the GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def main(
        args,
        cfg=None,
        min_allowed_score=None):
    """Evaluate probabilistic scoring rules on an out-of-distribution test set.

    Every detection on the OOD dataset is treated as a false positive (no
    ground truth can match), so only the false-positive classification and
    regression scoring rules are computed. A summary table is printed and the
    results are saved as .txt/.pkl files in the inference output directory.

    Args:
        args: parsed command-line arguments (see setup_arg_parser).
        cfg: optional pre-built config node; created from args when None.
        min_allowed_score: detections scoring below this are dropped. When
            None, it is read from the training set's mAP_res.txt if present,
            otherwise 0.0 (process everything).
    """
    # Setup config
    if cfg is None:
        cfg = setup_config(args, random_seed=args.random_seed, is_testing=True)
    cfg.defrost()
    cfg.ACTUAL_TEST_DATASET = args.test_dataset
    # Setup torch device and num_threads
    torch.set_num_threads(cfg.DATALOADER.NUM_WORKERS)
    # Build path to gt instances and inference output
    inference_output_dir = get_inference_output_dir(
        cfg['OUTPUT_DIR'],
        args.test_dataset,
        args.inference_config,
        args.image_corruption_level)
    if min_allowed_score is None:
        # Check if F-1 Score has been previously computed ON THE ORIGINAL
        # DATASET, and not on VOC.
        try:
            train_set_inference_output_dir = get_inference_output_dir(
                cfg['OUTPUT_DIR'],
                cfg.DATASETS.TEST[0],
                args.inference_config,
                0)
            with open(os.path.join(train_set_inference_output_dir, "mAP_res.txt"), "r") as f:
                min_allowed_score = f.read().strip('][\n').split(', ')[-1]
                min_allowed_score = round(float(min_allowed_score), 4)
        except FileNotFoundError:
            # If not, process all detections. Not recommended as the results might be influenced by very low scoring
            # detections that would normally be removed in robotics/vision
            # applications.
            min_allowed_score = 0.0
    # Get matched results by either generating them or loading from file.
    with torch.no_grad():
        try:
            preprocessed_predicted_instances = torch.load(
                os.path.join(
                    inference_output_dir,
                    "preprocessed_predicted_instances_odd_{}.pth".format(min_allowed_score)),
                map_location=device)
        # Process predictions
        except FileNotFoundError:
            prediction_file_name = os.path.join(
                inference_output_dir,
                'coco_instances_results.json')
            # Fix: close the predictions file after reading. The original
            # json.load(open(...)) leaked the file handle.
            with open(prediction_file_name, 'r') as prediction_file:
                predicted_instances = json.load(prediction_file)
            preprocessed_predicted_instances = eval_predictions_preprocess(
                predicted_instances, min_allowed_score=min_allowed_score, is_odd=True)
            torch.save(
                preprocessed_predicted_instances,
                os.path.join(
                    inference_output_dir,
                    "preprocessed_predicted_instances_odd_{}.pth".format(min_allowed_score)))
        predicted_boxes = preprocessed_predicted_instances['predicted_boxes']
        predicted_cov_mats = preprocessed_predicted_instances['predicted_covar_mats']
        predicted_cls_probs = preprocessed_predicted_instances['predicted_cls_probs']
        # Flatten the per-image dicts into single lists of per-detection tensors.
        predicted_boxes = list(itertools.chain.from_iterable(
            [predicted_boxes[key] for key in predicted_boxes.keys()]))
        predicted_cov_mats = list(itertools.chain.from_iterable(
            [predicted_cov_mats[key] for key in predicted_cov_mats.keys()]))
        predicted_cls_probs = list(itertools.chain.from_iterable(
            [predicted_cls_probs[key] for key in predicted_cls_probs.keys()]))
        num_false_positives = len(predicted_boxes)
        # Every OOD detection counts as a false positive, so all indices are valid.
        valid_idxs = torch.arange(num_false_positives, device=device)
        predicted_boxes = torch.stack(predicted_boxes, 1).transpose(0, 1)
        predicted_cov_mats = torch.stack(predicted_cov_mats, 1).transpose(0, 1)
        predicted_cls_probs = torch.stack(
            predicted_cls_probs,
            1).transpose(
            0,
            1)
        false_positives_dict = {
            'predicted_box_means': predicted_boxes,
            'predicted_box_covariances': predicted_cov_mats,
            'predicted_cls_probs': predicted_cls_probs}
        false_positives_reg_analysis = scoring_rules.compute_reg_scores_fn(
            false_positives_dict, valid_idxs)
        if cfg.MODEL.META_ARCHITECTURE == 'ProbabilisticRetinaNet':
            # RetinaNet has no explicit background class: the score of the
            # "correct" (background) category is taken as 1 - max class score.
            predicted_class_probs, predicted_class_idx = predicted_cls_probs.max(
                1)
            false_positives_dict['predicted_score_of_gt_category'] = 1.0 - \
                predicted_class_probs
            false_positives_cls_analysis = scoring_rules.sigmoid_compute_cls_scores(
                false_positives_dict, valid_idxs)
        else:
            # RCNN-style heads carry an explicit background score in the last column.
            false_positives_dict['predicted_score_of_gt_category'] = predicted_cls_probs[:, -1]
            _, predicted_class_idx = predicted_cls_probs[:, :-1].max(
                1)
            false_positives_cls_analysis = scoring_rules.softmax_compute_cls_scores(
                false_positives_dict, valid_idxs)
        # Summarize and print all
        table = PrettyTable()
        table.field_names = (['Output Type',
                              'Number of Instances',
                              'Cls Ignorance Score',
                              'Cls Brier/Probability Score',
                              'Reg Ignorance Score',
                              'Reg Energy Score'])
        table.add_row(
            [
                "False Positives:",
                num_false_positives,
                '{:.4f}'.format(
                    false_positives_cls_analysis['ignorance_score_mean'],),
                '{:.4f}'.format(
                    false_positives_cls_analysis['brier_score_mean']),
                '{:.4f}'.format(
                    false_positives_reg_analysis['total_entropy_mean']),
                '{:.4f}'.format(
                    false_positives_reg_analysis['fp_energy_score_mean'])])
        print(table)
        text_file_name = os.path.join(
            inference_output_dir,
            'probabilistic_scoring_res_odd_{}.txt'.format(min_allowed_score))
        with open(text_file_name, "w") as text_file:
            print(table, file=text_file)
        dictionary_file_name = os.path.join(
            inference_output_dir,
            'probabilistic_scoring_res_odd_{}.pkl'.format(min_allowed_score))
        false_positives_reg_analysis.update(false_positives_cls_analysis)
        with open(dictionary_file_name, "wb") as pickle_file:
            pickle.dump(false_positives_reg_analysis, pickle_file)
if __name__ == "__main__":
    # Parse CLI arguments and hand off to detectron2's (possibly distributed) launcher.
    parser = setup_arg_parser()
    cli_args = parser.parse_args()
    print("Command Line Args:", cli_args)
    launch(
        main,
        cli_args.num_gpus,
        num_machines=cli_args.num_machines,
        machine_rank=cli_args.machine_rank,
        dist_url=cli_args.dist_url,
        args=(cli_args,))
| 7,146 | 38.486188 | 116 | py |
pmb-nll | pmb-nll-main/src/offline_evaluation/compute_calibration_errors.py | import calibration as cal
import os
import pickle
import torch
from prettytable import PrettyTable
# Detectron imports
from detectron2.data import MetadataCatalog
from detectron2.engine import launch
# Project imports
from core.evaluation_tools import evaluation_utils
from core.evaluation_tools.evaluation_utils import get_test_thing_dataset_id_to_train_contiguous_id_dict
from core.setup import setup_config, setup_arg_parser
from probabilistic_inference.inference_utils import get_inference_output_dir
# Run all tensor ops on the GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def main(
        args,
        cfg=None,
        iou_min=None,
        iou_correct=None,
        min_allowed_score=None,
        print_results=True,
        inference_output_dir=""):
    """Compute classification and regression calibration errors.

    Loads the matched detection partitions (true positives, duplicates,
    localization errors, false positives), computes a marginal classification
    calibration error with the `calibration` library, and a per-class,
    per-box-dimension regression calibration error from the predicted
    Gaussians. Results are optionally printed and are written to .txt/.pkl
    files in `inference_output_dir`.

    Args:
        args: parsed command-line arguments (see setup_arg_parser).
        cfg: optional pre-built config node; created from args when None.
        iou_min: lower IOU threshold for matching; defaults to args.iou_min.
        iou_correct: IOU above which a match counts as correct; defaults to
            args.iou_correct.
        min_allowed_score: detections below this score are dropped. When
            None, read from the training set's mAP_res.txt if present,
            otherwise 0.0.
        print_results: when True, print a summary PrettyTable.
        inference_output_dir: output folder; derived from cfg/args when "".
    """
    # Setup config
    if cfg is None:
        cfg = setup_config(args, random_seed=args.random_seed, is_testing=True)
    cfg.defrost()
    cfg.ACTUAL_TEST_DATASET = args.test_dataset
    # Setup torch device and num_threads
    torch.set_num_threads(cfg.DATALOADER.NUM_WORKERS)
    # Build path to gt instances and inference output
    if inference_output_dir == "":
        inference_output_dir = get_inference_output_dir(
            cfg['OUTPUT_DIR'],
            args.test_dataset,
            args.inference_config,
            args.image_corruption_level)
    # Get thresholds to perform evaluation on
    if iou_min is None:
        iou_min = args.iou_min
    if iou_correct is None:
        iou_correct = args.iou_correct
    if min_allowed_score is None:
        # Check if F-1 Score has been previously computed ON THE ORIGINAL
        # DATASET such as COCO even when evaluating on OpenImages.
        try:
            train_set_inference_output_dir = get_inference_output_dir(
                cfg['OUTPUT_DIR'],
                cfg.DATASETS.TEST[0],
                args.inference_config,
                0)
            with open(os.path.join(train_set_inference_output_dir, "mAP_res.txt"), "r") as f:
                min_allowed_score = f.read().strip('][\n').split(', ')[-1]
                min_allowed_score = round(float(min_allowed_score), 4)
        except FileNotFoundError:
            # If not, process all detections. Not recommended as the results might be influenced by very low scoring
            # detections that would normally be removed in robotics/vision
            # applications.
            min_allowed_score = 0.0
    # Get category mapping dictionary:
    train_thing_dataset_id_to_contiguous_id = MetadataCatalog.get(
        cfg.DATASETS.TRAIN[0]).thing_dataset_id_to_contiguous_id
    test_thing_dataset_id_to_contiguous_id = MetadataCatalog.get(
        args.test_dataset).thing_dataset_id_to_contiguous_id
    cat_mapping_dict = get_test_thing_dataset_id_to_train_contiguous_id_dict(
        cfg,
        args,
        train_thing_dataset_id_to_contiguous_id,
        test_thing_dataset_id_to_contiguous_id)
    # Get matched results by either generating them or loading from file.
    with torch.no_grad():
        matched_results = evaluation_utils.get_matched_results(
            cfg, inference_output_dir,
            iou_min=iou_min,
            iou_correct=iou_correct,
            min_allowed_score=min_allowed_score)
        # Build preliminary dicts required for computing classification scores.
        for matched_results_key in matched_results.keys():
            if 'gt_cat_idxs' in matched_results[matched_results_key].keys():
                # First we convert the written things indices to contiguous
                # indices.
                gt_converted_cat_idxs = matched_results[matched_results_key]['gt_cat_idxs'].squeeze(
                    1)
                gt_converted_cat_idxs = torch.as_tensor([cat_mapping_dict[class_idx.cpu(
                ).tolist()] for class_idx in gt_converted_cat_idxs]).to(device)
                matched_results[matched_results_key]['gt_converted_cat_idxs'] = gt_converted_cat_idxs.to(
                    device)
                matched_results[matched_results_key]['gt_cat_idxs'] = gt_converted_cat_idxs
            if 'predicted_cls_probs' in matched_results[matched_results_key].keys(
            ):
                if cfg.MODEL.META_ARCHITECTURE == 'ProbabilisticRetinaNet':
                    # For false positives, the correct category is background. For retinanet, since no explicit
                    # background category is available, this value is computed as 1.0 - score of the predicted
                    # category.
                    predicted_class_probs, predicted_cat_idxs = matched_results[matched_results_key][
                        'predicted_cls_probs'].max(
                        1)
                    matched_results[matched_results_key]['output_logits'] = predicted_class_probs
                else:
                    predicted_class_probs, predicted_cat_idxs = matched_results[
                        matched_results_key]['predicted_cls_probs'][:, :-1].max(1)
                matched_results[matched_results_key]['predicted_cat_idxs'] = predicted_cat_idxs
        # Load the different detection partitions
        true_positives = matched_results['true_positives']
        duplicates = matched_results['duplicates']
        localization_errors = matched_results['localization_errors']
        false_positives = matched_results['false_positives']
        reg_maximum_calibration_error_list = []
        reg_expected_calibration_error_list = []
        if cfg.MODEL.META_ARCHITECTURE == 'ProbabilisticRetinaNet':
            # Sigmoid scores: flatten per-class probabilities; ground truth is
            # a one-hot vector for matched detections and all-zeros otherwise.
            all_predicted_scores = torch.cat(
                (true_positives['predicted_cls_probs'].flatten(),
                 duplicates['predicted_cls_probs'].flatten(),
                 localization_errors['predicted_cls_probs'].flatten(),
                 false_positives['predicted_cls_probs'].flatten()),
                0)
            all_gt_scores = torch.cat(
                (torch.nn.functional.one_hot(
                    true_positives['gt_cat_idxs'],
                    true_positives['predicted_cls_probs'].shape[1]).flatten().to(device),
                 torch.nn.functional.one_hot(
                     duplicates['gt_cat_idxs'],
                     duplicates['predicted_cls_probs'].shape[1]).flatten().to(device),
                 torch.zeros_like(
                     localization_errors['predicted_cls_probs'].type(
                         torch.LongTensor).flatten()).to(device),
                 torch.zeros_like(
                     false_positives['predicted_cls_probs'].type(
                         torch.LongTensor).flatten()).to(device)),
                0)
        else:
            # For RCNN based networks, a background category is
            # explicitly available.
            # NOTE(review): 80.0 looks like the COCO background index — confirm
            # if evaluating with a different number of classes.
            all_predicted_scores = torch.cat(
                (true_positives['predicted_cls_probs'],
                 duplicates['predicted_cls_probs'],
                 localization_errors['predicted_cls_probs'],
                 false_positives['predicted_cls_probs']),
                0)
            all_gt_scores = torch.cat(
                (true_positives['gt_cat_idxs'],
                 duplicates['gt_cat_idxs'],
                 torch.ones_like(
                     localization_errors['predicted_cls_probs'][:, 0]).fill_(80.0).type(
                     torch.LongTensor).to(device),
                 torch.ones_like(
                     false_positives['predicted_cls_probs'][:, 0]).fill_(80.0).type(
                     torch.LongTensor).to(device)), 0)
        # Compute classification calibration error using calibration
        # library
        cls_marginal_calibration_error = cal.get_calibration_error(
            all_predicted_scores.cpu().numpy(), all_gt_scores.cpu().numpy())
        for class_idx in cat_mapping_dict.values():
            true_positives_valid_idxs = true_positives['gt_converted_cat_idxs'] == class_idx
            localization_errors_valid_idxs = localization_errors['gt_converted_cat_idxs'] == class_idx
            duplicates_valid_idxs = duplicates['gt_converted_cat_idxs'] == class_idx
            # Compute regression calibration errors. False negatives cant be evaluated since
            # those do not have ground truth.
            all_predicted_means = torch.cat(
                (true_positives['predicted_box_means'][true_positives_valid_idxs],
                 duplicates['predicted_box_means'][duplicates_valid_idxs],
                 localization_errors['predicted_box_means'][localization_errors_valid_idxs]),
                0)
            all_predicted_covariances = torch.cat(
                (true_positives['predicted_box_covariances'][true_positives_valid_idxs],
                 duplicates['predicted_box_covariances'][duplicates_valid_idxs],
                 localization_errors['predicted_box_covariances'][localization_errors_valid_idxs]),
                0)
            all_predicted_gt = torch.cat(
                (true_positives['gt_box_means'][true_positives_valid_idxs],
                 duplicates['gt_box_means'][duplicates_valid_idxs],
                 localization_errors['gt_box_means'][localization_errors_valid_idxs]),
                0)
            all_predicted_covariances = torch.diagonal(
                all_predicted_covariances, dim1=1, dim2=2)
            # The assumption of uncorrelated components is not accurate, especially when estimating full
            # covariance matrices. However, using scipy to compute multivariate cdfs is very very
            # time consuming for such large amounts of data.
            reg_maximum_calibration_error = []
            reg_expected_calibration_error = []
            # Regression calibration is computed for every box dimension
            # separately, and averaged after.
            for box_dim in range(all_predicted_gt.shape[1]):
                all_predicted_means_current_dim = all_predicted_means[:, box_dim]
                all_predicted_gt_current_dim = all_predicted_gt[:, box_dim]
                all_predicted_covariances_current_dim = all_predicted_covariances[:, box_dim]
                normal_dists = torch.distributions.Normal(
                    all_predicted_means_current_dim,
                    scale=torch.sqrt(all_predicted_covariances_current_dim))
                all_predicted_scores = normal_dists.cdf(
                    all_predicted_gt_current_dim)
                reg_calibration_error = []
                histogram_bin_step_size = 1 / 15.0
                for i in torch.arange(
                        0.0,
                        1.0 - histogram_bin_step_size,
                        histogram_bin_step_size):
                    # Get number of elements in bin
                    elements_in_bin = (
                        all_predicted_scores < (i + histogram_bin_step_size))
                    num_elems_in_bin_i = elements_in_bin.type(
                        torch.FloatTensor).to(device).sum()
                    # Compute calibration error from "Accurate uncertainties for deep
                    # learning using calibrated regression" paper.
                    reg_calibration_error.append(
                        (num_elems_in_bin_i / all_predicted_scores.shape[0] - (i + histogram_bin_step_size)) ** 2)
                calibration_error = torch.stack(
                    reg_calibration_error).to(device)
                reg_maximum_calibration_error.append(calibration_error.max())
                reg_expected_calibration_error.append(calibration_error.mean())
            reg_maximum_calibration_error_list.append(
                reg_maximum_calibration_error)
            reg_expected_calibration_error_list.append(
                reg_expected_calibration_error)
        # Summarize and print all
        reg_expected_calibration_error = torch.stack([torch.stack(
            reg, 0) for reg in reg_expected_calibration_error_list], 0)
        reg_expected_calibration_error = reg_expected_calibration_error[
            ~torch.isnan(reg_expected_calibration_error)].mean()
        reg_maximum_calibration_error = torch.stack([torch.stack(
            reg, 0) for reg in reg_maximum_calibration_error_list], 0)
        reg_maximum_calibration_error = reg_maximum_calibration_error[
            ~torch.isnan(reg_maximum_calibration_error)].mean()
        if print_results:
            table = PrettyTable()
            table.field_names = (['Cls Marginal Calibration Error',
                                  'Reg Expected Calibration Error',
                                  'Reg Maximum Calibration Error'])
            table.add_row([cls_marginal_calibration_error,
                           reg_expected_calibration_error.cpu().numpy().tolist(),
                           reg_maximum_calibration_error.cpu().numpy().tolist()])
            print(table)
        text_file_name = os.path.join(
            inference_output_dir,
            'calibration_errors_{}_{}_{}.txt'.format(
                iou_min, iou_correct, min_allowed_score))
        with open(text_file_name, "w") as text_file:
            print([
                cls_marginal_calibration_error,
                reg_expected_calibration_error.cpu().numpy().tolist(),
                reg_maximum_calibration_error.cpu().numpy().tolist()], file=text_file)
        dictionary_file_name = os.path.join(
            inference_output_dir, 'calibration_errors_res_{}_{}_{}.pkl'.format(
                iou_min, iou_correct, min_allowed_score))
        final_accumulated_output_dict = {
            'cls_marginal_calibration_error': cls_marginal_calibration_error,
            'reg_expected_calibration_error': reg_expected_calibration_error.cpu().numpy(),
            'reg_maximum_calibration_error': reg_maximum_calibration_error.cpu().numpy()}
        with open(dictionary_file_name, "wb") as pickle_file:
            pickle.dump(final_accumulated_output_dict, pickle_file)
if __name__ == "__main__":
    # Parse CLI arguments and hand off to detectron2's (possibly distributed) launcher.
    parser = setup_arg_parser()
    cli_args = parser.parse_args()
    print("Command Line Args:", cli_args)
    launch(
        main,
        cli_args.num_gpus,
        num_machines=cli_args.num_machines,
        machine_rank=cli_args.machine_rank,
        dist_url=cli_args.dist_url,
        args=(cli_args,))
| 14,295 | 45.718954 | 116 | py |
FDS | FDS-main/main.py | """
This is the base code to learn the learning rate, momentum and weight decay
non-greedily with forward mode differentiation, over long horizons (e.g. CIFAR10)
"""
import os
import time
import shutil
import torch
import torch.optim as optim
import pickle
from utils.logger import *
from utils.helpers import *
from utils.datasets import *
from models.selector import *
class MetaLearner(object):
    def __init__(self, args):
        """Set up hyperparameter schedules, experiment folders and bookkeeping.

        Initializes the learnable schedules via hypers_init, allocates one
        schedule row per outer step (plus one for the initial schedules) and
        hypergradient buffers for each learned family, creates the experiment
        directory (refusing to overwrite an existing checkpointed GPU run),
        copies this script into the log folder and dumps all args to disk.
        """
        self.args = args
        ## Optimization
        self.hypers_init()
        self.cross_entropy = nn.CrossEntropyLoss()
        ## Experiment Set Up
        self.best_outer_step = 0
        self.best_validation_acc = 0
        # Grad buffers are only allocated for hyperparameter families that are learned.
        ns, learnables = (self.args.n_lrs, self.args.n_moms, self.args.n_wds), (self.args.learn_lr, self.args.learn_mom, self.args.learn_wd)
        self.all_lr_schedules, self.all_mom_schedules, self.all_wd_schedules = [torch.zeros((self.args.n_outer_steps+1, n)) for n in ns] #+1 since save init schedules and last schedule
        self.all_lr_raw_grads, self.all_mom_raw_grads, self.all_wd_raw_grads = [torch.zeros((self.args.n_outer_steps, n)) if l else None for (n,l) in zip(ns, learnables)]
        self.all_lr_smooth_grads, self.all_mom_smooth_grads, self.all_wd_smooth_grads = [torch.zeros((self.args.n_outer_steps, n)) if l else None for (n,l) in zip(ns, learnables)]
        self.experiment_path = os.path.join(self.args.log_directory_path, self.args.experiment_name)
        self.checkpoint_path = os.path.join(self.experiment_path, 'checkpoint.pth.tar')
        if os.path.exists(self.experiment_path):
            if self.args.use_gpu and os.path.isfile(self.checkpoint_path):
                raise NotImplementedError(f"Experiment folder {self.experiment_path} already exists") #TODO: restore code from ckpt
            else:
                shutil.rmtree(self.experiment_path) # clear debug logs on cpu
                os.makedirs(self.experiment_path)
        else:
            os.makedirs(self.experiment_path)
        copy_file(os.path.realpath(__file__), self.experiment_path) # save this python file in logs folder
        self.logger = Logger(self.experiment_path, 'run_results.csv')
        ## Save and Print Args
        print('\n---------')
        with open(os.path.join(self.experiment_path, 'args.txt'), 'w+') as f:
            for k, v in self.args.__dict__.items():
                print(k, v)
                f.write("{} \t {}\n".format(k, v))
        print('---------\n')
        print('\nLogging every {} outer_steps and every {} epochs per outer_step\n'.format(self.args.outer_step_log_freq, self.args.epoch_log_freq))
def hypers_init(self):
""" initialize hyperparameters """
self.inner_lrs = self.args.inner_lr_init*torch.ones(self.args.n_lrs, device=self.args.device)
self.inner_lrs_grad = torch.zeros_like(self.inner_lrs) # lr hypergradient
self.lr_hypersigns = torch.zeros(self.args.n_lrs, device=self.args.device)
self.lr_step_sizes = self.args.lr_init_step_size*torch.ones(self.args.n_lrs, device=self.args.device)
self.inner_moms = self.args.inner_mom_init*torch.ones(self.args.n_moms, device=self.args.device)
self.inner_moms_grad = torch.zeros_like(self.inner_moms)
self.mom_hypersigns = torch.zeros(self.args.n_moms, device=self.args.device)
self.mom_step_sizes = self.args.mom_init_step_size*torch.ones(self.args.n_moms, device=self.args.device)
self.inner_wds = self.args.inner_wd_init*torch.ones(self.args.n_wds, device=self.args.device)
self.inner_wds_grad = torch.zeros_like(self.inner_wds)
self.wd_hypersigns = torch.zeros(self.args.n_wds, device=self.args.device)
self.wd_step_sizes = self.args.wd_init_step_size*torch.ones(self.args.n_wds, device=self.args.device)
def get_hypers(self, epoch, batch_idx):
"""return hyperparameters to be used for given batch"""
lr_index = int(self.args.n_lrs * (epoch*self.n_batches_per_epoch + batch_idx)/self.n_total_batches_for_this_outer_step)
lr = float(self.inner_lrs[lr_index])
mom_index = int(self.args.n_moms * (epoch*self.n_batches_per_epoch + batch_idx)/self.n_total_batches_for_this_outer_step)
mom = float(self.inner_moms[mom_index])
wd_index = int(self.args.n_wds * (epoch*self.n_batches_per_epoch + batch_idx)/self.n_total_batches_for_this_outer_step)
wd = float(self.inner_wds[wd_index])
return lr, mom, wd, lr_index, mom_index, wd_index
def to_prune(self, epoch, batch_idx, n_hypers):
""" Do we skip calculation of Z for this batch?"""
if self.args.pruning_ratio==0:
to_prune=False
else:
n_batches_per_hyper = int(self.n_total_batches_for_this_outer_step/n_hypers)
current_global_batch_idx = epoch*self.n_batches_per_epoch + batch_idx
current_global_batch_idx_per_hyper = current_global_batch_idx % n_batches_per_hyper
if self.args.pruning_mode=='alternate': #rounded to nearest integer, so r=0.25 -> prune 1 in 4 but r=0.21 -> 1 in 4 also
if self.args.pruning_ratio>=0.5: #at least 1 in 2 pruned
keep_freq = int(1/(1-self.args.pruning_ratio))
to_prune = (current_global_batch_idx_per_hyper % keep_freq != 0)
else:
prune_freq = int(1/(self.args.pruning_ratio))
to_prune = (current_global_batch_idx_per_hyper % prune_freq == 0)
elif self.args.pruning_mode=='truncate':
to_prune = current_global_batch_idx_per_hyper < self.args.pruning_ratio*n_batches_per_hyper
return to_prune
    def inner_loop(self):
        """
        Train the inner model for one outer step while accumulating, with
        forward-mode differentiation, the sensitivities Z of the weights with
        respect to every learnable schedule entry (lr / momentum / weight
        decay). Returns (Z_lr, Z_mom, Z_wd); an entry is None when that
        hyperparameter family is not learned.
        """
        ## Network
        self.classifier = select_model(True, self.args.dataset, self.args.architecture,
                                       self.args.init_type, self.args.init_param,
                                       self.args.device).to(self.args.device)
        self.classifier.train()
        self.weights = self.classifier.get_param()
        velocity = torch.zeros(self.weights.numel(), requires_grad=False, device=self.args.device)
        ## Forward Mode Init
        # Z_* holds the weight sensitivities; C_* appears to accumulate the
        # matching velocity sensitivities (see the momentum recursions below).
        if self.args.learn_lr:
            self.n_batches_per_lr = 0
            Z_lr = torch.zeros((self.weights.numel(), self.args.n_lrs), device=self.args.device)
            C_lr = torch.zeros((self.weights.numel(), self.args.n_lrs), device=self.args.device)
        else:
            Z_lr = None
        if self.args.learn_mom:
            self.n_batches_per_mom = 0
            Z_mom = torch.zeros((self.weights.numel(), self.args.n_moms), device=self.args.device)
            C_mom = torch.zeros((self.weights.numel(), self.args.n_moms), device=self.args.device)
        else:
            Z_mom = None
        if self.args.learn_wd:
            self.n_batches_per_wd = 0
            Z_wd = torch.zeros((self.weights.numel(), self.args.n_wds), device=self.args.device)
            C_wd = torch.zeros((self.weights.numel(), self.args.n_wds), device=self.args.device)
        else:
            Z_wd = None
        ## Inner Loop Over All Epochs
        for epoch in range(self.n_inner_epochs_for_this_outer_step):
            t0_epoch = time.time()
            for batch_idx, (x_train, y_train) in enumerate(self.train_loader):
                lr, mom, wd, lr_index, mom_index, wd_index = self.get_hypers(epoch, batch_idx)
                #print(f'epoch {epoch} batch {batch_idx} -- lr idx {lr_index} -- mom idx {mom_index} -- wd index {wd_index}')
                x_train, y_train = x_train.to(device=self.args.device), y_train.to(device=self.args.device)
                train_logits = self.classifier.forward_with_param(x_train, self.weights)
                train_loss = self.cross_entropy(train_logits, y_train)
                # create_graph=True keeps the graph alive so the Hessian-vector
                # products below can differentiate through these gradients.
                grads = torch.autograd.grad(train_loss, self.weights, create_graph=True)[0]
                if self.args.clamp_grads: grads.clamp_(-self.args.clamp_grads_range, self.args.clamp_grads_range)
                if self.args.learn_lr and not self.to_prune(epoch, batch_idx, self.args.n_lrs):
                    #print('update lr')
                    self.n_batches_per_lr += 1
                    H_times_Z = torch.zeros((self.weights.numel(), self.args.n_lrs),device=self.args.device)
                    # One Hessian-vector product per schedule entry active so far.
                    for j in range(lr_index + 1):
                        # Free the autograd graph only on the very last grad call of this batch.
                        retain = (j != lr_index) or self.args.learn_mom or self.args.learn_wd
                        H_times_Z[:, j] = torch.autograd.grad(grads @ Z_lr[:, j], self.weights, retain_graph=retain)[0]
                    if self.args.clamp_HZ: H_times_Z.clamp_(-self.args.clamp_HZ_range, self.args.clamp_HZ_range)
                    A_times_Z = Z_lr*(1 - lr*wd) - lr*H_times_Z
                    B = - mom*lr*C_lr
                    B[:,lr_index] -= grads.detach() + wd*self.weights.detach() + mom*velocity
                    C_lr = mom*C_lr + H_times_Z + wd*Z_lr
                    Z_lr = A_times_Z + B
                if self.args.learn_mom and not self.to_prune(epoch, batch_idx, self.args.n_moms):
                    #print('update mom')
                    self.n_batches_per_mom += 1
                    H_times_Z = torch.zeros((self.weights.numel(), self.args.n_moms),device=self.args.device)
                    for j in range(mom_index + 1):
                        retain = (j != mom_index) or self.args.learn_wd
                        H_times_Z[:, j] = torch.autograd.grad(grads @ Z_mom[:, j], self.weights, retain_graph=retain)[0]
                    if self.args.clamp_HZ: H_times_Z.clamp_(-self.args.clamp_HZ_range, self.args.clamp_HZ_range)
                    A_times_Z = (1 - lr*wd)*Z_mom - lr*H_times_Z
                    B = -lr*mom*C_mom
                    B[:, mom_index] -= lr*velocity
                    C_mom = mom*C_mom + H_times_Z + wd * Z_mom
                    C_mom[:, mom_index] += velocity
                    Z_mom = A_times_Z + B
                if self.args.learn_wd and not self.to_prune(epoch, batch_idx, self.args.n_wds):
                    #print('update wd')
                    self.n_batches_per_wd += 1
                    H_times_Z = torch.zeros((self.weights.numel(), self.args.n_wds),device=self.args.device)
                    for j in range(wd_index + 1):
                        retain = (j != wd_index)
                        H_times_Z[:, j] = torch.autograd.grad(grads @ Z_wd[:, j], self.weights, retain_graph=retain)[0]
                    if self.args.clamp_HZ: H_times_Z.clamp_(-self.args.clamp_HZ_range, self.args.clamp_HZ_range)
                    A_times_Z = (1 - lr*wd)*Z_wd - lr*H_times_Z
                    B = - lr*mom*C_wd
                    B[:, wd_index] -= lr*self.weights.detach()
                    C_wd = mom*C_wd + H_times_Z + wd*Z_wd
                    C_wd[:, wd_index] += self.weights.detach()
                    Z_wd = A_times_Z + B
                ## SGD inner update
                # Manual SGD with momentum and coupled weight decay.
                self.weights.detach_(), grads.detach_()
                velocity = velocity*mom + (grads + wd*self.weights)
                self.weights = self.weights - lr*velocity
                self.weights.requires_grad_()
            print(f'--- Ran epoch {epoch+1} in {format_time(time.time()-t0_epoch)} ---')
        if self.args.learn_lr: self.n_batches_per_lr /= self.args.n_lrs # each hyper gets same # of updates regardless of pruning mode
        if self.args.learn_mom: self.n_batches_per_mom /= self.args.n_moms
        if self.args.learn_wd: self.n_batches_per_wd /= self.args.n_wds
        return Z_lr, Z_mom, Z_wd
    def outer_step(self, outer_step_idx, Z_lr_final, Z_mom_final, Z_wd_final):
        """
        Calculate hypergradients and update hyperparameters accordingly.

        The hypergradient of each schedule entry is dL_val/dw @ Z, where Z
        (accumulated by the inner loop) approximates dw/dhyper. Each
        hyperparameter is then moved one step *against the sign* of its
        hypergradient (Rprop-style): its per-hyper step size is decayed
        whenever the hypergradient sign flips between consecutive outer
        steps, and a schedule counts as converged once all of its step sizes
        fall below ``converged_frac`` of the corresponding hyper values.

        Args:
            outer_step_idx: index of the current outer step (used to index
                the logging buffers).
            Z_lr_final, Z_mom_final, Z_wd_final: final Z matrices
                (n_weights x n_hypers) returned by the inner loop.
        """
        ## Calculate validation gradients with final weights of inner loop
        self.running_val_grad = AggregateTensor()
        for batch_idx, (x_val, y_val) in enumerate(self.val_loader): #need as big batches as train mode for BN train mode
            x_val, y_val = x_val.to(device=self.args.device), y_val.to(device=self.args.device)
            val_logits = self.classifier.forward_with_param(x_val, self.weights)
            val_loss = self.cross_entropy(val_logits, y_val)
            dLval_dw = torch.autograd.grad(val_loss, self.weights)[0]
            self.running_val_grad.update(dLval_dw)
        ## Update hyperparams
        print('')
        if self.args.learn_lr:
            # dL_val/dw @ Z_lr, normalized by how many inner updates each lr
            # segment received during the inner loop.
            self.inner_lrs_grad = self.running_val_grad.avg() @ Z_lr_final / self.n_batches_per_lr
            self.all_lr_raw_grads[outer_step_idx] = self.inner_lrs_grad.detach()
            print('RAW LR GRADS: ', ["{:.2E}".format(float(i)) for i in self.inner_lrs_grad])
            new_hypersigns = torch.sign(self.inner_lrs_grad) #Nans and zero have sign 0
            flipped_signs = self.lr_hypersigns*new_hypersigns # 1, -1 or 0
            # Decay the step size of any lr whose hypergradient sign flipped.
            multipliers = torch.tensor([self.args.lr_step_decay if f==-1.0 else 1.0 for f in flipped_signs], device=self.args.device)
            self.lr_step_sizes = multipliers*self.lr_step_sizes
            self.lr_hypersigns = new_hypersigns
            deltas = new_hypersigns*self.lr_step_sizes # how much to change hyperparameter by
            # Converged once every step size is a small fraction of its lr.
            self.lr_converged = ((self.lr_step_sizes/self.inner_lrs) < self.args.converged_frac).all()
            self.inner_lrs = self.inner_lrs - deltas
            self.all_lr_smooth_grads[outer_step_idx] = deltas
            print('SMOOTH LR DELTAS: ', ["{:02.2f}".format(float(i)) for i in deltas])
        if self.args.learn_mom:
            # Same sign-based update, applied to the momentum schedule.
            self.inner_moms_grad = self.running_val_grad.avg() @ Z_mom_final / self.n_batches_per_mom
            self.all_mom_raw_grads[outer_step_idx] = self.inner_moms_grad.detach()
            print('RAW MOM GRADS: ', ["{:.2E}".format(float(i)) for i in self.inner_moms_grad])
            new_hypersigns = torch.sign(self.inner_moms_grad) #Nans and zero have sign 0
            flipped_signs = self.mom_hypersigns*new_hypersigns # 1, -1 or 0
            multipliers = torch.tensor([self.args.mom_step_decay if f==-1.0 else 1.0 for f in flipped_signs], device=self.args.device)
            self.mom_step_sizes = multipliers*self.mom_step_sizes
            self.mom_hypersigns = new_hypersigns
            deltas = new_hypersigns*self.mom_step_sizes # how much to change hyperparameter by
            self.mom_converged = ((self.mom_step_sizes/self.inner_moms) < self.args.converged_frac).all()
            self.inner_moms = self.inner_moms - deltas
            self.all_mom_smooth_grads[outer_step_idx] = deltas
            print('SMOOTH MOM DELTAS: ', ["{:02.2f}".format(float(i)) for i in deltas])
        if self.args.learn_wd:
            # Same sign-based update, applied to the weight-decay schedule.
            self.inner_wds_grad = self.running_val_grad.avg() @ Z_wd_final / self.n_batches_per_wd
            self.all_wd_raw_grads[outer_step_idx] = self.inner_wds_grad.detach()
            print('RAW WD GRADS: ', ["{:.2E}".format(float(i)) for i in self.inner_wds_grad])
            new_hypersigns = torch.sign(self.inner_wds_grad) #Nans and zero have sign 0
            flipped_signs = self.wd_hypersigns*new_hypersigns # 1, -1 or 0
            multipliers = torch.tensor([self.args.wd_step_decay if f==-1.0 else 1.0 for f in flipped_signs], device=self.args.device)
            self.wd_step_sizes = multipliers*self.wd_step_sizes
            self.wd_hypersigns = new_hypersigns
            deltas = new_hypersigns*self.wd_step_sizes # how much to change hyperparameter by
            self.wd_converged = ((self.wd_step_sizes/self.inner_wds) < self.args.converged_frac).all()
            self.inner_wds = self.inner_wds - deltas
            self.all_wd_smooth_grads[outer_step_idx] = deltas
            print('SMOOTH WD DELTAS: ', ["{:02.2f}".format(float(i)) for i in deltas])
        # The experiment converges only when every *learned* schedule has converged.
        self.converged = (self.lr_converged if self.args.learn_lr else True) and (self.mom_converged if self.args.learn_mom else True) and (self.wd_converged if self.args.learn_wd else True)
    def run(self):
        """ Run meta learning experiment.

        For each outer step: log the schedules about to be used, checkpoint,
        draw a fresh train/val split, run the inner loop (training plus Z
        accumulation) and update the schedules from the resulting
        hypergradients. Stops early once all learned schedules converge.

        Returns:
            Average test accuracy of the final inner-loop weights.
        """
        t0 = time.time()
        for outer_step_idx in range(self.args.n_outer_steps): # number of outer steps
            ## Set up
            self.n_inner_epochs_for_this_outer_step = self.args.n_inner_epochs_per_outer_steps[outer_step_idx]
            print(f'\nOuter step {outer_step_idx+1}/{self.args.n_outer_steps} --- current budget of {self.n_inner_epochs_for_this_outer_step} epochs --- using:')
            print('lrs = ', [float('{:02.2e}'.format(el)) for el in self.inner_lrs],
                  'moms = ', [float('{:02.2e}'.format(el)) for el in self.inner_moms],
                  'wds = ', [float('{:02.2e}'.format(el)) for el in self.inner_wds])
            # Schedules recorded here are the ones *about to be* trained with.
            self.all_lr_schedules[outer_step_idx], self.all_mom_schedules[outer_step_idx], self.all_wd_schedules[outer_step_idx] = self.inner_lrs.detach(), self.inner_moms.detach(), self.inner_wds.detach()
            self.save_state(outer_step_idx) # state and lrs saved correspond to those set at the beginning of the outer_step
            ## New data split for each outer_step
            self.train_loader, self.val_loader, self.test_loader = get_loaders(datasets_path=self.args.datasets_path,
                                                                               dataset=self.args.dataset,
                                                                               train_batch_size=self.args.train_batch_size,
                                                                               val_batch_size=self.args.val_batch_size,
                                                                               val_source='train',
                                                                               val_train_fraction=self.args.val_train_fraction,
                                                                               val_train_overlap=self.args.val_train_overlap,
                                                                               workers=self.args.workers,
                                                                               train_infinite=False,
                                                                               val_infinite=False,
                                                                               cutout=self.args.cutout,
                                                                               cutout_length=self.args.cutout_length,
                                                                               cutout_prob=self.args.cutout_prob)
            self.n_batches_per_epoch = len(self.train_loader)
            self.n_total_batches_for_this_outer_step = self.n_inner_epochs_for_this_outer_step * self.n_batches_per_epoch
            ## Update Hypers
            Z_lr_final, Z_mom_final, Z_wd_final = self.inner_loop()
            self.outer_step(outer_step_idx, Z_lr_final, Z_mom_final, Z_wd_final)
            ## See if schedule used for this outer_step led to best validation
            _, val_acc = self.validate(self.weights)
            _, test_acc = self.test(self.weights)
            if val_acc > self.best_validation_acc:
                self.best_validation_acc = val_acc
                self.best_outer_step = outer_step_idx
                #print(f'Best validation acc at outer_step idx {outer_step_idx}')
            ## Break if all hyperparameters have converged
            # NOTE(review): breaking here skips the logger.write below for the
            # final (converged) outer step — confirm this is intended.
            if self.converged:
                print('STOP HYPERTRAINING BECAUSE ALL HYPERPARAMETERS HAVE CONVERGED')
                break
            ## Time
            time_so_far = time.time() - t0
            self.logger.write({'budget': self.n_inner_epochs_for_this_outer_step, 'time': time_so_far,
                               'val_acc': val_acc, 'test_acc': test_acc})
            print(f'final val acc {100*val_acc:.2g} -- final test_acc: {100*test_acc:.2g}')
        ## Logging Final Metrics
        # outer_step_idx deliberately reused after the loop: record the last
        # schedule produced by outer_step(), which was never trained on.
        self.all_lr_schedules[outer_step_idx+1], self.all_mom_schedules[outer_step_idx+1], self.all_wd_schedules[outer_step_idx+1] = self.inner_lrs.detach(), self.inner_moms.detach(), self.inner_wds.detach() #last schedule was never trained on
        self.save_state(outer_step_idx+1)
        avg_test_loss, avg_test_acc = self.test(self.weights)
        return avg_test_acc
def validate(self, weights, fraction=1.0):
""" Fraction allows trading accuracy for speed when logging many times"""
self.classifier.eval()
running_acc, running_loss = AggregateTensor(), AggregateTensor()
with torch.no_grad():
for batch_idx, (x, y) in enumerate(self.val_loader):
x, y = x.to(device=self.args.device), y.to(device=self.args.device)
logits = self.classifier.forward_with_param(x, weights)
running_loss.update(self.cross_entropy(logits, y), x.shape[0])
running_acc.update(accuracy(logits, y, topk=(1,))[0], x.shape[0])
if fraction < 1 and (batch_idx + 1) >= fraction*len(self.val_loader):
break
self.classifier.train()
return float(running_loss.avg()), float(running_acc.avg())
def test(self, weights, fraction=1.0):
""" Fraction allows trading accuracy for speed when logging many times"""
self.classifier.eval()
running_acc, running_loss = AggregateTensor(), AggregateTensor()
with torch.no_grad():
for batch_idx, (x, y) in enumerate(self.test_loader):
x, y = x.to(device=self.args.device), y.to(device=self.args.device)
logits = self.classifier.forward_with_param(x, weights)
running_loss.update(self.cross_entropy(logits, y), x.shape[0])
running_acc.update(accuracy(logits, y, topk=(1,))[0], x.shape[0])
if fraction < 1 and (batch_idx + 1) >= fraction*len(self.test_loader):
break
self.classifier.train()
return float(running_loss.avg()), float(running_acc.avg())
def save_state(self, outer_step_idx):
torch.save({'args': self.args,
'outer_step_idx': outer_step_idx,
'best_outer_step': self.best_outer_step,
'best_validation_acc': self.best_validation_acc,
'all_lr_schedules': self.all_lr_schedules,
'all_lr_raw_grads': self.all_lr_raw_grads,
'all_lr_smooth_grads': self.all_lr_smooth_grads,
'all_mom_schedules': self.all_mom_schedules,
'all_mom_raw_grads': self.all_mom_raw_grads,
'all_mom_smooth_grads': self.all_mom_smooth_grads,
'all_wd_schedules': self.all_wd_schedules,
'all_wd_raw_grads': self.all_wd_raw_grads,
'all_wd_smooth_grads': self.all_wd_smooth_grads}, self.checkpoint_path)
class BaseLearner(object):
    """
    Retrain from scratch using learned schedule and
    whole training set.

    The lr/mom/wd schedules are 1-D tensors produced by the meta learner;
    get_hypers() maps training progress to an index into each of them and
    set_hypers() pushes the selected values into the SGD optimizer before
    every batch.
    """
    def __init__(self, args, lr_schedule, mom_schedule, wd_schedule, log_name):
        self.args = args
        self.inner_lrs = lr_schedule
        self.inner_moms = mom_schedule
        self.inner_wds = wd_schedule
        ## Loaders
        # NOTE(review): this mutates the shared ``args`` object for all later users.
        self.args.val_source = 'test' # retrain on full train set from scratch
        self.train_loader, _, self.test_loader = get_loaders(datasets_path=self.args.datasets_path,
                                                             dataset=self.args.dataset,
                                                             train_batch_size=self.args.train_batch_size,
                                                             val_batch_size=self.args.val_batch_size,
                                                             val_source=self.args.val_source,
                                                             val_train_fraction=self.args.val_train_fraction,
                                                             val_train_overlap=self.args.val_train_overlap,
                                                             workers=self.args.workers,
                                                             train_infinite=False,
                                                             val_infinite=False,
                                                             cutout=self.args.cutout,
                                                             cutout_length=self.args.cutout_length,
                                                             cutout_prob=self.args.cutout_prob)
        self.n_batches_per_epoch = len(self.train_loader)
        self.n_total_batches = self.args.retrain_n_epochs * self.n_batches_per_epoch
        ## Optimizer
        self.classifier = select_model(False, self.args.dataset, self.args.architecture,
                                       self.args.init_type, self.args.init_param,
                                       self.args.device).to(self.args.device)
        # Hypers start at zero; set_hypers() overwrites them before every batch.
        self.optimizer = optim.SGD(self.classifier.parameters(), lr=0.0, momentum=0.0, weight_decay=0.0) #set hypers manually later
        self.cross_entropy = nn.CrossEntropyLoss()
        ### Set up
        self.experiment_path = os.path.join(args.log_directory_path, args.experiment_name)
        self.logger = Logger(self.experiment_path, log_name)
    def log_init(self):
        # NOTE(review): these aggregates are re-initialized after every log(),
        # but train() keeps its own local meters, so these attributes appear
        # unused — confirm before relying on them.
        self.running_train_loss, self.running_train_acc = AggregateTensor(), AggregateTensor()
    def log(self, epoch, avg_train_loss, avg_train_acc):
        """Log one epoch's metrics; the full test set is only evaluated on the last epoch."""
        avg_test_loss, avg_test_acc = self.test(fraction=0.1 if epoch!=self.args.retrain_n_epochs-1 else 1)
        print('Retrain epoch {}/{} --- Train Acc: {:02.2f}% -- Test Acc: {:02.2f}%'.format(epoch+1, self.args.retrain_n_epochs, avg_train_acc * 100, avg_test_acc * 100))
        self.logger.write({'train_loss': avg_train_loss, 'train_acc': avg_train_acc, 'test_loss': avg_test_loss, 'test_acc': avg_test_acc})
        self.log_init()
    def get_hypers(self, epoch, batch_idx):
        """return hyperparameters to be used for given batch

        Each schedule is piecewise constant: overall training progress
        (epoch * n_batches_per_epoch + batch_idx) / n_total_batches selects
        one of the n_lrs / n_moms / n_wds segments.
        """
        lr_index = int(self.args.n_lrs * (epoch*self.n_batches_per_epoch + batch_idx)/self.n_total_batches)
        lr = float(self.inner_lrs[lr_index])
        mom_index = int(self.args.n_moms * (epoch*self.n_batches_per_epoch + batch_idx)/self.n_total_batches)
        mom = float(self.inner_moms[mom_index])
        wd_index = int(self.args.n_wds * (epoch*self.n_batches_per_epoch + batch_idx)/self.n_total_batches)
        wd = float(self.inner_wds[wd_index])
        return lr, mom, wd, lr_index, mom_index, wd_index
    def set_hypers(self, epoch, batch_idx):
        """Write the scheduled lr/momentum/weight-decay into every optimizer param group."""
        lr, mom, wd, lr_index, mom_index, wd_index = self.get_hypers(epoch, batch_idx)
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = lr
            param_group['momentum'] = mom
            param_group['weight_decay'] = wd
        #print(f'Setting: lr={lr}, mom={mom}, wd={wd}')
    def run(self):
        """Train for ``retrain_n_epochs`` epochs and return the final test accuracy."""
        for epoch in range(self.args.retrain_n_epochs):
            avg_train_loss, avg_train_acc = self.train(epoch)
            self.log(epoch, avg_train_loss, avg_train_acc)
        test_loss, test_acc = self.test()
        return test_acc
    def train(self, epoch):
        """Run one training epoch, setting the scheduled hypers before each batch.

        Returns:
            (average train loss, average train top-1 accuracy) as floats.
        """
        self.classifier.train()
        running_acc, running_loss = AggregateTensor(), AggregateTensor()
        for batch_idx, (x,y) in enumerate(self.train_loader):
            self.set_hypers(epoch, batch_idx)
            x, y = x.to(device=self.args.device), y.to(device=self.args.device)
            logits = self.classifier(x)
            loss = self.cross_entropy(input=logits, target=y)
            acc1 = accuracy(logits.data, y, topk=(1,))[0]
            running_loss.update(loss, x.shape[0])
            running_acc.update(acc1, x.shape[0])
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
        return float(running_loss.avg()), float(running_acc.avg())
    def test(self, fraction=1.0):
        """ fraction allows trading accuracy for speed when logging many times"""
        self.classifier.eval()
        running_acc, running_loss = AggregateTensor(), AggregateTensor()
        with torch.no_grad():
            for batch_idx, (x, y) in enumerate(self.test_loader):
                x, y = x.to(device=self.args.device), y.to(device=self.args.device)
                logits = self.classifier(x)
                running_loss.update(self.cross_entropy(logits, y), x.shape[0])
                running_acc.update(accuracy(logits, y, topk=(1,))[0], x.shape[0])
                if fraction < 1 and (batch_idx + 1) >= fraction*len(self.test_loader):
                    break
        self.classifier.train()
        return float(running_loss.avg()), float(running_acc.avg())
# ________________________________________________________________________________
# ________________________________________________________________________________
# ________________________________________________________________________________
def make_experiment_name(args):
    """Build the experiment folder name from the run configuration.

    Warning: Windows can have a weird behaviour for long filenames.
    Protip: switch to Ubuntu ;)
    """
    epochs_tag = ''.join(str(n) for n in args.n_inner_epochs_per_outer_steps)
    parts = [
        f'FSL_{args.dataset}_{args.architecture}_nepr{epochs_tag}',
        f'_init{args.init_type}-{args.init_param}',
        f'_tbs{args.train_batch_size}',
    ]
    # Optional tags only appear when the corresponding feature is enabled.
    if args.cutout:
        parts.append(f'_cutout-p{args.cutout_prob}')
    if args.clamp_HZ:
        parts.append(f'_HZclamp{args.clamp_HZ_range}')
    parts.append(f'_S{args.seed}')
    return ''.join(parts)
def main(args):
    """Meta-train the hyperparameter schedules, then (optionally) retrain a
    model from scratch with the final learned schedule and report accuracy."""
    set_torch_seeds(args.seed)
    # benchmark=True favours speed; runs are therefore not bit-deterministic.
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.deterministic = False
    torch.backends.cudnn.benchmark = True
    t0 = time.time()
    meta_learner = MetaLearner(args)
    meta_test_acc = meta_learner.run()
    total_time = time.time() - t0
    to_print = '\n\nMETA TEST ACC: {:02.2f}%'.format(meta_test_acc*100)
    # Results are also encoded into (empty) marker filenames in the log dir.
    file_name = "final_meta_test_acc_{:02.2f}_total_time_{}".format(meta_test_acc*100, format_time(total_time))
    create_empty_file(os.path.join(args.log_directory_path, args.experiment_name, file_name))
    if args.retrain_from_scratch:
        ## Fetch schedules
        # best_idx = meta_learner.best_outer_step
        # Index -1 is the last schedule saved by the meta learner, which was
        # produced by the final outer step but never trained on.
        final_lr_schedule, final_mom_schedule, final_wd_schedule = meta_learner.all_lr_schedules[-1], meta_learner.all_mom_schedules[-1], meta_learner.all_wd_schedules[-1]
        # best_lr_schedule, best_mom_schedule, best_wd_schedule = meta_learner.all_lr_schedules[best_idx], meta_learner.all_mom_schedules[best_idx], meta_learner.all_wd_schedules[best_idx]
        del meta_learner  # free the meta learner before building the retrain learner
        ## Retrain Last
        print(f'\n\n\n---------- RETRAINING FROM SCRATCH WITH LAST SCHEDULE (idx {args.n_outer_steps}) ----------')
        print(f'lrs = {final_lr_schedule.tolist()}')
        print(f'moms = {final_mom_schedule.tolist()}')
        print(f'wds = {final_wd_schedule.tolist()}')
        log_name = f'Rerun_last_outer_step.csv'
        base_learner = BaseLearner(args, final_lr_schedule, final_mom_schedule, final_wd_schedule, log_name)
        if args.use_gpu: torch.cuda.empty_cache()
        base_test_acc = base_learner.run()
        to_print += '\nRE-RUN LAST SCHEDULE TEST ACC: {:02.2f}%'.format(base_test_acc*100)
        file_name = "Rerun_last_test_acc_{:02.2f}".format(base_test_acc*100)
        create_empty_file(os.path.join(args.log_directory_path, args.experiment_name, file_name))
        # ## Retrain Best Val
        # print(f'\n\n\n---------- RETRAINING FROM SCRATCH WITH BEST VAL SCHEDULE (idx {best_idx}) ----------')
        # print(f'lrs = {best_lr_schedule.tolist()}')
        # print(f'moms = {best_mom_schedule.tolist()}')
        # print(f'wds = {best_wd_schedule.tolist()}')
        #
        # log_name = f'Rerun_best_outer_step_idx_{best_idx}.csv'
        # base_learner = BaseLearner(args, best_lr_schedule, best_mom_schedule, best_wd_schedule, log_name)
        # if args.use_gpu: torch.cuda.empty_cache()
        # base_test_acc = base_learner.run()
        # to_print += '\nRE-RUN BEST SCHEDULE TEST ACC: {:02.2f}%'.format(base_test_acc*100)
        # file_name = "Rerun_best_test_acc_{:02.2f}".format(base_test_acc*100)
        # create_empty_file(os.path.join(args.log_directory_path, args.experiment_name, file_name))
    print(to_print)
if __name__ == "__main__":
    import argparse
    print('Running...')
    parser = argparse.ArgumentParser(description='Welcome to GreedyGrad')
    ## Main
    parser.add_argument('--learn_lr', type=str2bool, default=True)
    parser.add_argument('--learn_mom', type=str2bool, default=True)
    parser.add_argument('--learn_wd', type=str2bool, default=True)
    parser.add_argument('--n_lrs', type=int, default=7)
    parser.add_argument('--n_moms', type=int, default=1)
    parser.add_argument('--n_wds', type=int, default=1)
    parser.add_argument('--dataset', type=str, default='CIFAR10')
    parser.add_argument('--n_inner_epochs_per_outer_steps', nargs='*', type=int, default=[1, 10, 10, 10, 10, 10, 10, 10, 10, 10], help='number of epochs to run for each outer step')
    parser.add_argument('--pruning_mode', type=str, choices=['alternate', 'truncate'], default='alternate')
    parser.add_argument('--pruning_ratio', type=float, default=0.0, help='<1, how many inner steps to skip Z calculation for expressed as a fraction of total inner steps per hyper')
    ## Architecture
    parser.add_argument('--architecture', type=str, default='WRN-16-1')
    parser.add_argument('--init_type', type=str, default='xavier', choices=['normal', 'xavier', 'kaiming', 'orthogonal', 'zero', 'default'], help='network initialization scheme')
    parser.add_argument('--init_param', type=float, default=1, help='network initialization param: gain, std, etc.')
    parser.add_argument('--init_norm_weights', type=float, default=1, help='init gammas of BN')
    ## Inner Loop
    parser.add_argument('--inner_lr_init', type=float, default=0, help='SGD inner learning rate init')
    parser.add_argument('--inner_mom_init', type=float, default=0, help='SGD inner momentum init')
    parser.add_argument('--inner_wd_init', type=float, default=0, help='SGD inner weight decay init')
    parser.add_argument('--train_batch_size', type=int, default=256)
    parser.add_argument('--clamp_grads', type=str2bool, default=True)
    parser.add_argument('--clamp_grads_range', type=float, default=3, help='clamp inner grads for each batch to +/- that')
    parser.add_argument('--cutout', type=str2bool, default=False)
    parser.add_argument('--cutout_length', type=int, default=16)
    # Fixed help text: it was a copy-paste of the grad-clamp description.
    parser.add_argument('--cutout_prob', type=float, default=1, help='probability of applying cutout to each image')
    ## Outer Loop
    parser.add_argument('--val_batch_size', type=int, default=500)
    parser.add_argument('--val_train_fraction', type=float, default=0.05)
    parser.add_argument('--val_train_overlap', type=str2bool, default=False, help='if True and val_source=train, val images are also in train set')
    parser.add_argument('--lr_init_step_size', type=float, default=0.1, help='at each iteration grads changed so that each hyper can only change by this fraction (ignoring outer momentum)')
    parser.add_argument('--mom_init_step_size', type=float, default=0.1)
    parser.add_argument('--wd_init_step_size', type=float, default=3e-4)
    parser.add_argument('--lr_step_decay', type=float, default=0.5, help='step size multiplied by this much if hypergrad sign changes')
    parser.add_argument('--mom_step_decay', type=float, default=0.5, help='step size multiplied by this much if hypergrad sign changes')
    parser.add_argument('--wd_step_decay', type=float, default=0.5, help='step size multiplied by this much if hypergrad sign changes')
    parser.add_argument('--clamp_HZ', type=str2bool, default=True)
    parser.add_argument('--clamp_HZ_range', type=float, default=1, help='clamp to +/- that')
    parser.add_argument('--converged_frac', type=float, default=0.05, help='if steps are smaller than this percentage of hypers, stop experiment')
    ## Other
    parser.add_argument('--retrain_from_scratch', type=str2bool, default=True, help='retrain from scratch with learned lr schedule')
    parser.add_argument('--retrain_n_epochs', type=int, default=50, help='interpolates from learned schedule, -1 for same as n_inner_epochs_per_outer_steps[-1]')
    parser.add_argument('--datasets_path', type=str, default="~/Datasets/Pytorch/")
    parser.add_argument('--log_directory_path', type=str, default="./logs/")
    parser.add_argument('--epoch_log_freq', type=int, default=1, help='every how many epochs to save summaries')
    parser.add_argument('--outer_step_log_freq', type=int, default=1, help='every how many outer_steps to save the whole run')
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--workers', type=int, default=0)
    parser.add_argument('--use_gpu', type=str2bool, default=True)
    args = parser.parse_args()
    # Derived settings and config sanity checks.
    args.dataset_path = os.path.join(args.datasets_path, args.dataset)
    args.use_gpu = args.use_gpu and torch.cuda.is_available()
    args.device = torch.device('cuda') if args.use_gpu else torch.device('cpu')
    assert args.lr_step_decay < 1
    assert args.mom_step_decay < 1
    assert args.wd_step_decay < 1
    assert args.converged_frac < 1
    if args.retrain_n_epochs < 0: args.retrain_n_epochs = args.n_inner_epochs_per_outer_steps[-1]
    assert args.pruning_ratio <= 1
    args.n_outer_steps = len(args.n_inner_epochs_per_outer_steps)
    args.experiment_name = make_experiment_name(args)
    print('\nRunning on device: {}'.format(args.device))
    if args.use_gpu: print(torch.cuda.get_device_name(0))
    main(args)
| 37,988 | 54.866176 | 243 | py |
FDS | FDS-main/figure2_hypergradients_fluctuation.py | """
Here we measure hypergradients for several runs when perturbing
the training data and weight initialization. This must be done on toy
datasets where reverse-mode differentiation is tractable. This corresponds
to figure 2 in the paper.
"""
import torch.optim as optim
import pickle
import os
import warnings
import sys
import shutil
import torch
import torch.nn.functional as F
import torch.optim as optimw
from utils.helpers import *
from utils.datasets import *
from models.selector import *
class HyperGradFluctuation(object):
    """Measure learning-rate hypergradients over several runs.

    Each run re-initializes the network, trains it for T batches with SGD
    (momentum + weight decay) while keeping the autograd graph alive, then
    backpropagates a validation loss into the per-step learning rates. The
    resulting (n_runs x T) hypergradient matrix is saved to disk.
    """
    def __init__(self, args):
        self.args = args
        # One row of lr hypergradients per run, one column per inner step.
        self.hypergrads_all = torch.zeros((self.args.n_runs, self.args.T))
        self.cross_entropy = nn.CrossEntropyLoss()
        self.init_lr_schedule()
        ## Loaders
        self.infinite_train_loader, self.val_loader, _ = get_loaders(datasets_path=self.args.datasets_path,
                                                                     dataset=self.args.dataset,
                                                                     train_batch_size=self.args.train_batch_size,
                                                                     val_batch_size=self.args.n_val_images,
                                                                     val_source='test',
                                                                     workers=self.args.workers,
                                                                     train_infinite=True,
                                                                     val_infinite=False)
        # Cache the validation data on-device. NOTE(review): if the loader
        # yields more than one batch only the last is kept — confirm the val
        # loader is a single batch of n_val_images.
        for x,y in self.val_loader: self.X_val, self.Y_val = x.to(device=self.args.device), y.to(device=self.args.device)
        ## Set up experiment folder
        self.experiment_path = os.path.join(self.args.log_directory_path, self.args.experiment_name)
        if os.path.isfile(os.path.join(self.experiment_path, 'hypergrads.pth.tar')):
            # NOTE(review): the "already ran" guard only raises on GPU runs;
            # CPU runs silently keep the existing folder — confirm intended.
            if args.use_gpu: raise FileExistsError(f'Experiment already ran and exists at {self.experiment_path}. \nStopping now')
        else:
            if os.path.exists(self.experiment_path):
                shutil.rmtree(self.experiment_path)
            os.makedirs(self.experiment_path)
        ## Save and Print Args
        copy_file(os.path.realpath(__file__), self.experiment_path) # save this python file in logs folder
        print('\n---------')
        with open(os.path.join(self.experiment_path, 'args.txt'), 'w+') as f:
            for k, v in self.args.__dict__.items():
                print(k, v)
                f.write("{} \t {}\n".format(k, v))
        print('---------\n')
    def init_lr_schedule(self):
        """Initialize the T per-step learning rates (leaf tensor, requires_grad).

        With cosine annealing, the schedule is read off a dummy optimizer and
        CosineAnnealingLR scheduler (get_last_lr() is queried before stepping,
        so entry 0 equals inner_lr_init); otherwise the schedule is constant.
        """
        if self.args.inner_lr_cosine_anneal:
            dummy_opt = optim.SGD([torch.ones([1], requires_grad=True)], lr=self.args.inner_lr_init)
            dummy_scheduler = optim.lr_scheduler.CosineAnnealingLR(dummy_opt, T_max=self.args.T)
            lrs = []
            for i in range(self.args.T):
                lrs.append(dummy_scheduler.get_last_lr()[0])
                dummy_opt.step()
                dummy_scheduler.step()
            self.inner_lrs = torch.tensor(lrs, requires_grad=True, device=self.args.device)
        else:
            self.inner_lrs = torch.full((self.args.T,), self.args.inner_lr_init, requires_grad=True, device=self.args.device)
    def inner_and_outer_loop(self):
        """Train for T batches of SGD and backprop a validation loss into the lrs.

        create_graph=True keeps the training graph alive so the validation
        loss can be differentiated through the whole trajectory (reverse-mode
        hypergradients). In greedy mode, hypergradients are instead computed
        after every step and the graph is truncated by detaching the weights
        and velocity.
        """
        for self.inner_step_idx, (x_train, y_train) in enumerate(self.infinite_train_loader):
            x_train, y_train = x_train.to(self.args.device, self.args.dtype), y_train.to(self.args.device)
            train_logits = self.classifier.forward_with_param(x_train, self.weights)
            train_loss = self.cross_entropy(train_logits, y_train)
            grads = torch.autograd.grad(train_loss, self.weights, create_graph=True)[0]
            if self.args.clamp_inner_grads: grads.clamp_(-self.args.clamp_inner_grads_range, self.args.clamp_inner_grads_range)
            # SGD with momentum and weight decay, written out so the update
            # stays differentiable w.r.t. the per-step learning rate.
            self.velocity = self.args.inner_momentum * self.velocity + (grads + self.args.inner_weight_decay * self.weights)
            self.weights = self.weights - self.inner_lrs[self.inner_step_idx] * self.velocity
            if self.args.greedy:
                self.compute_hypergradients() #only populates .grad of one item in self.inner_lrs
                self.weights.detach_().requires_grad_()
                self.velocity.detach_().requires_grad_()
            if self.inner_step_idx+1 == self.args.T: break
        if not self.args.greedy: self.compute_hypergradients() #populates .grad of all items in self.inner_lrs
    def compute_hypergradients(self):
        """Backprop the validation loss; gradients accumulate in self.inner_lrs.grad."""
        val_logits = self.classifier.forward_with_param(self.X_val, self.weights)
        val_loss = self.cross_entropy(val_logits, self.Y_val)
        val_loss.backward()
    def run(self):
        """Repeat the experiment n_runs times with fresh model initializations.

        The lr .grad buffer is zeroed between runs so hypergradients do not
        accumulate across runs; results are checkpointed after every run.
        """
        for self.run_idx in range(self.args.n_runs):
            self.classifier = select_model(True, self.args.dataset, self.args.architecture,
                                           self.args.init_type, self.args.init_param,
                                           self.args.device).to(self.args.device)
            self.weights = self.classifier.get_param()
            self.velocity = torch.zeros(self.weights.numel(), device=self.args.device)
            self.inner_and_outer_loop()
            self.hypergrads_all[self.run_idx] = self.inner_lrs.grad.detach()
            self.inner_lrs.grad.data.zero_()
            self.save_final()
    def save_final(self):
        """Persist args and the hypergradient matrix to hypergrads.pth.tar."""
        torch.save({'args': self.args,
                    'hypergrads_all': self.hypergrads_all},
                   os.path.join(self.experiment_path, 'hypergrads.pth.tar'))
        print(f"Saved hypergrads to {os.path.join(self.experiment_path, 'hypergrads.pth.tar')}")
# ________________________________________________________________________________
# ________________________________________________________________________________
# ________________________________________________________________________________
def make_experiment_name(args):
    """Compose the experiment directory name from the run configuration."""
    tags = [f'Hg_{args.dataset}_{args.init_type}_T{args.T}_tbs{args.train_batch_size}'
            f'_mom{args.inner_momentum}_wd{args.inner_weight_decay}_ilr{args.inner_lr_init}']
    # Optional tags only appear when the corresponding option is active.
    if args.inner_lr_cosine_anneal:
        tags.append('cosine')
    if args.greedy:
        tags.append('_GREEDY')
    if args.dtype == torch.float64:
        tags.append('_FL64')
    tags.append(f'_S{args.seed}')
    return ''.join(tags)
def main(args):
    """Run the hypergradient-fluctuation experiment and record total runtime."""
    set_torch_seeds(args.seed)
    # Deterministic cudnn so repeated runs are comparable.
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = True
    start = time.time()
    learner = HyperGradFluctuation(args)
    learner.run()
    elapsed = time.time() - start
    # Total runtime is encoded into the name of an (otherwise empty) marker file.
    marker = os.path.join(args.log_directory_path, args.experiment_name,
                          'TOTAL_TIME_' + format_time(elapsed))
    with open(marker, 'w+') as f:
        f.write("NA")
if __name__ == "__main__":
    import argparse
    print('Running...')
    parser = argparse.ArgumentParser(description='Welcome to GreedyGrad')
    ## Main
    parser.add_argument('--T', type=int, default=250, help='number of batches for the task and to learn a schedule over')
    parser.add_argument('--n_runs', type=int, default=100, help='how many times to compute hypergrads, with different train-val-split each time')
    parser.add_argument('--dataset', type=str, default='SVHN')
    parser.add_argument('--greedy', type=str2bool, default=False)
    parser.add_argument('--architecture', type=str, default='LeNet')
    parser.add_argument('--init_type', type=str, default='xavier', choices=['normal', 'xavier', 'kaiming', 'orthogonal', 'zero', 'default'], help='network initialization scheme')
    parser.add_argument('--init_param', type=float, default=1, help='network initialization param: gain, std, etc.')
    # NOTE(review): help says "ignored unless val_source=train" but the loader
    # above is built with val_source='test' — confirm which is current.
    parser.add_argument('--n_val_images', type=int, default=2000, help='ignored unless val_source=train') #20% of 60k=12000
    ## Inner Loop
    parser.add_argument('--inner_lr_init', type=float, default=0.01, help='Used to initialize inner learning rate(s).')
    parser.add_argument('--inner_lr_cosine_anneal', type=str2bool, default=True, help='Initial schedule is cosine annealing')
    parser.add_argument('--inner_momentum', type=float, default=0.9, help='SGD inner momentum')
    parser.add_argument('--inner_weight_decay', type=float, default=0.0, help='SGD + ADAM inner weight decay')
    parser.add_argument('--train_batch_size', type=int, default=128)
    parser.add_argument('--clamp_inner_grads', type=str2bool, default=True)
    parser.add_argument('--clamp_inner_grads_range', type=float, default=1, help='clamp inner grads for each batch to +/- that')
    ## Misc
    parser.add_argument('--datasets_path', type=str, default="~/Datasets/Pytorch/")
    parser.add_argument('--log_directory_path', type=str, default="./logs/")
    parser.add_argument('--dtype', type=str, default='float32', choices=['float32', 'float64'])
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--workers', type=int, default=0)
    parser.add_argument('--use_gpu', type=str2bool, default=True)
    args = parser.parse_args()
    args.dataset_path = os.path.join(args.datasets_path, args.dataset)
    args.use_gpu = args.use_gpu and torch.cuda.is_available()
    args.device = torch.device('cuda') if args.use_gpu else torch.device('cpu')
    # args.dtype is still the CLI string here; the default tensor type must be
    # set before it is replaced with a torch dtype on the next line.
    if args.dtype == 'float64':
        torch.set_default_tensor_type(torch.DoubleTensor) # changes weights and tensors but not loaders
    args.dtype = torch.float64 if args.dtype == 'float64' else torch.float32
    print('\nRunning on device: {}'.format(args.device))
    args.experiment_name = make_experiment_name(args)
    main(args)
| 9,738 | 49.201031 | 178 | py |
FDS | FDS-main/models/wresnet.py | """
Base architecture taken from https://github.com/xternalz/WideResNet-pytorch
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.meta_factory import ReparamModule
class BasicBlock(nn.Module):
    """Pre-activation residual block: two BN-ReLU-Conv stages plus a skip
    connection (1x1-projected when the channel count or stride changes)."""
    def __init__(self, in_planes, out_planes, stride, dropRate):
        super(BasicBlock, self).__init__()
        self.dropRate = dropRate
        # First BN-ReLU-Conv (carries the block's stride).
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        # Second BN-ReLU-Conv (always stride 1).
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.equalInOut = in_planes == out_planes
        if self.equalInOut:
            # Identity skip: no projection needed.
            self.convShortcut = None
        else:
            self.convShortcut = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                                          padding=0, bias=False)
    def forward(self, x):
        preact = self.relu1(self.bn1(x))
        # Identity skip when shapes match; otherwise project the *activated* input.
        shortcut = x if self.equalInOut else self.convShortcut(preact)
        out = self.relu2(self.bn2(self.conv1(preact)))
        if self.dropRate > 0:
            out = F.dropout(out, p=self.dropRate, training=self.training)
        return torch.add(shortcut, self.conv2(out))
class NetworkBlock(nn.Module):
    """One stage of a WideResNet: a sequential stack of `nb_layers` blocks.

    Only the first block in the stack changes the channel count and applies
    the stage stride; every subsequent block maps out_planes -> out_planes
    with stride 1.
    """
    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate):
        """
        :param nb_layers: number of blocks in this stage (floats are truncated via int())
        :param block: block class, instantiated as block(in, out, stride, dropRate)
        :param stride: stride applied by the first block only
        """
        super(NetworkBlock, self).__init__()
        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)
    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
        # Replaced the fragile `cond and a or b` idiom with explicit conditional
        # expressions: `and/or` misfires when the middle operand is falsy.
        layers = []
        for i in range(int(nb_layers)):
            layers.append(block(in_planes if i == 0 else out_planes,
                                out_planes,
                                stride if i == 0 else 1,
                                dropRate))
        return nn.Sequential(*layers)
    def forward(self, x):
        return self.layer(x)
class WideResNet(nn.Module):
    """Wide Residual Network (WRN-depth-widen_factor) for small inputs.

    Three residual stages built from BasicBlock, followed by BN + ReLU,
    8x8 global average pooling and a single linear classifier.
    """
    def __init__(self, depth, n_classes, n_channels, widen_factor=1, dropRate=0.0):
        super(WideResNet, self).__init__()
        widths = [16, 16*widen_factor, 32*widen_factor, 64*widen_factor]
        assert((depth - 4) % 6 == 0)  # WRN constraint: depth = 6n + 4
        blocks_per_stage = (depth - 4) / 6
        block = BasicBlock
        # Stem convolution before the three residual stages.
        self.conv1 = nn.Conv2d(n_channels, widths[0], kernel_size=3, stride=1,
                               padding=1, bias=False)
        # Stage 1 keeps the resolution; stages 2 and 3 halve it (stride 2).
        self.block1 = NetworkBlock(blocks_per_stage, widths[0], widths[1], block, 1, dropRate)
        self.block2 = NetworkBlock(blocks_per_stage, widths[1], widths[2], block, 2, dropRate)
        self.block3 = NetworkBlock(blocks_per_stage, widths[2], widths[3], block, 2, dropRate)
        # Classification head: BN + ReLU, then pooling + linear in forward().
        self.final_bn = nn.BatchNorm2d(widths[3], affine=True)
        self.final_relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(widths[3], n_classes)
        self.nChannels = widths[3]
    def forward(self, x):
        features = self.block3(self.block2(self.block1(self.conv1(x))))
        features = self.final_relu(self.final_bn(features))
        pooled = F.avg_pool2d(features, 8)
        return self.fc(pooled.view(-1, self.nChannels))
class MetaWideResNet(ReparamModule):
    """WideResNet variant whose parameters are flattened into a single
    `flat_w` tensor by the ReparamModule metaclass, enabling
    forward_with_param(x, weights). Architecture is identical to WideResNet
    above; only the base class and the required `device` attribute differ."""
    def __init__(self, depth, n_classes, n_channels, widen_factor=1, dropRate=0.0, device='cpu'):
        super(MetaWideResNet, self).__init__()
        # ReparamModule.get_param()/forward_with_param rely on self.device.
        self.device = device
        nChannels = [16, 16*widen_factor, 32*widen_factor, 64*widen_factor]
        assert((depth - 4) % 6 == 0)  # WRN constraint: depth = 6n + 4
        n = (depth - 4) / 6
        block = BasicBlock
        # 1st conv before any network block
        self.conv1 = nn.Conv2d(n_channels, nChannels[0], kernel_size=3, stride=1,
                               padding=1, bias=False)
        # 1st block
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        # 2nd block
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
        # 3rd block
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
        # global average pooling and classifier
        self.final_bn = nn.BatchNorm2d(nChannels[3], affine=True)
        self.final_relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(nChannels[3], n_classes)
        self.nChannels = nChannels[3]
    def forward(self, x):
        out = self.conv1(x)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.final_relu(self.final_bn(out))
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.nChannels)
        return self.fc(out)
if __name__ == '__main__':
    # Smoke test: time a forward pass through both the plain and the meta WRN
    # and print a torchsummary layer listing for each.
    import time
    from torchsummary import summary
    from utils.helpers import *
    set_torch_seeds(0)
    x = torch.FloatTensor(2, 3, 32, 32).uniform_(0, 1)
    ## Test normal WRN
    model = WideResNet(depth=40, widen_factor=2, n_channels=3, n_classes=10, dropRate=0.0)
    t0 = time.time()
    out = model(x)
    print(f'time for normal fw pass: {time.time() - t0}s')
    summary(model, (3, 32, 32))
    ## Test meta WRN
    model = MetaWideResNet(depth=40, widen_factor=2, n_channels=3, n_classes=10, device='cpu')
    # Meta models are driven through the flattened weight vector.
    weights = model.get_param()
    t0 = time.time()
    out = model.forward_with_param(x, weights)
    print(f'time for meta fw pass: {time.time() - t0}s')
    summary(model, (3, 32, 32))
| 5,745 | 38.627586 | 116 | py |
FDS | FDS-main/models/lenet.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from models.meta_factory import ReparamModule
from models.helpers import *
class Flatten(nn.Module):
    """Module wrapper around tensor flattening: collapses every dimension
    after the batch dimension into one, i.e. the module form of
    torch.flatten(x, start_dim=1, end_dim=-1)."""
    def __init__(self):
        super().__init__()
    def forward(self, input):
        # Tensor.flatten(1) is identical to torch.flatten(input, 1, -1).
        return input.flatten(1)
class LeNet(nn.Module):
    """Classic LeNet-5 style convnet for 28x28 or 32x32 images.

    Two conv/ReLU/max-pool stages followed by three fully-connected layers.
    """
    def __init__(self, n_classes, n_channels, im_size):
        super(LeNet, self).__init__()
        assert im_size in [28, 32]
        # Flattened feature size after the two conv/pool stages.
        flat_features = 16*5*5 if im_size==32 else 16*4*4
        self.n_classes = n_classes
        self.n_channels = n_channels
        self.im_size = im_size
        stages = [
            nn.Conv2d(n_channels, 6, kernel_size=5, stride=1, padding=0),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2, padding=0),
            nn.Conv2d(6, 16, kernel_size=5, stride=1, padding=0),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2, padding=0),
            Flatten(),
            nn.Linear(flat_features, 120),
            nn.ReLU(inplace=True),
            nn.Linear(120, 84),
            nn.ReLU(inplace=True),
            nn.Linear(84, n_classes),
        ]
        self.layers = nn.Sequential(*stages)
    def forward(self, x):
        return self.layers(x)
class MetaLeNet(ReparamModule):
    """LeNet variant whose parameters are flattened into a single `flat_w`
    tensor by the ReparamModule metaclass, enabling
    forward_with_param(x, weights). Architecture matches LeNet above."""
    def __init__(self, n_classes, n_channels, im_size, device='cpu'):
        super(MetaLeNet, self).__init__()
        assert im_size in [28, 32]
        # Flattened feature size after the two conv/pool stages.
        h = 16*5*5 if im_size==32 else 16*4*4
        self.n_classes = n_classes
        self.n_channels = n_channels
        self.im_size = im_size
        self.device = device # must be defined for parent class
        self.layers = nn.Sequential(
            nn.Conv2d(n_channels, 6, kernel_size=5, stride=1, padding=0),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2, padding=0),
            nn.Conv2d(6, 16, kernel_size=5, stride=1, padding=0),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2, padding=0),
            Flatten(),
            nn.Linear(h, 120),
            nn.ReLU(inplace=True),
            nn.Linear(120, 84),
            nn.ReLU(inplace=True),
            nn.Linear(84, n_classes))
    def forward(self, x):
        return self.layers(x)
if __name__ == '__main__':
    # Smoke test: time a forward pass through the meta LeNet via its
    # flattened weight vector and print a torchsummary layer listing.
    import time
    from torchsummary import summary
    from utils.helpers import *
    set_torch_seeds(0)
    x = torch.FloatTensor(256, 3, 32, 32).uniform_(0, 1)
    ## Test meta LeNet
    model = MetaLeNet(n_classes=10, n_channels=3, im_size=32, device='cpu')
    weights = model.get_param()
    t0 = time.time()
    out = model.forward_with_param(x, weights)
    print(f'time for meta fw pass: {time.time() - t0}s')
    summary(model, (3, 32, 32))
| 2,829 | 25.203704 | 75 | py |
FDS | FDS-main/models/meta_factory.py | """
This is a slim version of the code from https://github.com/SsnL/dataset-distillation
"""
import torch
import torchvision
import logging
import torch.nn as nn
import torch.nn.functional as F
import functools
import math
import types
from contextlib import contextmanager
from torch.optim import lr_scheduler
from six import add_metaclass
from itertools import chain
from copy import deepcopy
from models.helpers import *
class MetaFactory(type):
    """Metaclass that, at construction time, replaces all of a module's
    parameters with a single flattened parameter `flat_w`.

    The original parameter tensors are deleted and re-registered as None
    buffers; views into `flat_w` are re-attached per forward pass by
    ReparamModule.unflatten_weight()."""
    def __call__(cls, *args, **kwargs):
        r"""Called when you call ReparamModule(...) """
        net = type.__call__(cls, *args, **kwargs)
        # collect weight (module, name) pairs
        # flatten weights
        w_modules_names = []
        for m in net.modules():
            for n, p in m.named_parameters(recurse=False):
                if p is not None:
                    w_modules_names.append((m, n))
            for n, b in m.named_buffers(recurse=False):
                if b is not None:
                    # Buffers (e.g. BN running stats) are deliberately left
                    # untouched; the warning below was silenced upstream.
                    pass
                    # logging.warning((
                    #     '{} contains buffer {}. The buffer will be treated as '
                    #     'a constant and assumed not to change during gradient '
                    #     'steps. If this assumption is violated (e.g., '
                    #     'BatcHNorm*d\' running_mean/var), the computation will '
                    #     'be incorrect.').format(m.__class__.__name__, n))
        net._weights_module_names = tuple(w_modules_names)
        # Put to correct device before we do stuff on parameters
        #net = net.to(device)
        ws = tuple(m._parameters[n].detach() for m, n in w_modules_names)
        # All parameters must share a dtype to be concatenated into one vector.
        assert len(set(w.dtype for w in ws)) == 1
        # reparam to a single flat parameter; numels/shapes are kept so the
        # flat vector can be split back into per-parameter views later.
        net._weights_numels = tuple(w.numel() for w in ws)
        net._weights_shapes = tuple(w.shape for w in ws)
        with torch.no_grad():
            flat_w = torch.cat([w.reshape(-1) for w in ws], 0)
        # remove old parameters, assign the names as buffers
        for m, n in net._weights_module_names:
            delattr(m, n)
            m.register_buffer(n, None)
        # register the flat one
        net.register_parameter('flat_w', nn.Parameter(flat_w, requires_grad=True))
        return net
@add_metaclass(MetaFactory)
class ReparamModule(nn.Module):
    """
    Make an architecture inherit this class instead of nn.Module to allow .forward_with_params()
    This changes state_dict() to a one value dict containing 'flat_w'
    This requires self.device to be defined in the module
    """
    def _apply(self, *args, **kwargs):
        # Pass-through; kept so subclasses can hook device/dtype moves later.
        rv = super(ReparamModule, self)._apply(*args, **kwargs)
        return rv
    def get_param(self, clone=False):
        """Return the flattened weight vector on self.device.

        :param clone: if True, return a detached copy (with the same
            requires_grad flag) instead of the live parameter.
        """
        if clone:
            return self.flat_w.detach().clone().requires_grad_(self.flat_w.requires_grad).to(device=self.device)
        return self.flat_w.to(device=self.device)
    @contextmanager
    def unflatten_weight(self, flat_w):
        """
        Temporarily attach views of `flat_w` to the original (module, name)
        slots so that a normal forward pass can run; detaches them on exit.

        This changes self.state_dict()
        from --> odict_keys(['flat_w'])
        to --> odict_keys(['flat_w', 'layers.0.weight', 'layers.0.bias', ... ]
        Somehow removes 'bias=False' in self._weights_module_names conv names, and
        replaces 'bias=False' by 'bias=True' in linear layers.
        type(self.state_dict()) = <class 'collections.OrderedDict'> before and after
        """
        ws = (t.view(s) for (t, s) in zip(flat_w.split(self._weights_numels), self._weights_shapes))
        for (m, n), w in zip(self._weights_module_names, ws):
            setattr(m, n, w)
        yield
        # Restore the "flattened" state: slots hold None between forward passes.
        for m, n in self._weights_module_names:
            setattr(m, n, None)
    def forward_with_param(self, inp, new_w):
        """Run a forward pass using `new_w` as the (flattened) weights."""
        #print(type(self.state_dict()))
        with self.unflatten_weight(new_w):
            # print('FLATTENED')
            # print('state_dict: ', type(self.state_dict()), [(k, v.shape) for k,v in self.state_dict().items()])
            # print('self._weights_module_names: ', self._weights_module_names)
            return nn.Module.__call__(self, inp)
    def __call__(self, inp):
        # Plain call routes through the module's own flat parameter.
        return self.forward_with_param(inp, self.flat_w)
    def load_state_dict(self, state_dict, *args, **kwargs):
        """
        Make load_state_dict work on both singleton dicts
        containing a flattened weight tensor and full dicts
        containing unflattened weight tensors. Useful when loading
        weights from non-meta architectures
        """
        if len(state_dict) == 1 and 'flat_w' in state_dict:
            return super(ReparamModule, self).load_state_dict(state_dict, *args, **kwargs)
        # Unflattened dict: expose per-layer slots, load into the views (which
        # write through into flat_w), then re-register the flat parameter.
        with self.unflatten_weight(self.flat_w):
            flat_w = self.flat_w
            del self.flat_w
            super(ReparamModule, self).load_state_dict(state_dict, *args, **kwargs)
            self.register_parameter('flat_w', flat_w)
    def unflattened_weights(self):
        """Return a per-layer state_dict (without the 'flat_w' entry)."""
        #print(float(torch.sum(self.state_dict()['flat_w'])))
        with self.unflatten_weight(self.flat_w):
            state_dict = deepcopy(self.state_dict())
            del state_dict['flat_w']
        return state_dict
    def layer_names(self):
        """Return one human-readable name per parameter tensor, encoding the
        layer index, layer type (conv/bn/fc), weight-vs-bias, and size.

        :raises ValueError: for parameter-owning module types other than
            Conv2d / BatchNorm2d / Linear.
        """
        layer_names = []
        layer_count = 0
        prev_layer = None
        for (name, n) in zip(self._weights_module_names, self._weights_numels):
            # Bump the layer counter whenever the owning module changes.
            if name[0] != prev_layer:
                layer_count += 1
                prev_layer = name[0]
            if isinstance(name[0], torch.nn.Conv2d) and name[1]=='weight':
                layer_names.append('L{}_conv_W_s{}'.format(layer_count, n))
            elif isinstance(name[0], torch.nn.Conv2d) and name[1]=='bias':
                layer_names.append('L{}_conv_b_s{}'.format(layer_count, n))
            elif isinstance(name[0], torch.nn.BatchNorm2d) and name[1]=='weight':
                layer_names.append('L{}_bn_W_s{}'.format(layer_count, n))
            elif isinstance(name[0], torch.nn.BatchNorm2d) and name[1]=='bias':
                layer_names.append('L{}_bn_b_s{}'.format(layer_count, n))
            elif isinstance(name[0], torch.nn.Linear) and name[1]=='weight':
                layer_names.append('L{}_fc_W_s{}'.format(layer_count, n))
            elif isinstance(name[0], torch.nn.Linear) and name[1]=='bias':
                layer_names.append('L{}_fc_b_s{}'.format(layer_count, n))
            else:
                raise ValueError('Unknown layer type {}'.format(name))
        return layer_names
    def get_bn_masks(self):
        """
        Returns 2 boolean masks of size n_weights,
        where ones correspond to batchnorm gammas in first mask,
        and batchnorm betas in second mask
        """
        gammas_mask = torch.zeros(self.flat_w.shape[0], dtype=torch.bool)
        betas_mask = torch.zeros(self.flat_w.shape[0], dtype=torch.bool)
        i = 0
        for (name, n) in zip(self._weights_module_names, self._weights_numels):
            is_BN = isinstance(name[0], torch.nn.BatchNorm2d) or isinstance(name[0], torch.nn.BatchNorm1d)
            if is_BN and name[1]=='weight':
                gammas_mask[i:i+n] = 1
            elif is_BN and name[1]=='bias':
                betas_mask[i:i+n] = 1
            i += n
        return gammas_mask, betas_mask
    def flattened_unflattened_weights(self):
        """
        somehow unflattening weights changes the value of their sum.
        This looks like it's because permutation matters in float 32 sum operation and
        so different data structures give different results to the same operations
        even though they contain the same values. Here unflattening and reflattening
        recovers the sum value of the original self.get_param() method.
        """
        with self.unflatten_weight(self.flat_w):
            state_dict = deepcopy(self.state_dict())
            del state_dict['flat_w']
        flat_w = torch.cat([w.reshape(-1) for w in state_dict.values()], 0) #.type(torch.DoubleTensor) doesn't change behaviour
        return flat_w
    def initialize(self, init_type='xavier', init_param=1, init_norm_weights=1, inplace=True):
        """Apply an initialization scheme to the flattened weights.

        :param inplace: if True, (re)initialize self.flat_w itself; otherwise
            initialize and return a fresh flat tensor of the same shape.
        :return: the (re)initialized flat weight tensor
        """
        if inplace:
            flat_w = self.flat_w
        else:
            flat_w = torch.empty_like(self.flat_w).requires_grad_()
        with torch.no_grad():
            # Expose per-layer views so the standard initializer can run.
            with self.unflatten_weight(flat_w):
                initialize(self, init_type=init_type, init_param=init_param, init_norm_weights=init_norm_weights)
        return flat_w
| 8,482 | 35.722944 | 127 | py |
FDS | FDS-main/models/helpers.py | import torch.nn as nn
from torch.nn import init
def initialize(net, init_type, init_param, init_norm_weights=1):
    """Initialize every conv/linear layer of `net` in place with the chosen
    scheme and reset every *Norm layer to (init_norm_weights, 0).

    :param init_type: one of 'normal', 'xavier', 'xavier_unif', 'kaiming',
        'kaiming_out', 'orthogonal', 'default'
    :param init_param: std / gain / negative-slope, depending on scheme
    :param init_norm_weights: fill value for normalization-layer weights
    :raises NotImplementedError: for an unknown init_type
    :return: `net`
    """
    def _init_weight(m):
        # Dispatch on the requested scheme; all calls mutate m.weight in place.
        if init_type == 'normal':
            init.normal_(m.weight, 0.0, init_param)
        elif init_type == 'xavier':
            init.xavier_normal_(m.weight, gain=init_param)
        elif init_type == 'xavier_unif':
            init.xavier_uniform_(m.weight, gain=init_param)
        elif init_type == 'kaiming':
            init.kaiming_normal_(m.weight, a=init_param, mode='fan_in')
        elif init_type == 'kaiming_out':
            init.kaiming_normal_(m.weight, a=init_param, mode='fan_out')
        elif init_type == 'orthogonal':
            init.orthogonal_(m.weight, gain=init_param)
        elif init_type == 'default':
            # 'default' re-runs the module's own reset (note: this also
            # restores a random bias after it was zeroed below).
            if hasattr(m, 'reset_parameters'):
                m.reset_parameters()
        else:
            raise NotImplementedError('initialization method [%s] is not implemented' % init_type)

    def init_func(m):
        classname = m.__class__.__name__
        if classname.startswith('Conv') or classname == 'Linear':
            if getattr(m, 'bias', None) is not None:
                init.constant_(m.bias, 0.0) #if init_type = default bias isn't kept to zero
            if getattr(m, 'weight', None) is not None:
                _init_weight(m)
        elif 'Norm' in classname: #different Pytorch versions differ in BN init so do it manually
            if getattr(m, 'weight', None) is not None:
                m.weight.data.fill_(init_norm_weights)
            if getattr(m, 'bias', None) is not None:
                m.bias.data.zero_()

    net.apply(init_func)
    return net
| 1,798 | 43.975 | 106 | py |
FDS | FDS-main/models/selector.py | from models.lenet import *
from models.wresnet import *
def select_model(meta,
                 dataset,
                 architecture,
                 init_type='xavier',
                 init_param=1,
                 device='cpu'):
    """
    Build a (meta or plain) model for the given dataset/architecture pair and
    initialize its weights.

    Meta models require device to be provided during init.
    :raises NotImplementedError: for unknown dataset or architecture names
    """
    def build(meta_cls, plain_cls, **kwargs):
        # Meta variants additionally need the target device at construction.
        return meta_cls(device=device, **kwargs) if meta else plain_cls(**kwargs)

    if dataset in ['MNIST', 'FashionMNIST']:
        base = {'n_classes': 10, 'n_channels': 1, 'im_size': 28}
        if architecture == 'LeNet':
            model = build(MetaLeNet, LeNet, **base)
        elif architecture == 'LeNet-BN': #debug neg learning rates
            # NOTE(review): MetaLeNetBN/LeNetBN are not imported at the top of
            # this module -- this branch would raise NameError if taken;
            # confirm against models.lenet.
            model = build(MetaLeNetBN, LeNetBN, **base)
        else:
            raise NotImplementedError
    elif dataset in ['SVHN', 'CIFAR10', 'CIFAR100']:
        base = {'n_classes': 100 if dataset == 'CIFAR100' else 10, 'n_channels': 3}
        if architecture == 'LeNet':
            model = build(MetaLeNet, LeNet, im_size=32, **base)
        elif architecture == 'LeNetBN':
            model = build(MetaLeNetBN, LeNetBN, im_size=32, **base)
        elif architecture in ('WRN-10-1', 'WRN-16-1', 'WRN-40-2'):
            # 'WRN-<depth>-<widen_factor>'
            depth, widen = (int(part) for part in architecture.split('-')[1:])
            model = build(MetaWideResNet, WideResNet,
                          depth=depth, widen_factor=widen, dropRate=0.0, **base)
        else:
            raise NotImplementedError
    else:
        raise NotImplementedError

    ## Initialization schemes
    if meta:
        model.initialize(init_type=init_type, init_param=init_param, init_norm_weights=1, inplace=True)
    else:
        initialize(model, init_type=init_type, init_param=init_param, init_norm_weights=1)
    return model
if __name__ == '__main__':
    # Smoke test: build a supported model and print per-parameter sums so two
    # runs with the same seed can be compared by eye.
    #
    # BUGFIX: the previous demo called select_model with keyword arguments the
    # function does not accept (activation, norm_type, norm_affine, noRes,
    # init_norm_weights) and an unimplemented architecture
    # ('ShuffleNetv2-s05'), so it crashed with a TypeError before doing
    # anything. Call it with the actual signature instead.
    from torchsummary import summary
    from utils.helpers import *

    ## Check init
    set_torch_seeds(0)
    model = select_model(False, dataset='CIFAR10', architecture='WRN-16-1',
                         init_type='normal', init_param=1)
    for n, p in model.named_parameters():
        print(n, float(torch.sum(p)))
FDS | FDS-main/utils/datasets.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
import os
import math
import numpy as np
import matplotlib.pyplot as plt
import warnings
from utils.helpers import *
def unormalize_CIFAR10_image(image):
    """Invert the CIFAR10 per-channel normalization on a (3, H, W) tensor."""
    std = torch.tensor([0.2023, 0.1994, 0.2010]).view(3, 1, 1)
    mean = torch.tensor([0.4914, 0.4822, 0.4465]).view(3, 1, 1)
    return image * std + mean
def plot_image(input, unormalize=True):
    """Display a single (C, H, W) image tensor with matplotlib and return the figure.

    :param input: tensor of rank <= 3; batched input must go through plot_images
    :param unormalize: if True, invert the CIFAR10 mean/std normalization
        (the hard-coded constants match unormalize_CIFAR10_image above)
    :raises NotImplementedError: if input has more than 3 dimensions
    """
    if len(input.shape) > 3:
        print("Use plot_images function instead!")
        raise NotImplementedError
    npimg = input.numpy()
    if unormalize:
        npimg = npimg * np.array([0.2023, 0.1994, 0.2010]).reshape(3,1,1) + np.array([0.4914, 0.4822, 0.4465]).reshape(3,1,1)
    # Convert from CHW (torch) to HWC (matplotlib).
    npimg = np.transpose(npimg, (1, 2, 0))
    # Single-channel images are drawn as 2D grayscale arrays.
    if npimg.shape[-1] != 3:
        npimg = npimg[:, :, 0]
    #print(npimg.shape)
    fig = plt.figure(figsize=(20, 20))
    ax = fig.add_subplot(111)
    ax.axis('off')
    ax.set_xticklabels([])
    ax.set_yticklabels([])
    ax.imshow(npimg, cmap='gray')
    plt.show()
    return fig
def plot_images(batch, padding=2, unormalize=True):
    """Display a single image (rank 3) or a batch (rank 4) as a square grid.

    NOTE(review): `torchvision` is not imported by name in this module's
    import block -- presumably it arrives via `from utils.helpers import *`;
    confirm before refactoring the imports.
    """
    if len(batch.shape) == 3:
        plot_image(batch, unormalize=unormalize)
    elif len(batch.shape) == 4:
        n_images = batch.shape[0]
        if n_images == 1:
            plot_image(batch[0], unormalize=unormalize)
        else:
            # Arrange the batch into a roughly square grid of tiles.
            grid_img = torchvision.utils.make_grid(batch, nrow=int(np.ceil(np.sqrt(n_images))), padding=padding)
            plot_image(grid_img, unormalize=unormalize)
class Cutout(object):
    """Randomly zero out one square patch of an image tensor (in place).

    With probability `prob`, a `length` x `length` square centred at a
    uniformly sampled pixel is masked to zero (clipped at the borders).
    """
    def __init__(self, length, prob=1.0):
        self.length = length
        self.prob = prob
        assert prob<=1, f"Cutout prob given ({prob}) must be <=1"

    def __call__(self, img):
        # Bernoulli draw decides whether to apply cutout at all.
        if np.random.binomial(1, self.prob):
            h, w = img.size(1), img.size(2)
            # Centre of the square, sampled uniformly over the image.
            cy = np.random.randint(h)
            cx = np.random.randint(w)
            half = self.length // 2
            y1, y2 = np.clip(cy - half, 0, h), np.clip(cy + half, 0, h)
            x1, x2 = np.clip(cx - half, 0, w), np.clip(cx + half, 0, w)
            mask = np.ones((h, w), np.float32)
            mask[y1: y2, x1: x2] = 0.
            img *= torch.from_numpy(mask).expand_as(img)
        return img
class InfiniteDataLoader(DataLoader):
    """DataLoader that never exhausts: when an epoch's iterator raises
    StopIteration, a fresh epoch iterator is created transparently."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.dataset_iterator = super().__iter__()

    def __iter__(self):
        # The loader itself acts as the (endless) iterator.
        return self

    def __next__(self):
        try:
            return next(self.dataset_iterator)
        except StopIteration:
            # Epoch finished -- restart from a fresh epoch iterator.
            self.dataset_iterator = super().__iter__()
            return next(self.dataset_iterator)
def get_loaders(datasets_path,
                dataset,
                train_batch_size=128,
                val_batch_size=128,
                val_source='train',
                val_train_fraction=0.1,
                val_train_overlap=False,
                workers=0,
                train_infinite=False,
                val_infinite=False,
                cutout=False,
                cutout_length=16,
                cutout_prob=1):
    """
    Build (train, val, test) DataLoaders for the requested dataset.

    NB: val_train_fraction and val_train_overlap only used if val_source='train'
    Note that infinite=True changes the seed/order of the batches
    Validation is never augmented since validation stochasticity comes
    from sampling different validation images anyways

    :raises NotImplementedError: for unknown dataset names
    """
    assert val_source in ['test', 'train']
    TrainLoader = InfiniteDataLoader if train_infinite else DataLoader
    ValLoader = InfiniteDataLoader if val_infinite else DataLoader

    ## Select relevant dataset
    if dataset in ['MNIST', 'FashionMNIST']:
        mean, std = (0.1307,), (0.3081,)
        transform_train = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)])
        if cutout: transform_train.transforms.append(Cutout(length=cutout_length, prob=cutout_prob))
        transform_test = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)])
        if dataset == 'MNIST':
            train_dataset = datasets.MNIST(datasets_path, train=True, download=True, transform=transform_train)
            test_dataset = datasets.MNIST(datasets_path, train=False, download=True, transform=transform_test)
            # val from train split uses the *test* transform (no augmentation).
            val_dataset = test_dataset if val_source=='test' else datasets.MNIST(datasets_path, train=True, download=True, transform=transform_test)
        elif dataset == 'FashionMNIST':
            train_dataset = datasets.FashionMNIST(datasets_path, train=True, download=True, transform=transform_train)
            test_dataset = datasets.FashionMNIST(datasets_path, train=False, download=True, transform=transform_test)
            val_dataset = test_dataset if val_source=='test' else datasets.FashionMNIST(datasets_path, train=True, download=True, transform=transform_test)
    elif dataset == 'SVHN':
        mean = (0.4377, 0.4438, 0.4728)
        std = (0.1980, 0.2010, 0.1970)
        dataset_path = os.path.join(datasets_path, 'SVHN') #Pytorch is inconsistent in folder structure
        transform_train = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean, std)])
        if cutout: transform_train.transforms.append(Cutout(length=cutout_length, prob=cutout_prob))
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean, std)])
        train_dataset = datasets.SVHN(dataset_path, split='train', download=True, transform=transform_train)
        test_dataset = datasets.SVHN(dataset_path, split='test', download=True, transform=transform_test)
        val_dataset = test_dataset if val_source=='test' else datasets.SVHN(dataset_path, split='train', download=True, transform=transform_test)
    elif dataset in ['CIFAR10', 'CIFAR100']:
        # official CIFAR10 std seems to be wrong (actual is [0.2470, 0.2435, 0.2616])
        mean = (0.4914, 0.4822, 0.4465) if dataset == 'CIFAR10' else (0.5071, 0.4867, 0.4408)
        std = (0.2023, 0.1994, 0.2010) if dataset == 'CIFAR10' else (0.2675, 0.2565, 0.2761)
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean, std)])
        if cutout: transform_train.transforms.append(Cutout(length=cutout_length, prob=cutout_prob))
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean, std)])
        if dataset == 'CIFAR10':
            dataset_path = os.path.join(datasets_path, 'CIFAR10') #Pytorch is inconsistent in folder structure
            train_dataset = datasets.CIFAR10(dataset_path, train=True, download=True, transform=transform_train)
            test_dataset = datasets.CIFAR10(dataset_path, train=False, download=True, transform=transform_test)
            # BUGFIX: val_dataset previously used `datasets_path` (missing the
            # CIFAR10 subfolder), unlike the train/test datasets.
            val_dataset = test_dataset if val_source=='test' else datasets.CIFAR10(dataset_path, train=True, download=True, transform=transform_test)
        elif dataset == 'CIFAR100':
            dataset_path = os.path.join(datasets_path, 'CIFAR100')
            train_dataset = datasets.CIFAR100(dataset_path, train=True, download=True, transform=transform_train)
            test_dataset = datasets.CIFAR100(dataset_path, train=False, download=True, transform=transform_test)
            # BUGFIX: val_dataset previously constructed datasets.CIFAR10 at
            # `datasets_path`, so CIFAR100 validation was served CIFAR10 data.
            val_dataset = test_dataset if val_source=='test' else datasets.CIFAR100(dataset_path, train=True, download=True, transform=transform_test)
    else:
        print(f'{dataset} is not implemented')
        raise NotImplementedError

    ## Create dataloaders
    n_train_images = len(train_dataset)
    #print(train_dataset)
    pin_memory = True if dataset == 'ImageNet' else False
    if val_source == 'test':
        train_loader = TrainLoader(
            dataset=train_dataset, batch_size=train_batch_size,
            shuffle=True, drop_last=True, num_workers=workers, pin_memory=pin_memory)
        val_loader = ValLoader(
            dataset=val_dataset, batch_size=val_batch_size,
            shuffle=True, drop_last=True, num_workers=workers, pin_memory=pin_memory)
    elif val_source == 'train':
        # Carve a validation subset out of the train images; optionally keep
        # the training loader on the full train set (val_train_overlap).
        all_indices = list(range(n_train_images))
        val_indices = np.random.choice(all_indices, size=int(val_train_fraction * n_train_images), replace=False)
        val_loader = ValLoader(
            dataset=val_dataset, batch_size=val_batch_size,
            sampler=SubsetRandomSampler(val_indices), drop_last=True,
            num_workers=workers, pin_memory=pin_memory)
        if val_train_overlap:
            train_loader = TrainLoader(
                dataset=train_dataset, batch_size=train_batch_size,
                shuffle=True, drop_last=True, num_workers=workers, pin_memory=pin_memory)
        else:
            train_indices = list(set(all_indices) - set(val_indices))
            train_loader = TrainLoader(
                dataset=train_dataset, batch_size=train_batch_size,
                sampler=SubsetRandomSampler(train_indices), drop_last=True,
                num_workers=workers, pin_memory=pin_memory)
    test_loader = DataLoader(
        dataset=test_dataset, batch_size=val_batch_size,
        shuffle=True, drop_last=True, num_workers=workers, pin_memory=pin_memory) # test loader never infinite
    return train_loader, val_loader, test_loader
if __name__ == '__main__':
    # Smoke test: build MNIST loaders with a train-carved validation split and
    # cutout augmentation enabled, then visualize one training batch.
    train_loader, val_loader, test_loader = get_loaders('~/Datasets/Pytorch/',
                                                        'MNIST',
                                                        train_batch_size=500,
                                                        val_batch_size=500,
                                                        val_source='train',
                                                        val_train_fraction=0.05,
                                                        val_train_overlap=False,
                                                        workers=0,
                                                        train_infinite=False,
                                                        val_infinite=False,
                                                        cutout=True,
                                                        cutout_length=16,
                                                        cutout_prob=1)
    # Approximate sample counts (batch_size * number of full batches).
    print(len(train_loader)*500)
    print(len(val_loader)*500)
    for x_val, y_val in val_loader:
        print(x_val.shape)
    # Grab one training batch for visualization.
    for x_train, y_train in train_loader:
        break
    #plot_images(x_val[:100])
    plot_images(x_train[:100])
| 10,895 | 41.232558 | 155 | py |
FDS | FDS-main/utils/helpers.py | import csv
import torch
import torch.nn.functional as F
from torchvision import datasets, transforms
from torch.utils.data import Dataset, DataLoader
import shutil
import datetime
import json
import os
import argparse
import gc
import numpy as np
import torchvision
import functools
import time
import warnings
#warnings.simplefilter("ignore", UserWarning)
### Metrics
class AggregateTensor(object):
    """Running (weighted) average of a stream of torch tensors.

    Mostly used to average losses and accuracies over batches. Works for both
    scalar and vector tensors, but inputs must be torch tensors: values are
    detached on update so no autograd graph is retained.
    """
    def __init__(self):
        self.reset()

    def reset(self):
        # Tiny non-zero count guards against division by zero in avg().
        self.count = 0.0001
        self.sum = 0

    def update(self, val, w=1):
        """Accumulate `val` with weight `w` (e.g. the batch size)."""
        self.count += w
        self.sum += w * val.detach()

    def avg(self):
        """Return the weighted mean of everything seen since reset()."""
        return self.sum / self.count
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    :param output: scores/logits of shape [batch_size, n_classes]
    :param target: integer class labels of shape [batch_size]
    :param topk: tuple of k values; one fraction-correct tensor returned per k
    :return: list of 0-dim tensors, each the fraction of samples whose target
        is among the top-k predictions
    """
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        # BUGFIX: use reshape instead of view -- correct[:k] is a
        # non-contiguous slice of a transposed tensor, so .view(-1) raises
        # RuntimeError on modern PyTorch.
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(1/batch_size))
    return res
def avg_entropy(pmf):
    """
    Mean Shannon entropy (in bits) over a batch of pmfs.

    :param pmf: pytorch tensor pmf of shape [batch_size, n_classes]
    :return: average entropy of pmf across entire batch
    """
    in_range = (pmf>=0)*(pmf<=1.00001)
    assert in_range.all(), "All inputs must be in range [0,1] but min/max is {}/{}".format(float(torch.min(pmf)), float(torch.max(pmf)))
    # Clamp before the log so p == 0 contributes 0 instead of nan/inf.
    clamped = torch.clamp(pmf, min=0.0001, max=1.0)
    per_sample = -(torch.log2(clamped)*pmf).sum(1)
    return per_sample.mean()
def avg_max(pmf):
    """
    Mean of the per-sample maximum probability.

    :param pmf: pytorch tensor pmf of shape [batch_size, n_classes]
    :return: average of max predictions of pmf across entire batch
    """
    assert ((pmf >= 0) * (pmf <= 1)).all(), "All inputs must be in range [0,1]"
    max_per_sample, _ = torch.max(pmf, 1)
    return max_per_sample.mean()
def onehot(targets, n_classes):
    """
    Convert integer labels of form [2, 7, ...] to one-hot float rows
    [0,0,1,0,...], [0,...,0,1,0,0], ... on the same device as `targets`.

    :param targets: integer labels of shape [batch_size]
    :param n_classes: number of columns in the encoding
    :return: float tensor of shape [batch_size, n_classes]
    """
    encoded = torch.zeros((targets.shape[0], n_classes), device=targets.device)
    return encoded.scatter(1, targets.unsqueeze(-1), 1)
def gc_tensor_view(verbose=True):
    """Print a summary of all torch tensors currently tracked by the garbage
    collector, grouped by shape and sorted by total size.

    Doesn't catch intermediate variables stored by the Pytorch graph
    if they are not in the Python scope.

    :param verbose: if False, only totals are accumulated, nothing is printed
    """
    total_MB_size = 0
    object_counts = {}
    object_MBs = {}
    if verbose: print('\n------- TENSORS SEEN BY GARBAGE COLLECTOR -------')
    for obj in gc.get_objects():
        try:
            if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):
                t = obj if torch.is_tensor(obj) else obj.data
                # BUGFIX: use the tensor's actual element size instead of
                # assuming 4-byte float32 (half/double tensors were
                # mis-counted before).
                MB_size = t.numel() * t.element_size() / 1024**2
                total_MB_size += MB_size
                key = str(t.size())[10:]  # strip the 'torch.Size(' prefix
                object_counts[key] = object_counts.get(key, 0) + 1
                object_MBs[key] = MB_size
        except Exception:
            # Best-effort introspection: some gc-tracked objects raise on
            # attribute access; skip them. (Narrowed from a bare `except:` so
            # KeyboardInterrupt/SystemExit are no longer swallowed.)
            pass
    if verbose:
        object_totals = {k:object_counts[k] * object_MBs[k] for k in object_MBs.keys()}
        for key, value in sorted(object_totals.items(), key=lambda item: item[1], reverse=True):
            print("{} x {} ({:.0f}MB) = {:.0f}MB".format(object_counts[key], key, object_MBs[key], object_counts[key]*object_MBs[key]))
        print("TOTAL MEMORY USED BY PYTORCH TENSORS: {:.0f} MB".format(total_MB_size))
def set_torch_seeds(seed):
    """Seed Python, NumPy and (CPU + CUDA) torch RNGs for reproducibility."""
    import random
    import numpy as np
    import torch
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed):
        seeder(seed)
def timer(func):
    """Print the runtime of the decorated function"""
    @functools.wraps(func)
    def wrapper_timer(*args, **kwargs):
        started = time.perf_counter()
        result = func(*args, **kwargs)
        elapsed = time.perf_counter() - started
        # format_time is defined elsewhere in this module.
        print(f"--- Ran func {func.__name__!r} in {format_time(elapsed)} ---")
        return result
    return wrapper_timer
### Data view and read
def unormalize_CIFAR10_image(image):
    """Undo the standard CIFAR-10 per-channel normalization (image * std + mean)."""
    std = torch.tensor([0.2023, 0.1994, 0.2010]).view(3, 1, 1)
    mean = torch.tensor([0.4914, 0.4822, 0.4465]).view(3, 1, 1)
    return image * std + mean
# def plot_image(input, unormalize=False):
# if len(input.shape) > 3:
# print("Use plot_images function instead!")
# raise NotImplementedError
# npimg = input.numpy()
# if unormalize:
# npimg = npimg * np.array([0.2023, 0.1994, 0.2010]).reshape(3,1,1) + np.array([0.4914, 0.4822, 0.4465]).reshape(3,1,1)
# npimg = np.transpose(npimg, (1, 2, 0))
# if npimg.shape[-1] != 3:
# npimg = npimg[:, :, 0]
# #print(npimg.shape)
#
# fig = plt.figure(figsize=(20, 20))
# ax = fig.add_subplot(111)
# ax.axis('off')
# ax.set_xticklabels([])
# ax.set_yticklabels([])
#
# ax.imshow(npimg, cmap='gray')
# plt.show()
# return fig
# def plot_images(batch, padding=2, unormalize=False):
# if len(batch.shape) == 3:
# plot_image(batch, unormalize=unormalize)
# elif len(batch.shape) == 4:
# n_images = batch.shape[0]
# if n_images == 1:
# plot_image(batch[0], unormalize=unormalize)
# else:
# grid_img = torchvision.utils.make_grid(batch, nrow=int(np.ceil(np.sqrt(n_images))), padding=padding)
# plot_image(grid_img, unormalize=unormalize)
def str2bool(v):
    # codes from : https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
    """Parse a command-line string as a boolean flag (for argparse `type=`)."""
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def delete_files_from_name(folder_path, file_name, type='contains'):
    """Delete every file in *folder_path* whose name equals ('is') or
    contains ('contains') *file_name*."""
    assert type in ['is', 'contains']
    for entry in os.listdir(folder_path):
        exact_hit = (type == 'is' and file_name == entry)
        partial_hit = (type == 'contains' and file_name in entry)
        if exact_hit or partial_hit:
            os.remove(os.path.join(folder_path, entry))
def copy_file(file_path, folder_path):
    """Copy *file_path* into *folder_path*, keeping its base name."""
    target = os.path.join(folder_path, os.path.basename(file_path))
    shutil.copyfile(file_path, target)
def format_time(seconds):
    """Render a duration given in seconds as 'XhMMmSSs'."""
    hours, remainder = divmod(seconds, 3600)
    minutes, secs = divmod(remainder, 60)
    return "%dh%02dm%02ds" % (hours, minutes, secs)
def create_empty_file(path):
    """Easy way to log final test accuracy in some experiment folder"""
    # NOTE: despite the name, the file is created with the placeholder text "NA".
    with open(path, 'w+') as marker:
        marker.write("NA")
if __name__ == '__main__':
from time import time
import torch
## Test AggregateTensor
# x = np.random.rand(1000)*50
# w = np.random.rand(1000)*5
# true_mu = w@x/np.sum(w)
# true_std = np.sqrt(np.sum(w*(x-true_mu)**2)/((len(x)-1)*np.sum(w)/len(x)))
#
# t0 = time.time()
# a = "yolo"
# print("Init of string takes: {} us".format(1e6*(time()-t0)))
#
# t0 = time.time()
# meter = AggregateTensor()
# print("Init of AggregateTensor takes: {} us".format(1e6*(time()-t0)))
#
# t0 = time.time()
# a = 1000*5
# print("Multiplication takes: {} us".format(1e6 * (time.time() - t0)))
#
# t = 0
# for val,weight in zip(x,w):
# t0 = time.time()
# meter.update(val, weight)
# t += time.time() - t0
# print("Avg update time: {} us".format(1e6*t/len(x)))
#
# print(true_mu, meter.avg())
# #print(np.std(x), true_std, meter.std())
#
# ### Test AggregateDict
# keys = ['loss', 'acc', 'yolo']
# meter = AggregateDict()
#
# values = [[1,2,3], [3,4,5], [1,1,1]]
# true_mus = [np.mean(el) for el in values]
# true_stds = [np.std(el) for el in values]
#
# for i in range(3):
# dict = {k: v[i] for k,v in zip(keys, values)}
# print(dict)
# meter.update(val=dict, w=1)
#
#
# print(true_mus, meter.avg())
#print(true_stds, meter.std())
### Test cutout
# Data loader tests
# from time import time
# import torchvision.datasets as datasets
# import torchvision.transforms as transforms
#
# device = torch.device('cpu')
# dataset_path = "~/Datasets/Pytorch/"
#
#
# transform = transforms.Compose([
# transforms.RandomCrop(32, padding=4),
# transforms.RandomHorizontalFlip(),
# transforms.ToTensor(),
# #transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
# Cutout(n_holes=1, length=16, cutout_proba=0.5)])
#
# dataset = datasets.CIFAR10(dataset_path, train=False, download=True, transform=transform)
# loader = torch.utils.data.DataLoader(
# dataset=dataset,
# batch_size=5,
# shuffle=False, drop_last=False, num_workers=4)
#
# for x,y in loader:
# print(x.shape, y.shape)
# image = x[4]#*torch.Tensor[0.2023, 0.1994, 0.2010])-torch.Tensor([0.4914, 0.4822, 0.4465]
# plot_image(image)
# break
### Test entropy
# output = torch.Tensor([[0.1, 0.5, 0.4],
# [0.3,0.3,0.4],
# [0.99, 0.005, 0.005],
# [0.5, 0.5, 0.000001]])
#
# print(avg_entropy(output))
## Test Dataloader indices
# transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
# dataset = datasets.MNIST("~/Datasets/Pytorch/", train=True, download=True, transform=transform)
# dataset = DatasetWithIndices(dataset)
# loader = torch.utils.data.DataLoader(dataset=dataset,batch_size=5,shuffle=True,drop_last=True,num_workers=1)
#
# cnt = 0
# for x, y, indices in loader:
# print(x.shape, y.shape, indices.shape)
# print(indices)
# if cnt>5:
# break
# cnt+=1
#print(len(dataset), len(loader))
## Test Dataloader coefficient
# transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
# dataset = datasets.MNIST("~/Datasets/Pytorch/", train=True, download=True, transform=transform)
# dataset = DatasetWithLearnableCoefficients(dataset)
# loader = torch.utils.data.DataLoader(dataset=dataset,batch_size=5,shuffle=True,drop_last=True,num_workers=1)
#
# cnt = 0
# for x, y, indices in loader:
# print(x.shape, y.shape, indices.shape)
# print(indices)
# if cnt>5:
# break
# cnt+=1
#
# print(len(dataset), len(loader))
## Test Corrupter
# corrupter = Corrupter(n_images=50000, fraction_to_corrupt=0.1, n_classes=10)
# indices = torch.arange(10000, 10000+260, dtype=torch.long)
# targets = torch.arange(10, dtype=torch.long).repeat(26)
#
# t0 = time.time()
# corrupted = corrupter(indices, targets)
# print(time.time() - t0)
#
# print(corrupted)
# print(len(corrupted))
## Test Aggregate for vector
#a = torch.tensor([1,2,3])
#b = torch.tensor([3,4,5])
a = torch.FloatTensor([1])
b = torch.FloatTensor([2])
c = AggregateVector()
c.update(a)
c.update(b)
print(c.avg())
###
pass
| 11,853 | 30.442971 | 153 | py |
corrupted_data_classification | corrupted_data_classification-main/main.py | # -*- coding: utf-8 -*-
'''
The following libraries are used:
[1] NIFTy – Numerical Information Field Theory, https://gitlab.mpcdf.mpg.de/ift/nifty
[2] NumPy - Numerical Python, https://numpy.org/
[3] Tensorflow - Tensorflow, https://www.tensorflow.org/
[4] Keras - Keras, https://keras.io/
[5] Matplotlib - Matplotlib, https://matplotlib.org/
[6] SciPy - Scientific Python, https://www.scipy.org/
[7] random - random, https://docs.python.org/3/library/random.html
[8] sklearn - https://scikit-learn.org/
Within helper_functions.py, Conv.py and Mask.py, the following libraries are used (these may be obsolete and omittable for the core task):
[9] PIL - Pillow (only Image-function), https://pillow.readthedocs.io/en/stable/
[10] warnings - warnings, https://docs.python.org/3/library/warnings.html
[11] random - random, https://docs.python.org/3/library/random.html
[12] skimage - scikit-image (only resize-function), https://scikit-image.org
All Neural Networks were built with Keras and saved as tensorflow-objects. Neural Netowrks are optimized for MNIST, good performance is observed for
F-MNIST.
'''
# Commented out IPython magic to ensure Python compatibility.
# Colab and system related
import os
import sys
import nifty6 as ift
###
# Necessary to convert tensorflow-object (e.g. Neural Network) to Nifty-Operator
sys.path.append('corrupted_data_classification/helper_functions/')
from operators.tensorflow_operator import TensorFlowOperator
###
import tensorflow as tf
# Include path to access helper functions and Mask / Conv Operator
sys.path.append('corrupted_data_classification/helper_functions/')
from helper_functions import clear_axis, gaussian, get_cmap, info_text, get_noise, rotation, split_validation_set
import Mask # Masking Operator
import Conv # Convolution Operator
sys.path.remove
# Tensorflow
# Plotting
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.pyplot as plt
# %matplotlib inline
plt.rcParams['figure.dpi'] = 200 # 200 e.g. is really fine, but slower
# Numerics
import random
import numpy as np
from sklearn.neighbors import KernelDensity
from scipy.stats import multivariate_normal
import sklearn as sk
from sklearn import decomposition
# Choose dataset
dataset = 'mnist' #'mnist, 'fashion_mnist'
datasource = getattr(tf.keras.datasets, dataset)
(XTrain, YTrain), (XTest, YTest) = datasource.load_data()
XTrain, XTest = XTrain / 255.0, XTest / 255.0
x_shape = XTrain[1].shape[0]
y_shape = XTrain[1].shape[1]
try:
z_shape = XTrain[1].shape[2]
img_shape = [x_shape, y_shape, z_shape]
except:
img_shape = [x_shape, y_shape]
xy_shape = x_shape * y_shape
flattened_shape = np.prod(img_shape)
# Reshape Xtrain and XTest to flattened Vectors instead of square arrays
if dataset == 'mnist' or dataset== 'fashion_mnist':
XTrain = XTrain.reshape((len(XTrain), np.prod(XTrain.shape[1:])))
XTest = XTest.reshape((len(XTest), np.prod(XTest.shape[1:])))
n_classes = len(np.unique(YTrain))
# Session for tensorflow v1 compatibility
sess = tf.compat.v1.InteractiveSession()
graph = tf.compat.v1.get_default_graph()
###
# [4]
###
# Split Training-Dataset into additional validation set.
XTrain, YTrain, XVal, YVal = split_validation_set(XTrain, YTrain, val_perc=0.2)
# Read in model#
if dataset=='mnist':
Decoder_tf = tf.keras.models.load_model('./corrupted_data_classification/NNs/MNIST/pretrained_supervised_ae10/Decoder', compile=False)
Encoder_tf = tf.keras.models.load_model('./corrupted_data_classification/NNs/MNIST/pretrained_supervised_ae10/Encoder', compile=False)
if dataset=='fashion_mnist':
Decoder_tf = tf.keras.models.load_model('./corrupted_data_classification/NNs/Fashion-MNIST/pretrained_supervised_ae10/Decoder', compile=False)
Encoder_tf = tf.keras.models.load_model('./corrupted_data_classification/NNs/Fashion-MNIST/pretrained_supervised_ae10/Encoder', compile=False)
# Define ift-space
# position_space: Also data-space. Equal to the vectorized image dimension. For MNIST-Images, the position-space's
# dimensions are 784x1
position_space = ift.UnstructuredDomain(Decoder_tf.get_layer(index=-1).output_shape[1:])
# n_latent: number of latent space activations
n_latent = Encoder_tf.get_layer(index=-1).output_shape[-1]
# latent_space: Domain with dimensions of the latent space
latent_space = ift.UnstructuredDomain([n_latent])
# Initialize Parameters
# Pre-Defined parameters by Max-Planck-Institute
comm, _, _, master = ift.utilities.get_MPI_params()
# Convert Encoder and Decoder to nifty-operators (``TensorFlowOperator``)
Decoder = TensorFlowOperator(Decoder_tf.layers[-1].output, Decoder_tf.layers[0].output, latent_space, position_space)
Encoder = TensorFlowOperator(Encoder_tf.layers[-1].output, Encoder_tf.layers[0].output, position_space, latent_space)
# Choose how to classify data, once it has been reconstructed (any classifier of MNIST data my be chosen here).
Classifier = Encoder
#Classifier = TensorFlowOperator(Classifier_tf.layers[-2].output, Classifier_tf.layers[0].output, position_space,ift.UnstructuredDomain(n_classes))
# Get all activations in the latent space from Encoder with Validation Dataset -> latent_values
latent_values = np.zeros((len(XVal), n_latent))
for i, pic in enumerate(XVal):
pic = np.reshape(pic, position_space.shape)
latent_values[i, :] = Encoder(ift.Field.from_raw(position_space, pic)).val
# Fill means-array with mean activation of every picture
means = np.zeros([n_latent, n_classes])
for pic in range(n_classes):
for weight in range(n_latent):
means[weight, pic] = np.mean(latent_values[np.where(YVal == pic), weight])
# Define overall mean of all activations in latent-space
mean = ift.Field.from_raw(latent_space, np.mean(latent_values, axis=0)) #mean of all activations in latent
Mean = ift.Adder(mean)
# Fill cov_all_variables with covariances of activation of every digit;
# Get cov_supervised_variables with covariances of only supervised activations
cov_all_variables = [[np.zeros([n_latent, n_latent])] for y in range(n_classes)]
cov_supervised_variables = [[np.zeros([n_classes, n_classes])] for y in range(n_classes)]
for i in range(n_classes):
cov_all_variables[i] = np.cov(latent_values[np.where(YVal==i)[0]][:,:], rowvar=False)
cov_supervised_variables[i] = np.cov(latent_values[np.where(YVal==i)[0]][:,:10], rowvar=False)
# Fill overall covariance of all activations in latent space
cov = np.zeros([n_latent, n_latent])
cov = np.cov(latent_values, rowvar=False)
# Transform covariance matrix into standardized space by Cholesky factorization
# cov = AA^T
A = ift.MatrixProductOperator(ift.UnstructuredDomain([n_latent]), np.linalg.cholesky(cov))
'''
Generate Ground Truth either
--> from Sampling from latent distribution OR
--> from drawing a sample from independent partition of dataset
'''
## Sampling from latent distribution
#xi = ift.from_random(latent_space, 'normal')
#s = A.apply(xi, 1) + mean
#ground_truth = Decoder(s)
## Drawing sample from dataset
# NOTE(review): p and ground_truth set here are immediately overridden by the
# p = 10 assignment below — this first assignment is dead code; confirm and remove.
p=3
#p = 10
ground_truth = ift.Field.from_raw(position_space, np.reshape(XTest[p], position_space.shape))
'''
Data Corruption:
1. Mask --> Operator: M (no_mask, half_mask, corner_mask, checkerboard_mask, random_mask)
2. Noise --> Operator: N
3. Convolution --> Operator: C (sobel, gaussian_blur, edge_detection, own)
Data Modification (not included in modeling-process; thus the Model "does not
know" these modifications):
4. Rotation (angle)
X. Response --> Operator: R (Concatenated Mask, Noise and Convolution)
'''
p = 10 # Specify element of XTest that is to be corrupted and to be evaluated; can be arbitrary integer within length of XTest
ground_truth = ift.Field.from_raw(position_space, np.reshape(XTest[p], position_space.shape))
# 1. Mask
M = Mask.no_mask(position_space=position_space)
#M = Mask.half_mask(position_space=position_space, mask_range=0.5)
#M = Mask.random_mask(position_space=position_space, seed=10, n_blobs=25)
# 2. Noise
N, n = get_noise(noise_level=1, position_space=position_space, seed=10)
# 3. Convolution
#C = Conv.gaussian_blur(7, 1, position_space=position_space) # sobel, edge_detection,
# 4. Rotation (not included in data-model, reconstruction may be poor!)
# Specify angle in degrees (clockwise rotation)
ground_truth_rot = rotation(ground_truth, img_shape, angle=0)
# Apply Data Corruption to Ground Truth and create Response operator
GR = ift.GeometryRemover(position_space)
R = GR(M) # Without Convolution
#R = GR(M @ C) # With Convolution
data = R((ground_truth_rot))+n # Apply Response R on (rotated) ground truth --> noise is added after masking
plt.imshow(np.reshape(data.val, [28,28]))
# Define Hyperparameters for minimizer via Iteration-Controllers
# These Hyperparameters are not fully optimized!
ic_sampling = ift.AbsDeltaEnergyController(name='Sampling', deltaE=1e-2, iteration_limit=150)
ic_newton = ift.AbsDeltaEnergyController(name='Newton', deltaE=5e-2, iteration_limit=150)
minimizer = ift.NewtonCG(ic_newton)
'''
Define Likelihood as Gaussian Energy
mean: data (corruped image with R applied)
inverse_covariance: Inverse of Noise-Matrix N
R: Response Operator
Decoder: Generator mapping data from latent space to image space
Mean: Adder Operator; Mean of all latent Space activations
A: Product Operator; Transformed Covariance of all latent space activations
Mean and A originate from the following transformation:
s = A*xi+Mean
'''
likelihood = ift.GaussianEnergy(mean=data, inverse_covariance=N.inverse) @ R @ Decoder @ Mean @ A
H = ift.StandardHamiltonian(likelihood, ic_sampling)
# Run MGVI (Metric Gaussian Variational Inference)
n_samples = 50 # Define number of samples with which posterior distribution is approximated; more samples => higher runtime, higher accuracy
def MGVI(n_samples, H):
    """Run Metric Gaussian Variational Inference on Hamiltonian *H*.

    Starts at the latent-space origin, alternates drawing *n_samples*
    KL samples and minimizing the KL for 5 rounds (uses the module-level
    `minimizer` and `latent_space`), then draws and minimizes one final KL.

    :return: the final minimized ift.MetricGaussianKL object.
    """
    position = ift.Field.full(latent_space, 0.)  # initial latent mean; random init works too
    for _ in range(5):
        kl_energy = ift.MetricGaussianKL(position, H, n_samples, mirror_samples=False)
        kl_energy, _convergence = minimizer(kl_energy)
        position = kl_energy.position
    kl_energy = ift.MetricGaussianKL(position, H, n_samples, mirror_samples=False)
    kl_energy, _convergence = minimizer(kl_energy)
    return kl_energy
iters=1 # Define number of iterations of posterior approximation. This might be helpful to check "how certain" the approximation is and if only an unstable local minimum is found
KL_iterations = []
for i in range(iters):
    KL_iterations.append(MGVI(n_samples, H))
# Draw inferred signal from posterior samples and transform to original space
sc = ift.StatCalculator()
for i in range(iters):
    KL = KL_iterations[i]
    for sample in KL.samples:
        sc.add(A.apply(sample + KL.position, 1) + mean) # Retransform signal s = A*xi+mu
posterior_mean = sc.mean # Get mean of all samples
posterior_std = ift.sqrt(sc.var) # Get standard deviation of all samples
# Classify posteriors via mahalanobis-distance and by classifying all posterior samples
# with seperatly trained network ('Classifier')
mahalanobis_distance_supervised = np.zeros([iters*n_samples, n_classes])
mahalanobis_distance = np.zeros([iters*n_samples, n_classes])
classified_posteriors = np.zeros([iters*n_samples, n_latent])
latent_posteriors = np.zeros([iters*n_samples, n_latent])
for k in range(iters):
    KL = KL_iterations[k]
    for j, sample in enumerate(KL.samples):
        # Retransform each posterior sample into the (unstandardized) latent space.
        s_posterior = A.apply(sample + KL.position, 1) + mean
        latent_posteriors[j+k*n_samples, :] = s_posterior.val
        classified_posteriors[j+k*n_samples, :] = Classifier(Decoder(s_posterior)).val
        for i in range(n_classes):
            mahalanobis_distance_supervised[j+k*n_samples, i] = np.sqrt((s_posterior.val[:n_classes] - means[:n_classes,i]).T @ np.linalg.inv(cov_supervised_variables[i]) @ (s_posterior.val[:n_classes] - means[:n_classes,i]))
            mahalanobis_distance[j+k*n_samples, i] = np.sqrt((s_posterior.val - means[:,i]).T @ np.linalg.inv(cov_all_variables[i]) @ (s_posterior.val - means[:,i]))
            #mahalanobis_distance[j+k*n_samples, i] = np.sqrt((s_posterior.val - means[:,i]).T @ (s_posterior.val - means[:,i])) # Euclidian Distance
mahalanobis_mean = np.mean(mahalanobis_distance, axis=0)
mahalanobis_std = np.sqrt(np.var(mahalanobis_distance, axis=0))
mahalanobis_mean_supervised = np.mean(mahalanobis_distance_supervised, axis=0)
mahalanobis_std_supervised = np.sqrt(np.var(mahalanobis_distance_supervised, axis=0))
classified_mean = np.mean(classified_posteriors, axis=0)
classified_std = np.std(classified_posteriors, axis=0)
# Get all classifications of posterior samples for pie-plot visualization
classified_posteriors_nn = np.sort(np.argmax(classified_posteriors, axis=1))
classified_posteriors_dm = np.sort(np.argmin(mahalanobis_distance, axis=1))
# NOTE(review): the body of this loop does not depend on i — the same np.unique
# is recomputed n_classes times; one pass would suffice (behavior unchanged).
for i in range(n_classes):
    unique_digit_nn, count_nn = np.unique(classified_posteriors_nn, return_counts=True)
    unique_digit_dm, count_dm = np.unique(classified_posteriors_dm, return_counts=True)
counts_nn = dict(zip(unique_digit_nn, count_nn))
counts_dm = dict(zip(unique_digit_dm, count_dm))
viridis = cm.get_cmap('viridis', n_classes)
pie_colors = viridis(np.linspace(0, 1, n_classes))
# Create dictionary with important information:
# Top scores of respective classification method (M-Dist, NN)
# True or false classification (only valid if Labels given)
# Overlapping standard-deviations
n_scores = 3 # Number of top scoring elements to be displayed (max: n_classes)
top_scores_nn = list(reversed(np.argsort(classified_mean)[-n_scores:]))
top_scores_dm = list(np.argsort(mahalanobis_mean)[:n_scores])
overlap_bottom_nn = np.zeros(n_scores-1)
overlap_bottom_dm = np.zeros(n_scores-1)
for i in range(n_scores-1):
    overlap_bottom_nn[i] = (classified_mean[top_scores_nn[0]] - classified_std[top_scores_nn[0]]) - (classified_mean[top_scores_nn[i+1]] + classified_std[top_scores_nn[i+1]])
    overlap_bottom_dm[i] = (mahalanobis_mean[top_scores_dm[i+1]] - mahalanobis_std[top_scores_dm[i+1]]) - (mahalanobis_mean[top_scores_dm[0]] + mahalanobis_std[top_scores_dm[0]])
keys_nn = ['Measure','Top Scores:', 'Classification:', 'ID:', 'N Samples:']
keys_dm = ['Measure','Top Scores:', 'Classification:', 'ID:', 'N Samples:', 'M-Dist of {}:'.format(top_scores_dm[0])]
# NOTE(review): correctness is checked against YTrain[-p] while the ground truth
# was drawn from XTest[p] (label YTest[p]) — looks like a label-source mismatch; confirm.
if top_scores_nn[0] == YTrain[-p]:
    values_nn = ['Neural Net Classifier','{}'.format(tuple(top_scores_nn)), 'True', 'YTrain[-{}]'.format(p), '{}'.format(n_samples)]
if top_scores_dm[0] == YTrain[-p]:
    values_dm = ['Mahalanobis Distance','{}'.format(tuple(top_scores_dm)), 'True', 'YTrain[-{}]'.format(p), '{}'.format(n_samples), '{}'.format(mahalanobis_mean[top_scores_dm[0]])]
if top_scores_nn[0] != YTrain[-p]:
    values_nn = ['Neural Net Classifier','{}'.format(tuple(top_scores_nn)), 'False', 'YTrain[-{}]'.format(p), '{}'.format(n_samples)]
if top_scores_dm[0] != YTrain[-p]:
    values_dm = ['Mahalanobis Distance','{}'.format(tuple(top_scores_dm)), 'False', 'YTrain[-{}]'.format(p), '{}'.format(n_samples), '{}'.format(mahalanobis_mean[top_scores_dm[0]])]
# Store Overlapping in Dictionary, expressed in terms of sigmas/STD of top Scoring digit
for i in range(n_scores - 1):
    keys_nn.append('Overlap [sigmas] {} --> {}'.format(top_scores_nn[0], top_scores_nn[i+1]))
    values_nn.append(overlap_bottom_nn[i] / classified_std[top_scores_nn[0]])
    keys_dm.append('Overlap [sigmas] {} --> {}'.format(top_scores_dm[0], top_scores_dm[i+1]))
    values_dm.append(overlap_bottom_dm[i] / mahalanobis_std[top_scores_dm[0]])
overlapping_nn = dict(zip(keys_nn, values_nn))
overlapping_dm = dict(zip(keys_dm, values_dm))
# NOTE(review): `min`/`max` shadow the builtins and are not referenced below — confirm and remove.
min = np.min([posterior_mean.val])
max = np.max([posterior_mean.val])
# Panel 1: raw posterior latent mean h with its std.
plt.subplot(3, 4, 1)
barplot = plt.bar(range(n_classes), posterior_mean.val[0:n_classes], alpha=1, width=0.8, yerr=posterior_std.val[0:n_classes], label='MGVI with STD')
barplot[np.where(posterior_mean.val == np.max(posterior_mean.val[:10]))[0][0]].set_color('r')
plt.legend(fontsize=3)
plt.title('$h\pm\delta_r$', fontsize=8)
plt.xticks(range(n_classes), fontsize=6)
plt.yticks(fontsize=6)
# Panel 2: classifier output on decoded posterior mean.
plt.subplot(3, 4, 2)
barplot = plt.bar(range(n_classes), classified_mean[:10], yerr=classified_std[:10])
plt.xticks(np.arange(n_classes), fontsize=6)
plt.yticks(fontsize=6)
barplot[np.where(classified_mean == np.max(classified_mean))[0][0]].set_color('r')
plt.title('$f(g(h))\pm \delta_r$', fontsize=8)
# Panel 3: supervised Mahalanobis distances (smaller = better, marked red).
plt.subplot(3, 4, 3)
m_mean = mahalanobis_mean_supervised
m_std = mahalanobis_std_supervised
barplot = plt.bar(range(n_classes), m_mean, yerr=m_std)
barplot[np.where(m_mean == np.min(m_mean))[0][0]].set_color('r')
for bar in barplot:
    yval = bar.get_height()
    yval = np.round(yval, decimals=2)
    plt.annotate('{}'.format(yval),
                 xy=(bar.get_x() + bar.get_width() / 2, bar.get_height()),
                 xytext=(0, 3), # 3 points vertical offset
                 textcoords="offset points",
                 ha='center', va='bottom', fontsize=5, rotation=45)
plt.title('$\delta_m\pm \delta_r$', fontsize=8)
plt.ylim(0, 1.3*np.max(m_mean))
plt.xticks(np.arange(n_classes), fontsize=6)
plt.yticks(fontsize=6)
# Panels 5-7: corrupted data, ground truth, reconstruction.
plt.subplot(3, 4, 5)
plt.imshow(np.reshape(data.val, img_shape))
plt.xlabel('Mock Signal')
clear_axis()
plt.xticks(fontsize=6)
plt.yticks(fontsize=6)
plt.subplot(3, 4, 6)
plt.imshow(np.reshape(ground_truth.val, img_shape))
plt.xlabel('Ground Truth: {}'.format(YTest[p]), fontsize=8)
clear_axis()
plt.subplot(3, 4, 7)
plt.imshow(np.reshape(Decoder(posterior_mean).val, img_shape))
plt.xlabel('Reconstruction', fontsize=8)
clear_axis()
# Panels 4 and 8: per-sample classification shares as pie charts.
plt.subplot(3, 4, 4)
plt.pie([float(v) for v in counts_nn.values()], labels=[float(k) for k in counts_nn.keys()],autopct='%1.1f%%', colors=pie_colors[list(counts_nn.keys())], textprops={'fontsize': 4} )
plt.xlabel('Class. Post. NN', fontsize=8)
plt.subplot(3, 4, 8)
plt.pie([float(v) for v in counts_dm.values()], labels=[float(k) for k in counts_dm.keys()],autopct='%1.1f%%', colors=pie_colors[list(counts_dm.keys())], textprops={'fontsize': 4})
plt.xlabel('Class. Post. $d_M$', fontsize=8)
plt.savefig('./corrupted_data_classification/{}'.format('example_results'))
# Visualize reconstructions of all posterior samples. Output dependent on n_samples.
# NOTE(review): np.int was removed in NumPy 1.24 — this section requires an older
# NumPy or a change to int(...); flagged only, code unchanged here.
grid = plt.GridSpec(np.int(np.floor(np.sqrt(len(latent_posteriors)))), np.int(np.ceil(np.sqrt(len(latent_posteriors)))), wspace=0.1, hspace=0.1)
k=0
latent_posteriors=latent_posteriors[latent_posteriors[:,5].argsort()]
for i in range(np.int(np.floor(np.sqrt(len(latent_posteriors))))):
    for j in range(np.int(np.ceil(np.sqrt(len(latent_posteriors))))):
        if k < iters*n_samples:
            plt.subplot(grid[i, j])
            plt.imshow(np.reshape(Decoder(ift.Field.from_raw(latent_space, latent_posteriors[k, :])).val, img_shape), 'gray')
            clear_axis()
            k += 1
        else:
            break
fig = plt.gcf()
plt.savefig('./corrupted_data_classification/{}'.format('example_samples'))
print('Done. Results saved.')
| 18,972 | 42.71659 | 219 | py |
corrupted_data_classification | corrupted_data_classification-main/helper_functions/helper_functions.py | import pandas as pd
import numpy as np
import math
import torch.optim as optim
from torch.autograd import Variable
import matplotlib.pyplot as plt
from torchvision import datasets, transforms
import matplotlib.pyplot as plt
import numpy as np
import io
import cv2
import numpy as np
import matplotlib.pyplot as plt
import random
from skimage.transform import resize
from scipy.special import binom
import warnings
try:
import nifty6 as ift
except:
warnings.warn("Failed importing nifty6")
from PIL import Image
def clear_axis():
    """Strip all tick marks from the current matplotlib axes."""
    axes = plt.gca()
    axes.axes.xaxis.set_ticks([])
    axes.axes.yaxis.set_ticks([])
def convolution(colatitude):
    """Convert an angle given in radians to degrees."""
    radians_to_degrees = 180 / np.pi
    return colatitude * radians_to_degrees
def gaussian(x, mu, sig):
    """Unnormalized Gaussian bell curve exp(-(x - mu)^2 / (2 * sig^2))."""
    deviation = x - mu
    return np.exp(-np.power(deviation, 2.) / (2 * np.power(sig, 2.)))
# Module-level 1-D kernel setup: a length-14 Gaussian segment tiled across a
# flattened 28x28 (=784) image.
# NOTE(review): `kernel = np.ones(n)` is immediately overwritten by the Gaussian
# on the next line; `kernels` is not referenced elsewhere in this chunk —
# confirm against callers before removing.
n= 14
x_values = np.linspace(0, 1, n)
kernel = np.ones(n)
kernel = gaussian(x_values, 1, 3)
kernels = np.zeros(784)
for i in range(784//n):
    kernels[i*n:(i+1)*n] = kernel
def conv(colatitude):
    """Smooth *colatitude* with a fixed 11-tap kernel using extended boundaries.

    NOTE(review): `convolve` is not imported anywhere in this module (it looks
    like astropy.convolution.convolve) — calling this function currently raises
    NameError. Needs the missing import before use.
    """
    #plt.imshow(np.reshape(colatitude, [28, 28]))
    #GT = convolve(GT, kernel=[0, 0.5, 1, 2, 3.5, 5, 3.5, 2, 1, 0.5, 0], boundary='extend')
    return convolve(colatitude, kernel=[0.1, 0.5, 1, 2, 3.5, 5, 3.5, 2, 1, 0.5, 0.1], boundary='extend')
def get_cmap(n, name='hsv'):
    """Return a function mapping each index in 0..n-1 to a distinct RGB color;
    *name* must be a standard mpl colormap name."""
    colormap = plt.cm.get_cmap(name, n)
    return colormap
def info_text(overlapping_nn, overlapping_dm):
    """Render two key/value dictionaries as aligned text lines, each preceded
    by a dashed separator and a 'Key / Label' header row."""
    separator = '----------------------------------------------------'
    lines = []
    for mapping in (overlapping_nn, overlapping_dm):
        lines.append(separator)
        lines.append('{:<40} {}'.format('Key', 'Label'))
        lines.extend('{:<40} {}'.format(key, value) for key, value in mapping.items())
    return lines
def get_noise(noise_level, position_space, seed):
    """Build the diagonal noise covariance operator and one concrete noise
    realization drawn under a fixed nifty random seed.

    Returns (noise operator, sampled noise field).
    """
    noise_operator = ift.ScalingOperator(position_space, noise_level)
    with ift.random.Context(seed):
        noise_sample = noise_operator.draw_sample_with_dtype(dtype=np.float64)
    return noise_operator, noise_sample
def rotation(image, img_shape, angle):
    """Rotate a flattened ift image field by *angle* degrees via PIL.

    Values are assumed in [0, 1]; they are scaled to uint8 for PIL and
    scaled back afterwards. Returns a new field on the same domain.
    """
    pixels = np.reshape(image.val, img_shape)
    rotated = Image.fromarray(np.uint8(pixels * 255)).rotate(angle)
    pixels = np.asarray(rotated) / 255
    pixels = np.reshape(pixels, image.shape)
    return ift.Field.from_raw(image.domain, pixels)
def split_validation_set(XTrain, YTrain, val_perc):
    '''
    Shuffle the training data deterministically and split off a validation set.

    :param XTrain: sample array, first axis indexes samples
    :param YTrain: label array aligned with XTrain
    :param val_perc: fraction of samples to move into the validation set
    :return: (XTrain, YTrain, XVal, YVal)

    Permutation of Training Dataset is inspired by an article pusblished on Medium:
    https://medium.com/@mjbhobe/mnist-digits-classification-with-keras-ed6c2374bd0e
    Author: Bhobeé, Manish
    Date of Publication: 29.09.2018
    Relevant Code Section: Permutation of Data and Cut-Out of Validation Set
    Visit: 23.10.2020
    Minor modifications were made on val_percent and names of variables (adjusted to
    my given variable names) and dimensionality of Datasets (mine is reshaped to vectors,
    the author used 2D Arrays.)
    '''
    # shuffle the training dataset (5 times!) with fixed seeds for reproducibility
    for i in range(5):
        np.random.seed(i)
        indexes = np.random.permutation(len(XTrain))
        XTrain = XTrain[indexes]
        YTrain = YTrain[indexes]
    # BUG FIX: val_perc was previously hard-coded to 0.2 here, silently ignoring
    # the caller's argument; the parameter is now honored (callers passing 0.2
    # are unaffected).
    val_count = int(val_perc * len(XTrain))
    # first pick validation set from train_data/labels
    XVal = XTrain[:val_count]
    YVal = YTrain[:val_count]
    # leave rest in training set
    XTrain = XTrain[val_count:]
    YTrain = YTrain[val_count:]
    return XTrain, YTrain, XVal, YVal
| 3,772 | 30.705882 | 120 | py |
corrupted_data_classification | corrupted_data_classification-main/NNs/Fashion-MNIST/pretrained_supervised_ae10/autoencoder_fmnist.py | # -*- coding: utf-8 -*-
# Commented out IPython magic to ensure Python compatibility.
# %matplotlib inline
# Commented out IPython magic to ensure Python compatibility.
# Colab and system related
import os
import sys
###
# Necessary to convert tensorflow-object (e.g. Neural Network) to Nifty-Operator
sys.path.append('corrupted_data_classification/helper_functions/')
###
import tensorflow as tf
# Include path to access helper functions and Mask / Conv Operator
sys.path.append('corrupted_data_classification/helper_functions/')
from helper_functions import clear_axis, gaussian, get_cmap, info_text, get_noise, rotation, split_validation_set
import Mask # Masking Operator
import Conv # Convolution Operator
sys.path.remove
# Tensorflow
# Plotting
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.pyplot as plt
# %matplotlib inline
plt.rcParams['figure.dpi'] = 200 # 200 e.g. is really fine, but slower
# Numerics
import random
import numpy as np
from sklearn.neighbors import KernelDensity
from scipy.stats import multivariate_normal
import sklearn as sk
from sklearn import decomposition
# Load the Fashion-MNIST dataset (note: the local variable is called `mnist`
# for historic reasons, but this is tf.keras.datasets.fashion_mnist).
mnist = tf.keras.datasets.fashion_mnist
(XTrain, YTrain), (XTest, YTest) = mnist.load_data()
# Scale pixel values from [0, 255] to [0, 1].
XTrain, XTest = XTrain / 255.0, XTest / 255.0
# Cut out last 100 Training images for comparison
XTrain = XTrain[0:-100]
YTrain = YTrain[0:-100]
# Reshape Xtrain and XTest to 1x784 Vectors instead of 28x28 arrays
XTrain = XTrain.reshape((len(XTrain), np.prod(XTrain.shape[1:])))
XTest = XTest.reshape((len(XTest), np.prod(XTest.shape[1:])))
# Split off a validation set (20%) from the shuffled training data.
XTrain, YTrain, XVal, YVal = split_validation_set(XTrain, YTrain, val_perc=0.2)
def autoencoder_deep(latent_space_size):
    """Build a dense supervised autoencoder for flattened 28x28 images.

    :param latent_space_size: width of the bottleneck (latent) layer
    :return: (Encoder, Decoder, model) — Encoder maps image -> latent code,
             Decoder maps latent code -> image, and model maps image ->
             [reconstruction, latent code] for two-headed training.
    """
    image_in = tf.keras.layers.Input(shape=784)
    hidden = tf.keras.layers.Dense(512, activation='selu', kernel_initializer='lecun_normal')(image_in)
    hidden = tf.keras.layers.Dense(256, activation='selu', kernel_initializer='lecun_normal')(hidden)
    hidden = tf.keras.layers.Dense(128, activation='selu', kernel_initializer='lecun_normal')(hidden)
    latent = tf.keras.layers.Dense(latent_space_size, activation='linear',
                                   activity_regularizer=tf.keras.regularizers.L2(0.1))(hidden)
    # Decoder mirrors the encoder; it gets its own Input so it can be used stand-alone.
    latent_in = tf.keras.layers.Input(shape=latent_space_size)
    hidden = tf.keras.layers.Dense(128, activation='selu', kernel_initializer='lecun_normal')(latent_in)
    hidden = tf.keras.layers.Dense(256, activation='selu', kernel_initializer='lecun_normal')(hidden)
    hidden = tf.keras.layers.Dense(512, activation='selu', kernel_initializer='lecun_normal')(hidden)
    reconstruction = tf.keras.layers.Dense(784, activation='sigmoid')(hidden)
    # Decouple Encoder and Decoder from the overall model, then wire the
    # Decoder onto the Encoder's latent output for the end-to-end model.
    Encoder = tf.keras.Model(image_in, latent)
    Decoder = tf.keras.Model(latent_in, reconstruction)
    end_to_end = Decoder(latent)
    model = tf.keras.Model(image_in, [end_to_end, latent])
    return Encoder, Decoder, model
# Build the supervised autoencoder with a 10-dimensional latent space.
Encoder, Decoder, model = autoencoder_deep(10)
# Loss Function for Reconstruction of images (i.e. overall Autoencoder)
def loss_fn_AE(y_true, y_pred):
# y_pred = tf.nn.elu(y_pred) * tf.nn.softplus(y_pred)
# return tf.losses.categorical_crossentropy(y_true, y_pred)
# y_pred = tf.nn.softmax(y_pred)
return tf.losses.binary_crossentropy(y_true,y_pred)
#return tf.keras.losses.MeanSquaredError(y_true, y_pred)
# Loss Function for Classification of Images in latent space
def loss_fn_Encoder(y_true, y_pred):
y_pred = tf.nn.softmax(y_pred)
return tf.losses.sparse_categorical_crossentropy(y_true, y_pred)
# Training Options
model.compile(optimizer='adam',
#loss=[loss_fn_AE, loss_fn_Encoder],
loss=[loss_fn_AE, loss_fn_Encoder],
metrics=['accuracy'])
# Training and Testing
# Training and Testing
with tf.device('/device:GPU:0'):
results = model.fit(XTrain, [XTrain, YTrain], epochs=25)
model.evaluate(XTest, [XTest, YTest], verbose=2)
# Save trained Decoder and trained Encoder
Decoder.save('./corrupted_data_classification/NNs/Fashion-MNIST/pretrained_supervised_ae10/Decoder/', save_format='tf')
Encoder.save('./corrupted_data_classification/NNs/Fashion-MNIST/pretrained_supervised_ae10/Encoder/', save_format='tf')
| 4,227 | 40.048544 | 119 | py |
corrupted_data_classification | corrupted_data_classification-main/NNs/MNIST/pretrained_supervised_ae10/autoencoder.py | # -*- coding: utf-8 -*-
# Commented out IPython magic to ensure Python compatibility.
# %matplotlib inline
# Commented out IPython magic to ensure Python compatibility.
# Colab and system related
import os
import sys
###
# Necessary to convert tensorflow-object (e.g. Neural Network) to Nifty-Operator
sys.path.append('corrupted_data_classification/helper_functions/')
###
import tensorflow as tf
# Include path to access helper functions and Mask / Conv Operator
sys.path.append('corrupted_data_classification/helper_functions/')
from helper_functions import clear_axis, gaussian, get_cmap, info_text, get_noise, rotation, split_validation_set
import Mask # Masking Operator
import Conv # Convolution Operator
sys.path.remove
# Tensorflow
# Plotting
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.pyplot as plt
# %matplotlib inline
plt.rcParams['figure.dpi'] = 200 # 200 e.g. is really fine, but slower
# Numerics
import random
import numpy as np
from sklearn.neighbors import KernelDensity
from scipy.stats import multivariate_normal
import sklearn as sk
from sklearn import decomposition
# Load the MNIST dataset and scale pixel values to [0, 1].
mnist = tf.keras.datasets.mnist
(XTrain, YTrain), (XTest, YTest) = mnist.load_data()
XTrain, XTest = XTrain / 255.0, XTest / 255.0
# Hold back the last 100 training images for later comparison experiments.
XTrain = XTrain[0:-100]
YTrain = YTrain[0:-100]
# Flatten each 28x28 image into a 784-dimensional vector.
XTrain = XTrain.reshape((len(XTrain), np.prod(XTrain.shape[1:])))
XTest = XTest.reshape((len(XTest), np.prod(XTest.shape[1:])))
# Split off 20% of the training data as a validation set (helper from the repo).
XTrain, YTrain, XVal, YVal = split_validation_set(XTrain, YTrain, val_perc=0.2)
def autoencoder_deep(latent_space_size):
    """Build a dense autoencoder whose latent code doubles as a classifier input.

    Returns (Encoder, Decoder, model) where `model` maps an image to
    [reconstruction, latent code] so both losses can be trained jointly.
    """
    # Encoder: 784 -> 512 -> 256 -> 128 -> latent (L2-regularised linear code).
    enc_in = tf.keras.layers.Input(shape=784)
    x = tf.keras.layers.Dense(512, activation='selu', kernel_initializer='lecun_normal')(enc_in)
    x = tf.keras.layers.Dense(256, activation='selu', kernel_initializer='lecun_normal')(x)
    x = tf.keras.layers.Dense(128, activation='selu', kernel_initializer='lecun_normal')(x)
    latent = tf.keras.layers.Dense(latent_space_size, activation='linear',
                                   activity_regularizer=tf.keras.regularizers.L2(0.001))(x)
    # Decoder: latent -> 128 -> 256 -> 512 -> 784 (sigmoid output in [0, 1]).
    dec_in = tf.keras.layers.Input(shape=latent_space_size)
    y = tf.keras.layers.Dense(128, activation='selu', kernel_initializer='lecun_normal')(dec_in)
    y = tf.keras.layers.Dense(256, activation='selu', kernel_initializer='lecun_normal')(y)
    y = tf.keras.layers.Dense(512, activation='selu', kernel_initializer='lecun_normal')(y)
    reconstruction = tf.keras.layers.Dense(784, activation='sigmoid')(y)
    # Expose encoder and decoder as stand-alone models, then wire them together.
    Encoder = tf.keras.Model(enc_in, latent)
    Decoder = tf.keras.Model(dec_in, reconstruction)
    full_out = Decoder(latent)
    model = tf.keras.Model(enc_in, [full_out, latent])
    return Encoder, Decoder, model
Encoder, Decoder, model = autoencoder_deep(10)
# Loss Function for Reconstruction of images (i.e. overall Autoencoder)
def loss_fn_AE(y_true, y_pred):
    """Reconstruction loss: per-pixel binary cross-entropy.

    `y_true` is the flattened input image scaled to [0, 1] and `y_pred` is
    the sigmoid output of the decoder, so probabilities are compared
    directly (`from_logits` stays at its default of False).
    """
    # Commented-out experiments (softmax/categorical-CE/MSE variants) removed.
    return tf.losses.binary_crossentropy(y_true, y_pred)
# Loss Function for Classification of Images in latent space
def loss_fn_Encoder(y_true, y_pred):
    """Classification loss on the latent code: softmax then sparse CE."""
    class_probs = tf.nn.softmax(y_pred)
    return tf.losses.sparse_categorical_crossentropy(y_true, class_probs)
# Training options: one loss per model output (reconstruction, latent code).
model.compile(optimizer='adam',
              #loss=[loss_fn_AE, loss_fn_Encoder],
              loss=[loss_fn_AE, loss_fn_Encoder],
              metrics=['accuracy'])
# Training and testing.
results = model.fit(XTrain, [XTrain, YTrain], epochs=25)
model.evaluate(XTest, [XTest, YTest], verbose=2)
# Save trained Decoder and trained Encoder in TensorFlow SavedModel format.
Decoder.save('./corrupted_data_classification/NNs/MNIST/pretrained_supervised_ae10/Decoder/', save_format='tf')
Encoder.save('./corrupted_data_classification/NNs/MNIST/pretrained_supervised_ae10/Encoder/', save_format='tf')
# Plot the accuracy history of the latent-classification head.
# NOTE(review): the metric name 'dense_3_accuracy' depends on Keras'
# auto-generated layer names — confirm it matches this model's outputs.
plt.plot(results.history['dense_3_accuracy'])
| 4,195 | 39.346154 | 113 | py |
mmyolo | mmyolo-main/setup.py | #!/usr/bin/env python
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
import platform
import shutil
import sys
import warnings
from setuptools import find_packages, setup
from torch.utils.cpp_extension import BuildExtension
def readme():
    """Return the contents of README.md for use as the long description."""
    with open('README.md', encoding='utf-8') as readme_file:
        return readme_file.read()
version_file = 'mmyolo/version.py'
def get_version():
    """Extract ``__version__`` from ``version_file`` without importing the package.

    The version module is executed into an explicit namespace dict. The
    original read the result back through ``locals()`` after ``exec``,
    which only works because CPython happens to populate the snapshot —
    an implementation detail the language does not guarantee.
    """
    namespace = {}
    with open(version_file) as f:
        exec(compile(f.read(), version_file, 'exec'), namespace)
    return namespace['__version__']
def parse_requirements(fname='requirements.txt', with_version=True):
    """Parse the package dependencies listed in a requirements file.

    Args:
        fname (str): path to requirements file. A missing file yields an
            empty list rather than an error.
        with_version (bool, default=True): if True include version specs
            (the original docstring wrongly claimed the default was False).
    Returns:
        List[str]: list of requirements items
    CommandLine:
        python -c "import setup; print(setup.parse_requirements())"
    """
    import re
    import sys
    from os.path import exists
    require_fpath = fname

    def parse_line(line):
        """Parse information from a line in a requirements text file."""
        if line.startswith('-r '):
            # Allow specifying requirements in other files
            target = line.split(' ')[1]
            for info in parse_require_file(target):
                yield info
        else:
            info = {'line': line}
            if line.startswith('-e '):
                # Editable install: the package name follows '#egg='.
                info['package'] = line.split('#egg=')[1]
            elif '@git+' in line:
                # Direct git reference: keep the whole line as the package.
                info['package'] = line
            else:
                # Remove versioning from the package
                pat = '(' + '|'.join(['>=', '==', '>']) + ')'
                parts = re.split(pat, line, maxsplit=1)
                parts = [p.strip() for p in parts]
                info['package'] = parts[0]
                if len(parts) > 1:
                    op, rest = parts[1:]
                    if ';' in rest:
                        # Handle platform specific dependencies
                        # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
                        version, platform_deps = map(str.strip,
                                                     rest.split(';'))
                        info['platform_deps'] = platform_deps
                    else:
                        version = rest  # NOQA
                    info['version'] = (op, version)
            yield info

    def parse_require_file(fpath):
        # Yield parsed entries, skipping blanks and comment lines.
        with open(fpath) as f:
            for line in f.readlines():
                line = line.strip()
                if line and not line.startswith('#'):
                    yield from parse_line(line)

    def gen_packages_items():
        if exists(require_fpath):
            for info in parse_require_file(require_fpath):
                parts = [info['package']]
                if with_version and 'version' in info:
                    parts.extend(info['version'])
                if not sys.version.startswith('3.4'):
                    # apparently package_deps are broken in 3.4
                    platform_deps = info.get('platform_deps')
                    if platform_deps is not None:
                        parts.append(';' + platform_deps)
                item = ''.join(parts)
                yield item

    packages = list(gen_packages_items())
    return packages
def add_mim_extension():
    """Add extra files that are required to support MIM into the package.
    These files will be added by creating a symlink to the originals if the
    package is installed in `editable` mode (e.g. pip install -e .), or by
    copying from the originals otherwise.
    """
    # parse installment mode
    if 'develop' in sys.argv:
        # installed by `pip install -e .`
        if platform.system() == 'Windows':
            # set `copy` mode here since symlink fails on Windows.
            mode = 'copy'
        else:
            mode = 'symlink'
    elif 'sdist' in sys.argv or 'bdist_wheel' in sys.argv:
        # installed by `pip install .`
        # or create source distribution by `python setup.py sdist`
        mode = 'copy'
    else:
        # Not a build/install invocation: nothing to do.
        return
    filenames = ['tools', 'configs', 'demo', 'model-index.yml']
    repo_path = osp.dirname(__file__)
    mim_path = osp.join(repo_path, 'mmyolo', '.mim')
    os.makedirs(mim_path, exist_ok=True)
    for filename in filenames:
        if osp.exists(filename):
            src_path = osp.join(repo_path, filename)
            tar_path = osp.join(mim_path, filename)
            # Remove any stale file/link/directory before recreating it.
            if osp.isfile(tar_path) or osp.islink(tar_path):
                os.remove(tar_path)
            elif osp.isdir(tar_path):
                shutil.rmtree(tar_path)
            if mode == 'symlink':
                # Relative symlink so the package tree stays relocatable.
                src_relpath = osp.relpath(src_path, osp.dirname(tar_path))
                os.symlink(src_relpath, tar_path)
            elif mode == 'copy':
                if osp.isfile(src_path):
                    shutil.copyfile(src_path, tar_path)
                elif osp.isdir(src_path):
                    shutil.copytree(src_path, tar_path)
                else:
                    warnings.warn(f'Cannot copy file {src_path}.')
            else:
                raise ValueError(f'Invalid mode {mode}')
if __name__ == '__main__':
    # Link/copy MIM resource files into the package before building.
    add_mim_extension()
    setup(
        name='mmyolo',
        version=get_version(),
        description='OpenMMLab Toolbox of YOLO',
        long_description=readme(),
        long_description_content_type='text/markdown',
        author='MMYOLO Contributors',
        author_email='openmmlab@gmail.com',
        keywords='computer vision, object detection',
        url='https://github.com/open-mmlab/mmyolo',
        packages=find_packages(exclude=('configs', 'tools', 'demo')),
        include_package_data=True,
        classifiers=[
            'Development Status :: 5 - Production/Stable',
            'License :: OSI Approved :: Apache Software License',
            'Operating System :: OS Independent',
            'Programming Language :: Python :: 3',
            'Programming Language :: Python :: 3.7',
            'Programming Language :: Python :: 3.8',
            'Programming Language :: Python :: 3.9',
        ],
        license='GPL License 3.0',
        install_requires=parse_requirements('requirements/runtime.txt'),
        extras_require={
            'all': parse_requirements('requirements.txt'),
            'tests': parse_requirements('requirements/tests.txt'),
            'build': parse_requirements('requirements/build.txt'),
            'mim': parse_requirements('requirements/mminstall.txt'),
        },
        ext_modules=[],
        cmdclass={'build_ext': BuildExtension},
        zip_safe=False)
| 6,862 | 34.744792 | 125 | py |
mmyolo | mmyolo-main/tools/test.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
from mmdet.engine.hooks.utils import trigger_visualization_hook
from mmengine.config import Config, ConfigDict, DictAction
from mmengine.evaluator import DumpResults
from mmengine.runner import Runner
from mmyolo.registry import RUNNERS
from mmyolo.utils import is_metainfo_lower
# TODO: support fuse_conv_bn
def parse_args():
    """Build and parse the command-line arguments for model evaluation."""
    parser = argparse.ArgumentParser(
        description='MMYOLO test (and eval) a model')
    add = parser.add_argument
    add('config', help='test config file path')
    add('checkpoint', help='checkpoint file')
    add('--work-dir',
        help='the directory to save the file containing evaluation metrics')
    add('--out',
        type=str,
        help='output result file (must be a .pkl file) in pickle format')
    add('--json-prefix',
        type=str,
        help='the prefix of the output json file without perform evaluation, '
        'which is useful when you want to format the result to a specific '
        'format and submit it to the test server')
    add('--tta',
        action='store_true',
        help='Whether to use test time augmentation')
    add('--show', action='store_true', help='show prediction results')
    add('--deploy',
        action='store_true',
        help='Switch model to deployment mode')
    add('--show-dir',
        help='directory where painted images will be saved. '
        'If specified, it will be automatically saved '
        'to the work_dir/timestamp/show_dir')
    add('--wait-time', type=float, default=2, help='the interval of show (s)')
    add('--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    add('--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    add('--local_rank', type=int, default=0)
    args = parser.parse_args()
    # Make the rank visible to libraries that read it from the environment.
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args
def main():
    """Evaluate a model: load config, apply CLI overrides, build and run the Runner."""
    args = parse_args()
    # load config
    cfg = Config.fromfile(args.config)
    # replace the ${key} with the value of cfg.key
    # cfg = replace_cfg_vals(cfg)
    cfg.launcher = args.launcher
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = args.work_dir
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(args.config))[0])
    cfg.load_from = args.checkpoint
    if args.show or args.show_dir:
        cfg = trigger_visualization_hook(cfg, args)
    if args.deploy:
        cfg.custom_hooks.append(dict(type='SwitchToDeployHook'))
    # add `format_only` and `outfile_prefix` into cfg
    if args.json_prefix is not None:
        cfg_json = {
            'test_evaluator.format_only': True,
            'test_evaluator.outfile_prefix': args.json_prefix
        }
        cfg.merge_from_dict(cfg_json)
    # Determine whether the custom metainfo fields are all lowercase
    is_metainfo_lower(cfg)
    if args.tta:
        assert 'tta_model' in cfg, 'Cannot find ``tta_model`` in config.' \
            " Can't use tta !"
        assert 'tta_pipeline' in cfg, 'Cannot find ``tta_pipeline`` ' \
            "in config. Can't use tta !"
        # Wrap the model with the TTA model and swap in the TTA pipeline.
        cfg.model = ConfigDict(**cfg.tta_model, module=cfg.model)
        test_data_cfg = cfg.test_dataloader.dataset
        while 'dataset' in test_data_cfg:
            test_data_cfg = test_data_cfg['dataset']
        # batch_shapes_cfg will force control the size of the output image,
        # it is not compatible with tta.
        if 'batch_shapes_cfg' in test_data_cfg:
            test_data_cfg.batch_shapes_cfg = None
        test_data_cfg.pipeline = cfg.tta_pipeline
    # build the runner from config
    if 'runner_type' not in cfg:
        # build the default runner
        runner = Runner.from_cfg(cfg)
    else:
        # build customized runner from the registry
        # if 'runner_type' is set in the cfg
        runner = RUNNERS.build(cfg)
    # add `DumpResults` dummy metric
    if args.out is not None:
        assert args.out.endswith(('.pkl', '.pickle')), \
            'The dump file must be a pkl file.'
        runner.test_evaluator.metrics.append(
            DumpResults(out_file_path=args.out))
    # start testing
    runner.test()
if __name__ == '__main__':
    main()
| 5,443 | 35.05298 | 79 | py |
mmyolo | mmyolo-main/tools/train.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import logging
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.logging import print_log
from mmengine.runner import Runner
from mmyolo.registry import RUNNERS
from mmyolo.utils import is_metainfo_lower
def parse_args():
    """Build and parse the command-line arguments for training."""
    parser = argparse.ArgumentParser(description='Train a detector')
    add = parser.add_argument
    add('config', help='train config file path')
    add('--work-dir', help='the dir to save logs and models')
    add('--amp',
        action='store_true',
        default=False,
        help='enable automatic-mixed-precision training')
    add('--resume',
        nargs='?',
        type=str,
        const='auto',
        help='If specify checkpoint path, resume from it, while if not '
        'specify, try to auto resume from the latest checkpoint '
        'in the work directory.')
    add('--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    add('--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    add('--local_rank', type=int, default=0)
    args = parser.parse_args()
    # Make the rank visible to libraries that read it from the environment.
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args
def main():
    """Train a detector: load config, apply CLI overrides, build and run the Runner."""
    args = parse_args()
    # load config
    cfg = Config.fromfile(args.config)
    # replace the ${key} with the value of cfg.key
    # cfg = replace_cfg_vals(cfg)
    cfg.launcher = args.launcher
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = args.work_dir
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(args.config))[0])
    # enable automatic-mixed-precision training
    if args.amp is True:
        optim_wrapper = cfg.optim_wrapper.type
        if optim_wrapper == 'AmpOptimWrapper':
            print_log(
                'AMP training is already enabled in your config.',
                logger='current',
                level=logging.WARNING)
        else:
            assert optim_wrapper == 'OptimWrapper', (
                '`--amp` is only supported when the optimizer wrapper type is '
                f'`OptimWrapper` but got {optim_wrapper}.')
            cfg.optim_wrapper.type = 'AmpOptimWrapper'
            cfg.optim_wrapper.loss_scale = 'dynamic'
    # resume is determined in this priority: resume from > auto_resume
    if args.resume == 'auto':
        cfg.resume = True
        cfg.load_from = None
    elif args.resume is not None:
        cfg.resume = True
        cfg.load_from = args.resume
    # Determine whether the custom metainfo fields are all lowercase
    is_metainfo_lower(cfg)
    # build the runner from config
    if 'runner_type' not in cfg:
        # build the default runner
        runner = Runner.from_cfg(cfg)
    else:
        # build customized runner from the registry
        # if 'runner_type' is set in the cfg
        runner = RUNNERS.build(cfg)
    # start training
    runner.train()
if __name__ == '__main__':
    main()
| 3,969 | 33.224138 | 79 | py |
mmyolo | mmyolo-main/tools/misc/download_dataset.py | import argparse
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from tarfile import TarFile
from zipfile import ZipFile
import torch
def parse_args():
    """Parse the command-line options controlling the dataset download."""
    parser = argparse.ArgumentParser(
        description='Download datasets for training')
    add = parser.add_argument
    add('--dataset-name', type=str, help='dataset name', default='coco2017')
    add('--save-dir',
        type=str,
        help='the dir to save dataset',
        default='data/coco')
    add('--unzip',
        action='store_true',
        help='whether unzip dataset or not, zipped files will be saved')
    add('--delete',
        action='store_true',
        help='delete the download zipped files')
    add('--threads', type=int, help='number of threading', default=4)
    return parser.parse_args()
def download(url, dir, unzip=True, delete=False, threads=1):
    """Download (or move) one or more archives into ``dir`` and optionally extract.

    Args:
        url: a single URL/local path, or an iterable of them. An existing
            local path is moved into ``dir`` instead of being downloaded.
        dir: destination directory (is converted to a ``Path``).
        unzip (bool): extract ``.zip``/``.tar`` archives after download.
        delete (bool): remove the archive after extraction.
        threads (int): number of parallel downloads when ``url`` is iterable.
    """

    def download_one(url, dir):
        f = dir / Path(url).name
        if Path(url).is_file():
            # Local file: just move it into the target directory.
            Path(url).rename(f)
        elif not f.exists():
            print(f'Downloading {url} to {f}')
            torch.hub.download_url_to_file(url, f, progress=True)
        if unzip and f.suffix in ('.zip', '.tar'):
            print(f'Unzipping {f.name}')
            # Context managers ensure archive handles are closed; the
            # original leaked open ZipFile/TarFile objects.
            if f.suffix == '.zip':
                with ZipFile(f) as zip_file:
                    zip_file.extractall(path=dir)
            elif f.suffix == '.tar':
                with TarFile(f) as tar_file:
                    tar_file.extractall(path=dir)
            if delete:
                f.unlink()
                print(f'Delete {f}')

    dir = Path(dir)
    if threads > 1:
        # Fan the downloads out over a thread pool (the work is I/O bound).
        pool = ThreadPool(threads)
        pool.imap(lambda x: download_one(*x), zip(url, repeat(dir)))
        pool.close()
        pool.join()
    else:
        for u in [url] if isinstance(url, (str, Path)) else url:
            download_one(u, dir)
def main():
    """Resolve the requested dataset name to its URLs and download them."""
    args = parse_args()
    path = Path(args.save_dir)
    if not path.exists():
        path.mkdir(parents=True, exist_ok=True)
    data2url = dict(
        # TODO: Support for downloading Panoptic Segmentation of COCO
        coco2017=[
            'http://images.cocodataset.org/zips/train2017.zip',
            'http://images.cocodataset.org/zips/val2017.zip',
            'http://images.cocodataset.org/zips/test2017.zip',
            'http://images.cocodataset.org/annotations/' +
            'annotations_trainval2017.zip'
        ],
        # NOTE(review): both LVIS entries point to the *train* annotations;
        # the second was presumably meant to be the val split — confirm.
        lvis=[
            'https://s3-us-west-2.amazonaws.com/dl.fbaipublicfiles.com/LVIS/lvis_v1_train.json.zip',  # noqa
            'https://s3-us-west-2.amazonaws.com/dl.fbaipublicfiles.com/LVIS/lvis_v1_train.json.zip',  # noqa
        ],
        voc2007=[
            'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar',  # noqa
            'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar',  # noqa
            'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCdevkit_08-Jun-2007.tar',  # noqa
        ],
        voc2012=[
            'http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar',  # noqa
        ],
        balloon=[
            # src link: https://github.com/matterport/Mask_RCNN/releases/download/v2.1/balloon_dataset.zip # noqa
            'https://download.openmmlab.com/mmyolo/data/balloon_dataset.zip'
        ],
        cat=[
            'https://download.openmmlab.com/mmyolo/data/cat_dataset.zip'  # noqa
        ],
    )
    url = data2url.get(args.dataset_name, None)
    if url is None:
        print('Only support COCO, VOC, balloon, cat and LVIS now!')
        return
    download(
        url,
        dir=path,
        unzip=args.unzip,
        delete=args.delete,
        threads=args.threads)
if __name__ == '__main__':
    main()
| 3,814 | 32.761062 | 113 | py |
mmyolo | mmyolo-main/tools/misc/publish_model.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import subprocess
import torch
def parse_args():
    """Parse the input/output checkpoint paths from the command line."""
    parser = argparse.ArgumentParser(
        description='Process a checkpoint to be published')
    parser.add_argument('in_file', help='input checkpoint filename')
    parser.add_argument('out_file', help='output checkpoint filename')
    return parser.parse_args()
def process_checkpoint(in_file, out_file):
    """Strip training-only state from a checkpoint and publish it.

    Removes the optimizer/EMA/message-hub entries and generated prior
    buffers, saves the slimmed checkpoint, and renames it to
    ``<out_file stem>-<sha256[:8]>.pth``.

    Fixes over the original:
    * ``torch.__version__ >= '1.6'`` compared version *strings*, which
      mis-classifies torch 1.10-1.13 ('1.10' < '1.6' lexicographically);
      version components are now compared numerically.
    * hashing/renaming used external ``sha256sum`` and an un-awaited
      ``subprocess.Popen(['mv', ...])`` (race-prone, POSIX-only); replaced
      with ``hashlib`` and a synchronous ``os.rename``.
    """
    import hashlib
    import os
    checkpoint = torch.load(in_file, map_location='cpu')
    # remove optimizer for smaller file size
    if 'optimizer' in checkpoint:
        del checkpoint['optimizer']
    if 'message_hub' in checkpoint:
        del checkpoint['message_hub']
    if 'ema_state_dict' in checkpoint:
        del checkpoint['ema_state_dict']
    # Drop buffers that are regenerated at load time.
    for key in list(checkpoint['state_dict']):
        if key.startswith('data_preprocessor'):
            checkpoint['state_dict'].pop(key)
        elif 'priors_base_sizes' in key:
            checkpoint['state_dict'].pop(key)
        elif 'grid_offset' in key:
            checkpoint['state_dict'].pop(key)
        elif 'prior_inds' in key:
            checkpoint['state_dict'].pop(key)
    # Compare (major, minor) numerically; strip any local suffix like '+cu117'.
    version = tuple(int(p) for p in torch.__version__.split('+')[0].split('.')[:2])
    if version >= (1, 6):
        # Legacy serialization keeps the file loadable by older torch.
        torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)
    else:
        torch.save(checkpoint, out_file)
    with open(out_file, 'rb') as f:
        sha = hashlib.sha256(f.read()).hexdigest()
    if out_file.endswith('.pth'):
        out_file_name = out_file[:-4]
    else:
        out_file_name = out_file
    final_file = out_file_name + f'-{sha[:8]}.pth'
    os.rename(out_file, final_file)
def main():
    """CLI entry: strip a checkpoint and publish it with a hash suffix."""
    args = parse_args()
    process_checkpoint(args.in_file, args.out_file)
if __name__ == '__main__':
    main()
| 1,744 | 29.086207 | 78 | py |
mmyolo | mmyolo-main/tools/model_converters/yolov6_to_mmyolo.py | import argparse
from collections import OrderedDict
import torch
def convert(src, dst):
    """Rename the keys of an official YOLOv6 checkpoint to MMYOLO layout and save.

    Unpickling the official checkpoint needs the meituan/YOLOv6 source tree
    on sys.path; the mapping below translates backbone (EfficientRep),
    neck (RepPAN) and head parameter names.
    """
    import sys
    sys.path.append('yolov6')
    try:
        ckpt = torch.load(src, map_location=torch.device('cpu'))
    except ModuleNotFoundError:
        raise RuntimeError(
            'This script must be placed under the meituan/YOLOv6 repo,'
            ' because loading the official pretrained model need'
            ' some python files to build model.')
    # The saved model is the model before reparameterization
    model = ckpt['ema' if ckpt.get('ema') else 'model'].float()
    new_state_dict = OrderedDict()
    for k, v in model.state_dict().items():
        name = k
        if 'detect' in k:
            # 'proj' buffers are regenerated by MMYOLO; skip them.
            if 'proj' in k:
                continue
            name = k.replace('detect', 'bbox_head.head_module')
        if k.find('anchors') >= 0 or k.find('anchor_grid') >= 0:
            continue
        # Backbone: ERBlock_2..5 map to stage1..4.
        if 'ERBlock_2' in k:
            name = k.replace('ERBlock_2', 'stage1.0')
            if '.cv' in k:
                name = name.replace('.cv', '.conv')
            if '.m.' in k:
                name = name.replace('.m.', '.block.')
        elif 'ERBlock_3' in k:
            name = k.replace('ERBlock_3', 'stage2.0')
            if '.cv' in k:
                name = name.replace('.cv', '.conv')
            if '.m.' in k:
                name = name.replace('.m.', '.block.')
        elif 'ERBlock_4' in k:
            name = k.replace('ERBlock_4', 'stage3.0')
            if '.cv' in k:
                name = name.replace('.cv', '.conv')
            if '.m.' in k:
                name = name.replace('.m.', '.block.')
        elif 'ERBlock_5' in k:
            name = k.replace('ERBlock_5', 'stage4.0')
            if '.cv' in k:
                name = name.replace('.cv', '.conv')
            if '.m.' in k:
                name = name.replace('.m.', '.block.')
            # The trailing SPPF module becomes its own stage entry.
            if 'stage4.0.2' in name:
                name = name.replace('stage4.0.2', 'stage4.1')
                name = name.replace('cv', 'conv')
        # Neck (RepPAN): reduce/upsample/downsample and Rep blocks.
        elif 'reduce_layer0' in k:
            name = k.replace('reduce_layer0', 'reduce_layers.2')
        elif 'Rep_p4' in k:
            name = k.replace('Rep_p4', 'top_down_layers.0.0')
            if '.cv' in k:
                name = name.replace('.cv', '.conv')
            if '.m.' in k:
                name = name.replace('.m.', '.block.')
        elif 'reduce_layer1' in k:
            name = k.replace('reduce_layer1', 'top_down_layers.0.1')
            if '.cv' in k:
                name = name.replace('.cv', '.conv')
            if '.m.' in k:
                name = name.replace('.m.', '.block.')
        elif 'Rep_p3' in k:
            name = k.replace('Rep_p3', 'top_down_layers.1')
            if '.cv' in k:
                name = name.replace('.cv', '.conv')
            if '.m.' in k:
                name = name.replace('.m.', '.block.')
        elif 'upsample0' in k:
            name = k.replace('upsample0.upsample_transpose',
                             'upsample_layers.0')
        elif 'upsample1' in k:
            name = k.replace('upsample1.upsample_transpose',
                             'upsample_layers.1')
        elif 'Rep_n3' in k:
            name = k.replace('Rep_n3', 'bottom_up_layers.0')
            if '.cv' in k:
                name = name.replace('.cv', '.conv')
            if '.m.' in k:
                name = name.replace('.m.', '.block.')
        elif 'Rep_n4' in k:
            name = k.replace('Rep_n4', 'bottom_up_layers.1')
            if '.cv' in k:
                name = name.replace('.cv', '.conv')
            if '.m.' in k:
                name = name.replace('.m.', '.block.')
        elif 'downsample2' in k:
            name = k.replace('downsample2', 'downsample_layers.0')
        elif 'downsample1' in k:
            name = k.replace('downsample1', 'downsample_layers.1')
        new_state_dict[name] = v
    data = {'state_dict': new_state_dict}
    torch.save(data, dst)
# Note: This script must be placed under the yolov6 repo to run.
def main():
    """CLI entry: convert an official YOLOv6 checkpoint to MMYOLO format."""
    parser = argparse.ArgumentParser(description='Convert model keys')
    parser.add_argument(
        '--src', default='yolov6s.pt', help='src yolov6 model path')
    parser.add_argument('--dst', default='mmyolov6.pt', help='save path')
    args = parser.parse_args()
    convert(args.src, args.dst)
if __name__ == '__main__':
    main()
| 4,403 | 36.965517 | 73 | py |
mmyolo | mmyolo-main/tools/model_converters/yolox_to_mmyolo.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
from collections import OrderedDict
import torch
# Maps official YOLOX neck (PAFPN) parameter prefixes to their MMYOLO names.
neck_dict = {
    'backbone.lateral_conv0': 'neck.reduce_layers.2',
    'backbone.C3_p4.conv': 'neck.top_down_layers.0.0.cv',
    'backbone.C3_p4.m.0.': 'neck.top_down_layers.0.0.m.0.',
    'backbone.reduce_conv1': 'neck.top_down_layers.0.1',
    'backbone.C3_p3.conv': 'neck.top_down_layers.1.cv',
    'backbone.C3_p3.m.0.': 'neck.top_down_layers.1.m.0.',
    'backbone.bu_conv2': 'neck.downsample_layers.0',
    'backbone.C3_n3.conv': 'neck.bottom_up_layers.0.cv',
    'backbone.C3_n3.m.0.': 'neck.bottom_up_layers.0.m.0.',
    'backbone.bu_conv1': 'neck.downsample_layers.1',
    'backbone.C3_n4.conv': 'neck.bottom_up_layers.1.cv',
    'backbone.C3_n4.m.0.': 'neck.bottom_up_layers.1.m.0.',
}
def convert_stem(model_key, model_weight, state_dict, converted_names):
    """Rename a stem parameter by dropping the leading 'backbone.' prefix."""
    new_key = model_key[len('backbone.'):]
    state_dict[new_key] = model_weight
    converted_names.add(model_key)
    print(f'Convert {model_key} to {new_key}')
def convert_backbone(model_key, model_weight, state_dict, converted_names):
    """Translate an official CSPDarknet key into the MMYOLO stage layout."""
    renamed = model_key.replace('backbone.dark', 'stage')
    # Official stages are numbered 2..5; MMYOLO counts from 1 — shift down.
    stage_idx = int(renamed[14]) - 1
    renamed = f'{renamed[:14]}{stage_idx}{renamed[15:]}'
    if '.m.' in model_key:
        renamed = renamed.replace('.m.', '.blocks.')
    elif not renamed[16] == '0' and 'stage4.1' not in renamed:
        renamed = (renamed.replace('conv1', 'main_conv')
                   .replace('conv2', 'short_conv')
                   .replace('conv3', 'final_conv'))
    state_dict[renamed] = model_weight
    converted_names.add(model_key)
    print(f'Convert {model_key} to {renamed}')
def convert_neck(model_key, model_weight, state_dict, converted_names):
    """Translate an official YOLOX neck/PAFPN key via ``neck_dict``."""
    for official_prefix, mmyolo_prefix in neck_dict.items():
        if official_prefix not in model_key:
            continue
        new_key = model_key.replace(official_prefix, mmyolo_prefix)
        if '.m.' in model_key:
            new_key = new_key.replace('.m.', '.blocks.')
        elif '.C' in model_key:
            new_key = (new_key.replace('cv1', 'main_conv')
                       .replace('cv2', 'short_conv')
                       .replace('cv3', 'final_conv'))
        state_dict[new_key] = model_weight
        converted_names.add(model_key)
        print(f'Convert {model_key} to {new_key}')
def convert_head(model_key, model_weight, state_dict, converted_names):
    """Translate an official YOLOX head key into the MMYOLO bbox_head layout."""
    # (marker substring, old prefix, new prefix) — first match wins.
    rules = [
        ('stem', 'head.stem', 'neck.out_layer'),
        ('cls_convs', 'head.cls_convs',
         'bbox_head.head_module.multi_level_cls_convs'),
        ('reg_convs', 'head.reg_convs',
         'bbox_head.head_module.multi_level_reg_convs'),
    ]
    for marker, old_prefix, new_prefix in rules:
        if marker in model_key:
            new_key = model_key.replace(old_prefix, new_prefix)
            break
    else:
        if 'preds' in model_key:
            new_key = model_key.replace(
                'head.', 'bbox_head.head_module.multi_level_conv_')
            new_key = new_key.replace('_preds', '')
    state_dict[new_key] = model_weight
    converted_names.add(model_key)
    print(f'Convert {model_key} to {new_key}')
def convert(src, dst):
    """Convert keys in detectron pretrained YOLOX models to mmyolo style.

    Dispatches each parameter key to the stem/backbone/neck/head converter
    based on its prefix, then saves the renamed state dict to ``dst``.
    """
    blobs = torch.load(src)['model']
    state_dict = OrderedDict()
    converted_names = set()
    for key, weight in blobs.items():
        if 'backbone.stem' in key:
            convert_stem(key, weight, state_dict, converted_names)
        elif 'backbone.backbone' in key:
            convert_backbone(key, weight, state_dict, converted_names)
        elif 'backbone.neck' not in key and 'head' not in key:
            convert_neck(key, weight, state_dict, converted_names)
        elif 'head' in key:
            convert_head(key, weight, state_dict, converted_names)
    # save checkpoint
    checkpoint = dict()
    checkpoint['state_dict'] = state_dict
    torch.save(checkpoint, dst)
def main():
    """CLI entry: convert an official YOLOX checkpoint to MMYOLO format."""
    parser = argparse.ArgumentParser(description='Convert model keys')
    parser.add_argument(
        '--src', default='yolox_s.pth', help='src yolox model path')
    parser.add_argument('--dst', default='mmyoloxs.pt', help='save path')
    args = parser.parse_args()
    convert(args.src, args.dst)
if __name__ == '__main__':
    main()
| 4,218 | 37.009009 | 78 | py |
mmyolo | mmyolo-main/tools/model_converters/yolov8_to_mmyolo.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
from collections import OrderedDict
import torch
# Maps `model.<idx>` prefixes in the official YOLOv8-s checkpoint to the
# corresponding MMYOLO module paths (backbone / neck / head).
convert_dict_s = {
    # backbone
    'model.0': 'backbone.stem',
    'model.1': 'backbone.stage1.0',
    'model.2': 'backbone.stage1.1',
    'model.3': 'backbone.stage2.0',
    'model.4': 'backbone.stage2.1',
    'model.5': 'backbone.stage3.0',
    'model.6': 'backbone.stage3.1',
    'model.7': 'backbone.stage4.0',
    'model.8': 'backbone.stage4.1',
    'model.9': 'backbone.stage4.2',
    # neck
    'model.12': 'neck.top_down_layers.0',
    'model.15': 'neck.top_down_layers.1',
    'model.16': 'neck.downsample_layers.0',
    'model.18': 'neck.bottom_up_layers.0',
    'model.19': 'neck.downsample_layers.1',
    'model.21': 'neck.bottom_up_layers.1',
    # Detector
    'model.22': 'bbox_head.head_module',
}
def convert(src, dst):
    """Convert keys in pretrained YOLOv8 models to mmyolo style.

    Args:
        src: Path of the official ultralytics YOLOv8 checkpoint.
        dst: Path where the converted mmyolo-style checkpoint is written.

    Raises:
        RuntimeError: If the ultralytics model classes needed to unpickle
            the checkpoint are not importable.
    """
    convert_dict = convert_dict_s
    try:
        # The official checkpoint pickles the whole model object, so the
        # ultralytics code must be importable for ``torch.load`` to succeed.
        yolov8_model = torch.load(src, map_location='cpu')['model']
        blobs = yolov8_model.state_dict()
    except ModuleNotFoundError:
        raise RuntimeError(
            'This script must be placed under the ultralytics repo,'
            ' because loading the official pretrained model need'
            ' `model.py` to build model.'
            'Also need to install hydra-core>=1.2.0 and thop>=0.1.1')
    state_dict = OrderedDict()
    for key, weight in blobs.items():
        # Keys look like ``model.<idx>.<...>``; map the ``model.<idx>``
        # prefix to the mmyolo module path.  (The previous code also
        # unpacked an unused ``module`` variable here.)
        num = key.split('.')[1]
        prefix = f'model.{num}'
        new_key = key.replace(prefix, convert_dict[prefix])

        if '.m.' in new_key:
            new_key = new_key.replace('.m.', '.blocks.')
            new_key = new_key.replace('.cv', '.conv')
        elif 'bbox_head.head_module' in new_key:
            new_key = new_key.replace('.cv2', '.reg_preds')
            new_key = new_key.replace('.cv3', '.cls_preds')
        elif 'backbone.stage4.2' in new_key:
            new_key = new_key.replace('.cv', '.conv')
        else:
            new_key = new_key.replace('.cv1', '.main_conv')
            new_key = new_key.replace('.cv2', '.final_conv')

        if 'bbox_head.head_module.dfl.conv.weight' == new_key:
            # The DFL projection is rebuilt deterministically by mmyolo.
            print('Drop "bbox_head.head_module.dfl.conv.weight", '
                  'because it is useless')
            continue
        state_dict[new_key] = weight
        print(f'Convert {key} to {new_key}')

    # save checkpoint
    checkpoint = dict()
    checkpoint['state_dict'] = state_dict
    torch.save(checkpoint, dst)
# Note: This script must be placed under the YOLOv8 repo to run.
def main():
    """Command-line entry point for the YOLOv8 -> mmyolo conversion."""
    parser = argparse.ArgumentParser(description='Convert model keys')
    parser.add_argument(
        '--src', default='yolov8s.pt', help='src YOLOv8 model path')
    parser.add_argument('--dst', default='mmyolov8s.pth', help='save path')
    opts = parser.parse_args()
    convert(opts.src, opts.dst)


if __name__ == '__main__':
    main()
| 2,937 | 31.644444 | 75 | py |
mmyolo | mmyolo-main/tools/model_converters/rtmdet_to_mmyolo.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
from collections import OrderedDict
import torch
def convert(src, dst):
    """Convert keys in pretrained RTMDet models to MMYOLO style.

    Args:
        src: Path of the source MMDetection RTMDet checkpoint.
        dst: Path where the converted MMYOLO-style checkpoint is written.
    """
    ckpt = torch.load(src, map_location='cpu')
    blobs = ckpt['state_dict']
    state_dict = OrderedDict()
    for key, weight in blobs.items():
        if 'neck.reduce_layers.0' in key:
            new_key = key.replace('.0', '.2')
            state_dict[new_key] = weight
        elif 'neck.reduce_layers.1' in key:
            new_key = key.replace('reduce_layers.1', 'top_down_layers.0.1')
            state_dict[new_key] = weight
        elif 'neck.top_down_blocks.0' in key:
            new_key = key.replace('down_blocks', 'down_layers.0')
            state_dict[new_key] = weight
        elif 'neck.top_down_blocks.1' in key:
            new_key = key.replace('down_blocks', 'down_layers')
            state_dict[new_key] = weight
        elif 'downsamples' in key:
            new_key = key.replace('downsamples', 'downsample_layers')
            state_dict[new_key] = weight
        elif 'bottom_up_blocks' in key:
            new_key = key.replace('bottom_up_blocks', 'bottom_up_layers')
            state_dict[new_key] = weight
        elif 'out_convs' in key:
            new_key = key.replace('out_convs', 'out_layers')
            state_dict[new_key] = weight
        elif 'bbox_head' in key:
            new_key = key.replace('bbox_head', 'bbox_head.head_module')
            state_dict[new_key] = weight
        elif 'data_preprocessor' in key:
            # Normalization buffers are recreated by MMYOLO's data
            # preprocessor, so they are intentionally dropped.
            continue
        else:
            new_key = key
            state_dict[new_key] = weight
            print(f'Convert {key} to {new_key}')

    # save checkpoint
    checkpoint = dict()
    checkpoint['state_dict'] = state_dict
    # Fix: read ``meta`` from the full checkpoint.  The previous code read
    # it from the state dict (``blobs.get('meta')``), where it never
    # exists, so ``meta`` was always saved as None.
    checkpoint['meta'] = ckpt.get('meta')
    torch.save(checkpoint, dst)
def main():
    """Command-line entry point for the RTMDet -> MMYOLO conversion."""
    parser = argparse.ArgumentParser(description='Convert model keys')
    parser.add_argument('src', help='src rtm model path')
    parser.add_argument('dst', help='save path')
    opts = parser.parse_args()
    convert(opts.src, opts.dst)


if __name__ == '__main__':
    main()
| 2,142 | 33.564516 | 75 | py |
mmyolo | mmyolo-main/tools/model_converters/ppyoloe_to_mmyolo.py | import argparse
import pickle
from collections import OrderedDict
import torch
def convert_bn(k: str):
    """Rename Paddle batch-norm buffer suffixes to their PyTorch names.

    ``._mean`` becomes ``.running_mean`` and ``._variance`` becomes
    ``.running_var``; other keys pass through unchanged.
    """
    renamed = k.replace('._mean', '.running_mean')
    return renamed.replace('._variance', '.running_var')
def convert_repvgg(k: str):
    """Rename RepVGG branch sub-keys to their mmyolo equivalents.

    Maps ``.conv2.conv1.`` to ``.conv2.rbr_dense.`` and ``.conv2.conv2.``
    to ``.conv2.rbr_1x1.``; any other key is returned unchanged.
    """
    for old, new in (('.conv2.conv1.', '.conv2.rbr_dense.'),
                     ('.conv2.conv2.', '.conv2.rbr_1x1.')):
        if old in k:
            return k.replace(old, new)
    return k
def convert(src: str, dst: str, imagenet_pretrain: bool = False):
    """Convert a PaddleDetection PPYOLOE checkpoint to mmyolo key naming.

    Args:
        src (str): Path of the source ``.pdparams`` file (a pickled dict
            mapping parameter names to numpy arrays).
        dst (str): Path where the converted PyTorch checkpoint is saved.
        imagenet_pretrain (bool): If True, the source holds only backbone
            weights (ImageNet pretraining) and every converted key is
            prefixed with ``backbone.``; otherwise full detector keys
            (``backbone.`` / ``neck.`` / ``yolo_head.``) are expected.

    Raises:
        NotImplementedError: If a ``neck.`` key does not match any known
            layout.
    """
    # NOTE(review): pickle.load can execute arbitrary code from the file;
    # only run this on checkpoints from a trusted source.
    with open(src, 'rb') as f:
        model = pickle.load(f)
    new_state_dict = OrderedDict()
    if imagenet_pretrain:
        for k, v in model.items():
            if '@@' in k:
                # Presumably Paddle bookkeeping entries, not real
                # parameters — skipped. TODO confirm.
                continue
            if 'stem.' in k:
                # backbone.stem.conv1.conv.weight
                # -> backbone.stem.0.conv.weight
                org_ind = k.split('.')[1][-1]
                new_ind = str(int(org_ind) - 1)
                name = k.replace('stem.conv%s.' % org_ind,
                                 'stem.%s.' % new_ind)
            else:
                # backbone.stages.1.conv2.bn._variance
                # -> backbone.stage2.0.conv2.bn.running_var
                org_stage_ind = k.split('.')[1]
                new_stage_ind = str(int(org_stage_ind) + 1)
                name = k.replace('stages.%s.' % org_stage_ind,
                                 'stage%s.0.' % new_stage_ind)
                name = convert_repvgg(name)
                if '.attn.' in k:
                    name = name.replace('.attn.fc.', '.attn.fc.conv.')
                name = convert_bn(name)
            name = 'backbone.' + name
            new_state_dict[name] = torch.from_numpy(v)
    else:
        for k, v in model.items():
            name = k
            if k.startswith('backbone.'):
                if '.stem.' in k:
                    # backbone.stem.conv1.conv.weight
                    # -> backbone.stem.0.conv.weight
                    org_ind = k.split('.')[2][-1]
                    new_ind = str(int(org_ind) - 1)
                    name = k.replace('.stem.conv%s.' % org_ind,
                                     '.stem.%s.' % new_ind)
                else:
                    # backbone.stages.1.conv2.bn._variance
                    # -> backbone.stage2.0.conv2.bn.running_var
                    org_stage_ind = k.split('.')[2]
                    new_stage_ind = str(int(org_stage_ind) + 1)
                    name = k.replace('.stages.%s.' % org_stage_ind,
                                     '.stage%s.0.' % new_stage_ind)
                    name = convert_repvgg(name)
                    if '.attn.' in k:
                        name = name.replace('.attn.fc.', '.attn.fc.conv.')
                    name = convert_bn(name)
            elif k.startswith('neck.'):
                # fpn_stages
                if k.startswith('neck.fpn_stages.'):
                    # neck.fpn_stages.0.0.conv1.conv.weight
                    # -> neck.reduce_layers.2.0.conv1.conv.weight
                    if k.startswith('neck.fpn_stages.0.0.'):
                        name = k.replace('neck.fpn_stages.0.0.',
                                         'neck.reduce_layers.2.0.')
                        if '.spp.' in name:
                            name = name.replace('.spp.conv.', '.spp.conv2.')
                    # neck.fpn_stages.1.0.conv1.conv.weight
                    # -> neck.top_down_layers.0.0.conv1.conv.weight
                    elif k.startswith('neck.fpn_stages.1.0.'):
                        name = k.replace('neck.fpn_stages.1.0.',
                                         'neck.top_down_layers.0.0.')
                    elif k.startswith('neck.fpn_stages.2.0.'):
                        name = k.replace('neck.fpn_stages.2.0.',
                                         'neck.top_down_layers.1.0.')
                    else:
                        raise NotImplementedError('Not implemented.')
                    name = name.replace('.0.convs.', '.0.blocks.')
                elif k.startswith('neck.fpn_routes.'):
                    # neck.fpn_routes.0.conv.weight
                    # -> neck.upsample_layers.0.0.conv.weight
                    index = k.split('.')[2]
                    name = 'neck.upsample_layers.' + index + '.0.' + '.'.join(
                        k.split('.')[-2:])
                    name = name.replace('.0.convs.', '.0.blocks.')
                elif k.startswith('neck.pan_stages.'):
                    # neck.pan_stages.0.0.conv1.conv.weight
                    # -> neck.bottom_up_layers.1.0.conv1.conv.weight
                    ind = k.split('.')[2]
                    name = k.replace(
                        'neck.pan_stages.' + ind, 'neck.bottom_up_layers.' +
                        ('0' if ind == '1' else '1'))
                    name = name.replace('.0.convs.', '.0.blocks.')
                elif k.startswith('neck.pan_routes.'):
                    # neck.pan_routes.0.conv.weight
                    # -> neck.downsample_layers.0.conv.weight
                    ind = k.split('.')[2]
                    name = k.replace(
                        'neck.pan_routes.' + ind, 'neck.downsample_layers.' +
                        ('0' if ind == '1' else '1'))
                    name = name.replace('.0.convs.', '.0.blocks.')
                else:
                    raise NotImplementedError('Not implement.')
                name = convert_repvgg(name)
                name = convert_bn(name)
            elif k.startswith('yolo_head.'):
                # Precomputed grid buffers are rebuilt at runtime — skip.
                if ('anchor_points' in k) or ('stride_tensor' in k):
                    continue
                if 'proj_conv' in k:
                    name = k.replace('yolo_head.proj_conv.',
                                     'bbox_head.head_module.proj_conv.')
                else:
                    for org_key, rep_key in [
                        [
                            'yolo_head.stem_cls.',
                            'bbox_head.head_module.cls_stems.'
                        ],
                        [
                            'yolo_head.stem_reg.',
                            'bbox_head.head_module.reg_stems.'
                        ],
                        [
                            'yolo_head.pred_cls.',
                            'bbox_head.head_module.cls_preds.'
                        ],
                        [
                            'yolo_head.pred_reg.',
                            'bbox_head.head_module.reg_preds.'
                        ]
                    ]:
                        name = name.replace(org_key, rep_key)
                    # Flip the per-level index (0 <-> 2), presumably to
                    # match mmyolo's feature-level ordering — TODO confirm.
                    name = name.split('.')
                    ind = name[3]
                    name[3] = str(2 - int(ind))
                    name = '.'.join(name)
                name = convert_bn(name)
            else:
                # Anything outside backbone/neck/yolo_head is dropped.
                continue
            new_state_dict[name] = torch.from_numpy(v)
    data = {'state_dict': new_state_dict}
    torch.save(data, dst)
def main():
    """Command-line entry point for the PPYOLOE -> mmyolo conversion."""
    parser = argparse.ArgumentParser(description='Convert model keys')
    parser.add_argument(
        '--src',
        default='ppyoloe_plus_crn_s_80e_coco.pdparams',
        help='src ppyoloe model path')
    parser.add_argument(
        '--dst', default='mmppyoloe_plus_s.pt', help='save path')
    parser.add_argument(
        '--imagenet-pretrain',
        action='store_true',
        default=False,
        help='Load model pretrained on imagenet dataset which only '
        'have weight for backbone.')
    opts = parser.parse_args()
    convert(opts.src, opts.dst, opts.imagenet_pretrain)


if __name__ == '__main__':
    main()
| 7,738 | 40.832432 | 78 | py |
mmyolo | mmyolo-main/tools/model_converters/convert_kd_ckpt_to_student.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
from pathlib import Path
import torch
def parse_args():
    """Build and run the argument parser for the KD-checkpoint converter."""
    parser = argparse.ArgumentParser(
        description='Convert KD checkpoint to student-only checkpoint')
    parser.add_argument('checkpoint', help='input checkpoint filename')
    parser.add_argument('--out-path', help='save checkpoint path')
    parser.add_argument(
        '--inplace', action='store_true', help='replace origin ckpt')
    return parser.parse_args()
def main():
    """Strip the distillation wrapper from a KD checkpoint.

    Keeps only the weights under the ``architecture.`` prefix (the student
    model), removes that prefix, and saves the result either in place or
    as ``<ckpt_name>_student.pth`` next to the input (or under
    ``--out-path``).
    """
    args = parse_args()
    checkpoint = torch.load(args.checkpoint, map_location='cpu')
    new_state_dict = dict()
    new_meta = checkpoint['meta']

    prefix = 'architecture.'
    for key, value in checkpoint['state_dict'].items():
        if key.startswith(prefix):
            # Strip only the leading prefix; the previous ``str.replace``
            # would also mangle any later occurrence of the substring
            # inside the key.
            new_state_dict[key[len(prefix):]] = value

    checkpoint = dict()
    checkpoint['meta'] = new_meta
    checkpoint['state_dict'] = new_state_dict

    if args.inplace:
        torch.save(checkpoint, args.checkpoint)
    else:
        ckpt_path = Path(args.checkpoint)
        ckpt_name = ckpt_path.stem
        if args.out_path:
            ckpt_dir = Path(args.out_path)
        else:
            ckpt_dir = ckpt_path.parent
        new_ckpt_path = ckpt_dir / f'{ckpt_name}_student.pth'
        torch.save(checkpoint, new_ckpt_path)


if __name__ == '__main__':
    main()
mmyolo | mmyolo-main/tools/model_converters/yolov7_to_mmyolo.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
from collections import OrderedDict
import torch
# Mapping from official YOLOv7-tiny checkpoint layer indices
# (``model.<idx>``) to the corresponding mmyolo module paths; consumed by
# the conversion logic when renaming state-dict keys.
convert_dict_tiny = {
    # stem
    'model.0': 'backbone.stem.0',
    'model.1': 'backbone.stem.1',
    # stage1 TinyDownSampleBlock
    'model.2': 'backbone.stage1.0.short_conv',
    'model.3': 'backbone.stage1.0.main_convs.0',
    'model.4': 'backbone.stage1.0.main_convs.1',
    'model.5': 'backbone.stage1.0.main_convs.2',
    'model.7': 'backbone.stage1.0.final_conv',
    # stage2 TinyDownSampleBlock
    'model.9': 'backbone.stage2.1.short_conv',
    'model.10': 'backbone.stage2.1.main_convs.0',
    'model.11': 'backbone.stage2.1.main_convs.1',
    'model.12': 'backbone.stage2.1.main_convs.2',
    'model.14': 'backbone.stage2.1.final_conv',
    # stage3 TinyDownSampleBlock
    'model.16': 'backbone.stage3.1.short_conv',
    'model.17': 'backbone.stage3.1.main_convs.0',
    'model.18': 'backbone.stage3.1.main_convs.1',
    'model.19': 'backbone.stage3.1.main_convs.2',
    'model.21': 'backbone.stage3.1.final_conv',
    # stage4 TinyDownSampleBlock
    'model.23': 'backbone.stage4.1.short_conv',
    'model.24': 'backbone.stage4.1.main_convs.0',
    'model.25': 'backbone.stage4.1.main_convs.1',
    'model.26': 'backbone.stage4.1.main_convs.2',
    'model.28': 'backbone.stage4.1.final_conv',
    # neck SPPCSPBlock
    'model.29': 'neck.reduce_layers.2.short_layer',
    'model.30': 'neck.reduce_layers.2.main_layers',
    'model.35': 'neck.reduce_layers.2.fuse_layers',
    'model.37': 'neck.reduce_layers.2.final_conv',
    'model.38': 'neck.upsample_layers.0.0',
    'model.40': 'neck.reduce_layers.1',
    'model.42': 'neck.top_down_layers.0.short_conv',
    'model.43': 'neck.top_down_layers.0.main_convs.0',
    'model.44': 'neck.top_down_layers.0.main_convs.1',
    'model.45': 'neck.top_down_layers.0.main_convs.2',
    'model.47': 'neck.top_down_layers.0.final_conv',
    'model.48': 'neck.upsample_layers.1.0',
    'model.50': 'neck.reduce_layers.0',
    'model.52': 'neck.top_down_layers.1.short_conv',
    'model.53': 'neck.top_down_layers.1.main_convs.0',
    'model.54': 'neck.top_down_layers.1.main_convs.1',
    'model.55': 'neck.top_down_layers.1.main_convs.2',
    'model.57': 'neck.top_down_layers.1.final_conv',
    'model.58': 'neck.downsample_layers.0',
    'model.60': 'neck.bottom_up_layers.0.short_conv',
    'model.61': 'neck.bottom_up_layers.0.main_convs.0',
    'model.62': 'neck.bottom_up_layers.0.main_convs.1',
    'model.63': 'neck.bottom_up_layers.0.main_convs.2',
    'model.65': 'neck.bottom_up_layers.0.final_conv',
    'model.66': 'neck.downsample_layers.1',
    'model.68': 'neck.bottom_up_layers.1.short_conv',
    'model.69': 'neck.bottom_up_layers.1.main_convs.0',
    'model.70': 'neck.bottom_up_layers.1.main_convs.1',
    'model.71': 'neck.bottom_up_layers.1.main_convs.2',
    'model.73': 'neck.bottom_up_layers.1.final_conv',
    'model.74': 'neck.out_layers.0',
    'model.75': 'neck.out_layers.1',
    'model.76': 'neck.out_layers.2',
    # head
    'model.77.m.0': 'bbox_head.head_module.convs_pred.0.1',
    'model.77.m.1': 'bbox_head.head_module.convs_pred.1.1',
    'model.77.m.2': 'bbox_head.head_module.convs_pred.2.1'
}
# Mapping from official YOLOv7-l checkpoint layer indices (``model.<idx>``,
# plus sub-module suffixes for SPPCSP and RepVGG blocks) to mmyolo module
# paths; consumed by the conversion logic when renaming state-dict keys.
convert_dict_l = {
    # stem
    'model.0': 'backbone.stem.0',
    'model.1': 'backbone.stem.1',
    'model.2': 'backbone.stem.2',
    # stage1
    # ConvModule
    'model.3': 'backbone.stage1.0',
    # ELANBlock expand_channel_2x
    'model.4': 'backbone.stage1.1.short_conv',
    'model.5': 'backbone.stage1.1.main_conv',
    'model.6': 'backbone.stage1.1.blocks.0.0',
    'model.7': 'backbone.stage1.1.blocks.0.1',
    'model.8': 'backbone.stage1.1.blocks.1.0',
    'model.9': 'backbone.stage1.1.blocks.1.1',
    'model.11': 'backbone.stage1.1.final_conv',
    # stage2
    # MaxPoolBlock reduce_channel_2x
    'model.13': 'backbone.stage2.0.maxpool_branches.1',
    'model.14': 'backbone.stage2.0.stride_conv_branches.0',
    'model.15': 'backbone.stage2.0.stride_conv_branches.1',
    # ELANBlock expand_channel_2x
    'model.17': 'backbone.stage2.1.short_conv',
    'model.18': 'backbone.stage2.1.main_conv',
    'model.19': 'backbone.stage2.1.blocks.0.0',
    'model.20': 'backbone.stage2.1.blocks.0.1',
    'model.21': 'backbone.stage2.1.blocks.1.0',
    'model.22': 'backbone.stage2.1.blocks.1.1',
    'model.24': 'backbone.stage2.1.final_conv',
    # stage3
    # MaxPoolBlock reduce_channel_2x
    'model.26': 'backbone.stage3.0.maxpool_branches.1',
    'model.27': 'backbone.stage3.0.stride_conv_branches.0',
    'model.28': 'backbone.stage3.0.stride_conv_branches.1',
    # ELANBlock expand_channel_2x
    'model.30': 'backbone.stage3.1.short_conv',
    'model.31': 'backbone.stage3.1.main_conv',
    'model.32': 'backbone.stage3.1.blocks.0.0',
    'model.33': 'backbone.stage3.1.blocks.0.1',
    'model.34': 'backbone.stage3.1.blocks.1.0',
    'model.35': 'backbone.stage3.1.blocks.1.1',
    'model.37': 'backbone.stage3.1.final_conv',
    # stage4
    # MaxPoolBlock reduce_channel_2x
    'model.39': 'backbone.stage4.0.maxpool_branches.1',
    'model.40': 'backbone.stage4.0.stride_conv_branches.0',
    'model.41': 'backbone.stage4.0.stride_conv_branches.1',
    # ELANBlock no_change_channel
    'model.43': 'backbone.stage4.1.short_conv',
    'model.44': 'backbone.stage4.1.main_conv',
    'model.45': 'backbone.stage4.1.blocks.0.0',
    'model.46': 'backbone.stage4.1.blocks.0.1',
    'model.47': 'backbone.stage4.1.blocks.1.0',
    'model.48': 'backbone.stage4.1.blocks.1.1',
    'model.50': 'backbone.stage4.1.final_conv',
    # neck SPPCSPBlock
    'model.51.cv1': 'neck.reduce_layers.2.main_layers.0',
    'model.51.cv3': 'neck.reduce_layers.2.main_layers.1',
    'model.51.cv4': 'neck.reduce_layers.2.main_layers.2',
    'model.51.cv5': 'neck.reduce_layers.2.fuse_layers.0',
    'model.51.cv6': 'neck.reduce_layers.2.fuse_layers.1',
    'model.51.cv2': 'neck.reduce_layers.2.short_layer',
    'model.51.cv7': 'neck.reduce_layers.2.final_conv',
    # neck
    'model.52': 'neck.upsample_layers.0.0',
    'model.54': 'neck.reduce_layers.1',
    # neck ELANBlock reduce_channel_2x
    'model.56': 'neck.top_down_layers.0.short_conv',
    'model.57': 'neck.top_down_layers.0.main_conv',
    'model.58': 'neck.top_down_layers.0.blocks.0',
    'model.59': 'neck.top_down_layers.0.blocks.1',
    'model.60': 'neck.top_down_layers.0.blocks.2',
    'model.61': 'neck.top_down_layers.0.blocks.3',
    'model.63': 'neck.top_down_layers.0.final_conv',
    'model.64': 'neck.upsample_layers.1.0',
    'model.66': 'neck.reduce_layers.0',
    # neck ELANBlock reduce_channel_2x
    'model.68': 'neck.top_down_layers.1.short_conv',
    'model.69': 'neck.top_down_layers.1.main_conv',
    'model.70': 'neck.top_down_layers.1.blocks.0',
    'model.71': 'neck.top_down_layers.1.blocks.1',
    'model.72': 'neck.top_down_layers.1.blocks.2',
    'model.73': 'neck.top_down_layers.1.blocks.3',
    'model.75': 'neck.top_down_layers.1.final_conv',
    # neck MaxPoolBlock no_change_channel
    'model.77': 'neck.downsample_layers.0.maxpool_branches.1',
    'model.78': 'neck.downsample_layers.0.stride_conv_branches.0',
    'model.79': 'neck.downsample_layers.0.stride_conv_branches.1',
    # neck ELANBlock reduce_channel_2x
    'model.81': 'neck.bottom_up_layers.0.short_conv',
    'model.82': 'neck.bottom_up_layers.0.main_conv',
    'model.83': 'neck.bottom_up_layers.0.blocks.0',
    'model.84': 'neck.bottom_up_layers.0.blocks.1',
    'model.85': 'neck.bottom_up_layers.0.blocks.2',
    'model.86': 'neck.bottom_up_layers.0.blocks.3',
    'model.88': 'neck.bottom_up_layers.0.final_conv',
    # neck MaxPoolBlock no_change_channel
    'model.90': 'neck.downsample_layers.1.maxpool_branches.1',
    'model.91': 'neck.downsample_layers.1.stride_conv_branches.0',
    'model.92': 'neck.downsample_layers.1.stride_conv_branches.1',
    # neck ELANBlock reduce_channel_2x
    'model.94': 'neck.bottom_up_layers.1.short_conv',
    'model.95': 'neck.bottom_up_layers.1.main_conv',
    'model.96': 'neck.bottom_up_layers.1.blocks.0',
    'model.97': 'neck.bottom_up_layers.1.blocks.1',
    'model.98': 'neck.bottom_up_layers.1.blocks.2',
    'model.99': 'neck.bottom_up_layers.1.blocks.3',
    'model.101': 'neck.bottom_up_layers.1.final_conv',
    # RepVGGBlock
    'model.102.rbr_dense.0': 'neck.out_layers.0.rbr_dense.conv',
    'model.102.rbr_dense.1': 'neck.out_layers.0.rbr_dense.bn',
    'model.102.rbr_1x1.0': 'neck.out_layers.0.rbr_1x1.conv',
    'model.102.rbr_1x1.1': 'neck.out_layers.0.rbr_1x1.bn',
    'model.103.rbr_dense.0': 'neck.out_layers.1.rbr_dense.conv',
    'model.103.rbr_dense.1': 'neck.out_layers.1.rbr_dense.bn',
    'model.103.rbr_1x1.0': 'neck.out_layers.1.rbr_1x1.conv',
    'model.103.rbr_1x1.1': 'neck.out_layers.1.rbr_1x1.bn',
    'model.104.rbr_dense.0': 'neck.out_layers.2.rbr_dense.conv',
    'model.104.rbr_dense.1': 'neck.out_layers.2.rbr_dense.bn',
    'model.104.rbr_1x1.0': 'neck.out_layers.2.rbr_1x1.conv',
    'model.104.rbr_1x1.1': 'neck.out_layers.2.rbr_1x1.bn',
    # head
    'model.105.m.0': 'bbox_head.head_module.convs_pred.0.1',
    'model.105.m.1': 'bbox_head.head_module.convs_pred.1.1',
    'model.105.m.2': 'bbox_head.head_module.convs_pred.2.1'
}
# Mapping from official YOLOv7-x checkpoint layer indices (``model.<idx>``,
# plus sub-module suffixes for the SPPCSP block) to mmyolo module paths;
# consumed by the conversion logic when renaming state-dict keys.
convert_dict_x = {
    # stem
    'model.0': 'backbone.stem.0',
    'model.1': 'backbone.stem.1',
    'model.2': 'backbone.stem.2',
    # stage1
    # ConvModule
    'model.3': 'backbone.stage1.0',
    # ELANBlock expand_channel_2x
    'model.4': 'backbone.stage1.1.short_conv',
    'model.5': 'backbone.stage1.1.main_conv',
    'model.6': 'backbone.stage1.1.blocks.0.0',
    'model.7': 'backbone.stage1.1.blocks.0.1',
    'model.8': 'backbone.stage1.1.blocks.1.0',
    'model.9': 'backbone.stage1.1.blocks.1.1',
    'model.10': 'backbone.stage1.1.blocks.2.0',
    'model.11': 'backbone.stage1.1.blocks.2.1',
    'model.13': 'backbone.stage1.1.final_conv',
    # stage2
    # MaxPoolBlock reduce_channel_2x
    'model.15': 'backbone.stage2.0.maxpool_branches.1',
    'model.16': 'backbone.stage2.0.stride_conv_branches.0',
    'model.17': 'backbone.stage2.0.stride_conv_branches.1',
    # ELANBlock expand_channel_2x
    'model.19': 'backbone.stage2.1.short_conv',
    'model.20': 'backbone.stage2.1.main_conv',
    'model.21': 'backbone.stage2.1.blocks.0.0',
    'model.22': 'backbone.stage2.1.blocks.0.1',
    'model.23': 'backbone.stage2.1.blocks.1.0',
    'model.24': 'backbone.stage2.1.blocks.1.1',
    'model.25': 'backbone.stage2.1.blocks.2.0',
    'model.26': 'backbone.stage2.1.blocks.2.1',
    'model.28': 'backbone.stage2.1.final_conv',
    # stage3
    # MaxPoolBlock reduce_channel_2x
    'model.30': 'backbone.stage3.0.maxpool_branches.1',
    'model.31': 'backbone.stage3.0.stride_conv_branches.0',
    'model.32': 'backbone.stage3.0.stride_conv_branches.1',
    # ELANBlock expand_channel_2x
    'model.34': 'backbone.stage3.1.short_conv',
    'model.35': 'backbone.stage3.1.main_conv',
    'model.36': 'backbone.stage3.1.blocks.0.0',
    'model.37': 'backbone.stage3.1.blocks.0.1',
    'model.38': 'backbone.stage3.1.blocks.1.0',
    'model.39': 'backbone.stage3.1.blocks.1.1',
    'model.40': 'backbone.stage3.1.blocks.2.0',
    'model.41': 'backbone.stage3.1.blocks.2.1',
    'model.43': 'backbone.stage3.1.final_conv',
    # stage4
    # MaxPoolBlock reduce_channel_2x
    'model.45': 'backbone.stage4.0.maxpool_branches.1',
    'model.46': 'backbone.stage4.0.stride_conv_branches.0',
    'model.47': 'backbone.stage4.0.stride_conv_branches.1',
    # ELANBlock no_change_channel
    'model.49': 'backbone.stage4.1.short_conv',
    'model.50': 'backbone.stage4.1.main_conv',
    'model.51': 'backbone.stage4.1.blocks.0.0',
    'model.52': 'backbone.stage4.1.blocks.0.1',
    'model.53': 'backbone.stage4.1.blocks.1.0',
    'model.54': 'backbone.stage4.1.blocks.1.1',
    'model.55': 'backbone.stage4.1.blocks.2.0',
    'model.56': 'backbone.stage4.1.blocks.2.1',
    'model.58': 'backbone.stage4.1.final_conv',
    # neck SPPCSPBlock
    'model.59.cv1': 'neck.reduce_layers.2.main_layers.0',
    'model.59.cv3': 'neck.reduce_layers.2.main_layers.1',
    'model.59.cv4': 'neck.reduce_layers.2.main_layers.2',
    'model.59.cv5': 'neck.reduce_layers.2.fuse_layers.0',
    'model.59.cv6': 'neck.reduce_layers.2.fuse_layers.1',
    'model.59.cv2': 'neck.reduce_layers.2.short_layer',
    'model.59.cv7': 'neck.reduce_layers.2.final_conv',
    # neck
    'model.60': 'neck.upsample_layers.0.0',
    'model.62': 'neck.reduce_layers.1',
    # neck ELANBlock reduce_channel_2x
    'model.64': 'neck.top_down_layers.0.short_conv',
    'model.65': 'neck.top_down_layers.0.main_conv',
    'model.66': 'neck.top_down_layers.0.blocks.0.0',
    'model.67': 'neck.top_down_layers.0.blocks.0.1',
    'model.68': 'neck.top_down_layers.0.blocks.1.0',
    'model.69': 'neck.top_down_layers.0.blocks.1.1',
    'model.70': 'neck.top_down_layers.0.blocks.2.0',
    'model.71': 'neck.top_down_layers.0.blocks.2.1',
    'model.73': 'neck.top_down_layers.0.final_conv',
    'model.74': 'neck.upsample_layers.1.0',
    'model.76': 'neck.reduce_layers.0',
    # neck ELANBlock reduce_channel_2x
    'model.78': 'neck.top_down_layers.1.short_conv',
    'model.79': 'neck.top_down_layers.1.main_conv',
    'model.80': 'neck.top_down_layers.1.blocks.0.0',
    'model.81': 'neck.top_down_layers.1.blocks.0.1',
    'model.82': 'neck.top_down_layers.1.blocks.1.0',
    'model.83': 'neck.top_down_layers.1.blocks.1.1',
    'model.84': 'neck.top_down_layers.1.blocks.2.0',
    'model.85': 'neck.top_down_layers.1.blocks.2.1',
    'model.87': 'neck.top_down_layers.1.final_conv',
    # neck MaxPoolBlock no_change_channel
    'model.89': 'neck.downsample_layers.0.maxpool_branches.1',
    'model.90': 'neck.downsample_layers.0.stride_conv_branches.0',
    'model.91': 'neck.downsample_layers.0.stride_conv_branches.1',
    # neck ELANBlock reduce_channel_2x
    'model.93': 'neck.bottom_up_layers.0.short_conv',
    'model.94': 'neck.bottom_up_layers.0.main_conv',
    'model.95': 'neck.bottom_up_layers.0.blocks.0.0',
    'model.96': 'neck.bottom_up_layers.0.blocks.0.1',
    'model.97': 'neck.bottom_up_layers.0.blocks.1.0',
    'model.98': 'neck.bottom_up_layers.0.blocks.1.1',
    'model.99': 'neck.bottom_up_layers.0.blocks.2.0',
    'model.100': 'neck.bottom_up_layers.0.blocks.2.1',
    'model.102': 'neck.bottom_up_layers.0.final_conv',
    # neck MaxPoolBlock no_change_channel
    'model.104': 'neck.downsample_layers.1.maxpool_branches.1',
    'model.105': 'neck.downsample_layers.1.stride_conv_branches.0',
    'model.106': 'neck.downsample_layers.1.stride_conv_branches.1',
    # neck ELANBlock reduce_channel_2x
    'model.108': 'neck.bottom_up_layers.1.short_conv',
    'model.109': 'neck.bottom_up_layers.1.main_conv',
    'model.110': 'neck.bottom_up_layers.1.blocks.0.0',
    'model.111': 'neck.bottom_up_layers.1.blocks.0.1',
    'model.112': 'neck.bottom_up_layers.1.blocks.1.0',
    'model.113': 'neck.bottom_up_layers.1.blocks.1.1',
    'model.114': 'neck.bottom_up_layers.1.blocks.2.0',
    'model.115': 'neck.bottom_up_layers.1.blocks.2.1',
    'model.117': 'neck.bottom_up_layers.1.final_conv',
    # Conv
    'model.118': 'neck.out_layers.0',
    'model.119': 'neck.out_layers.1',
    'model.120': 'neck.out_layers.2',
    # head
    'model.121.m.0': 'bbox_head.head_module.convs_pred.0.1',
    'model.121.m.1': 'bbox_head.head_module.convs_pred.1.1',
    'model.121.m.2': 'bbox_head.head_module.convs_pred.2.1'
}
# Mapping from official YOLOv7-w6 checkpoint layer indices (``model.<idx>``;
# this variant has four feature levels, hence the extra stage5 / reduce
# layer 3 entries) to mmyolo module paths; consumed by the conversion logic
# when renaming state-dict keys.
convert_dict_w = {
    # stem
    'model.1': 'backbone.stem.conv',
    # stage1
    # ConvModule
    'model.2': 'backbone.stage1.0',
    # ELANBlock
    'model.3': 'backbone.stage1.1.short_conv',
    'model.4': 'backbone.stage1.1.main_conv',
    'model.5': 'backbone.stage1.1.blocks.0.0',
    'model.6': 'backbone.stage1.1.blocks.0.1',
    'model.7': 'backbone.stage1.1.blocks.1.0',
    'model.8': 'backbone.stage1.1.blocks.1.1',
    'model.10': 'backbone.stage1.1.final_conv',
    # stage2
    'model.11': 'backbone.stage2.0',
    # ELANBlock
    'model.12': 'backbone.stage2.1.short_conv',
    'model.13': 'backbone.stage2.1.main_conv',
    'model.14': 'backbone.stage2.1.blocks.0.0',
    'model.15': 'backbone.stage2.1.blocks.0.1',
    'model.16': 'backbone.stage2.1.blocks.1.0',
    'model.17': 'backbone.stage2.1.blocks.1.1',
    'model.19': 'backbone.stage2.1.final_conv',
    # stage3
    'model.20': 'backbone.stage3.0',
    # ELANBlock
    'model.21': 'backbone.stage3.1.short_conv',
    'model.22': 'backbone.stage3.1.main_conv',
    'model.23': 'backbone.stage3.1.blocks.0.0',
    'model.24': 'backbone.stage3.1.blocks.0.1',
    'model.25': 'backbone.stage3.1.blocks.1.0',
    'model.26': 'backbone.stage3.1.blocks.1.1',
    'model.28': 'backbone.stage3.1.final_conv',
    # stage4
    'model.29': 'backbone.stage4.0',
    # ELANBlock
    'model.30': 'backbone.stage4.1.short_conv',
    'model.31': 'backbone.stage4.1.main_conv',
    'model.32': 'backbone.stage4.1.blocks.0.0',
    'model.33': 'backbone.stage4.1.blocks.0.1',
    'model.34': 'backbone.stage4.1.blocks.1.0',
    'model.35': 'backbone.stage4.1.blocks.1.1',
    'model.37': 'backbone.stage4.1.final_conv',
    # stage5
    'model.38': 'backbone.stage5.0',
    # ELANBlock
    'model.39': 'backbone.stage5.1.short_conv',
    'model.40': 'backbone.stage5.1.main_conv',
    'model.41': 'backbone.stage5.1.blocks.0.0',
    'model.42': 'backbone.stage5.1.blocks.0.1',
    'model.43': 'backbone.stage5.1.blocks.1.0',
    'model.44': 'backbone.stage5.1.blocks.1.1',
    'model.46': 'backbone.stage5.1.final_conv',
    # neck SPPCSPBlock
    'model.47.cv1': 'neck.reduce_layers.3.main_layers.0',
    'model.47.cv3': 'neck.reduce_layers.3.main_layers.1',
    'model.47.cv4': 'neck.reduce_layers.3.main_layers.2',
    'model.47.cv5': 'neck.reduce_layers.3.fuse_layers.0',
    'model.47.cv6': 'neck.reduce_layers.3.fuse_layers.1',
    'model.47.cv2': 'neck.reduce_layers.3.short_layer',
    'model.47.cv7': 'neck.reduce_layers.3.final_conv',
    # neck
    'model.48': 'neck.upsample_layers.0.0',
    'model.50': 'neck.reduce_layers.2',
    # neck ELANBlock
    'model.52': 'neck.top_down_layers.0.short_conv',
    'model.53': 'neck.top_down_layers.0.main_conv',
    'model.54': 'neck.top_down_layers.0.blocks.0',
    'model.55': 'neck.top_down_layers.0.blocks.1',
    'model.56': 'neck.top_down_layers.0.blocks.2',
    'model.57': 'neck.top_down_layers.0.blocks.3',
    'model.59': 'neck.top_down_layers.0.final_conv',
    'model.60': 'neck.upsample_layers.1.0',
    'model.62': 'neck.reduce_layers.1',
    # neck ELANBlock reduce_channel_2x
    'model.64': 'neck.top_down_layers.1.short_conv',
    'model.65': 'neck.top_down_layers.1.main_conv',
    'model.66': 'neck.top_down_layers.1.blocks.0',
    'model.67': 'neck.top_down_layers.1.blocks.1',
    'model.68': 'neck.top_down_layers.1.blocks.2',
    'model.69': 'neck.top_down_layers.1.blocks.3',
    'model.71': 'neck.top_down_layers.1.final_conv',
    'model.72': 'neck.upsample_layers.2.0',
    'model.74': 'neck.reduce_layers.0',
    'model.76': 'neck.top_down_layers.2.short_conv',
    'model.77': 'neck.top_down_layers.2.main_conv',
    'model.78': 'neck.top_down_layers.2.blocks.0',
    'model.79': 'neck.top_down_layers.2.blocks.1',
    'model.80': 'neck.top_down_layers.2.blocks.2',
    'model.81': 'neck.top_down_layers.2.blocks.3',
    'model.83': 'neck.top_down_layers.2.final_conv',
    'model.84': 'neck.downsample_layers.0',
    # neck ELANBlock
    'model.86': 'neck.bottom_up_layers.0.short_conv',
    'model.87': 'neck.bottom_up_layers.0.main_conv',
    'model.88': 'neck.bottom_up_layers.0.blocks.0',
    'model.89': 'neck.bottom_up_layers.0.blocks.1',
    'model.90': 'neck.bottom_up_layers.0.blocks.2',
    'model.91': 'neck.bottom_up_layers.0.blocks.3',
    'model.93': 'neck.bottom_up_layers.0.final_conv',
    'model.94': 'neck.downsample_layers.1',
    # neck ELANBlock reduce_channel_2x
    'model.96': 'neck.bottom_up_layers.1.short_conv',
    'model.97': 'neck.bottom_up_layers.1.main_conv',
    'model.98': 'neck.bottom_up_layers.1.blocks.0',
    'model.99': 'neck.bottom_up_layers.1.blocks.1',
    'model.100': 'neck.bottom_up_layers.1.blocks.2',
    'model.101': 'neck.bottom_up_layers.1.blocks.3',
    'model.103': 'neck.bottom_up_layers.1.final_conv',
    'model.104': 'neck.downsample_layers.2',
    # neck ELANBlock reduce_channel_2x
    'model.106': 'neck.bottom_up_layers.2.short_conv',
    'model.107': 'neck.bottom_up_layers.2.main_conv',
    'model.108': 'neck.bottom_up_layers.2.blocks.0',
    'model.109': 'neck.bottom_up_layers.2.blocks.1',
    'model.110': 'neck.bottom_up_layers.2.blocks.2',
    'model.111': 'neck.bottom_up_layers.2.blocks.3',
    'model.113': 'neck.bottom_up_layers.2.final_conv',
    'model.114': 'bbox_head.head_module.main_convs_pred.0.0',
    'model.115': 'bbox_head.head_module.main_convs_pred.1.0',
    'model.116': 'bbox_head.head_module.main_convs_pred.2.0',
    'model.117': 'bbox_head.head_module.main_convs_pred.3.0',
    # head
    'model.118.m.0': 'bbox_head.head_module.main_convs_pred.0.2',
    'model.118.m.1': 'bbox_head.head_module.main_convs_pred.1.2',
    'model.118.m.2': 'bbox_head.head_module.main_convs_pred.2.2',
    'model.118.m.3': 'bbox_head.head_module.main_convs_pred.3.2'
}
convert_dict_e = {
# stem
'model.1': 'backbone.stem.conv',
# stage1
'model.2.cv1': 'backbone.stage1.0.stride_conv_branches.0',
'model.2.cv2': 'backbone.stage1.0.stride_conv_branches.1',
'model.2.cv3': 'backbone.stage1.0.maxpool_branches.1',
# ELANBlock
'model.3': 'backbone.stage1.1.short_conv',
'model.4': 'backbone.stage1.1.main_conv',
'model.5': 'backbone.stage1.1.blocks.0.0',
'model.6': 'backbone.stage1.1.blocks.0.1',
'model.7': 'backbone.stage1.1.blocks.1.0',
'model.8': 'backbone.stage1.1.blocks.1.1',
'model.9': 'backbone.stage1.1.blocks.2.0',
'model.10': 'backbone.stage1.1.blocks.2.1',
'model.12': 'backbone.stage1.1.final_conv',
# stage2
'model.13.cv1': 'backbone.stage2.0.stride_conv_branches.0',
'model.13.cv2': 'backbone.stage2.0.stride_conv_branches.1',
'model.13.cv3': 'backbone.stage2.0.maxpool_branches.1',
# ELANBlock
'model.14': 'backbone.stage2.1.short_conv',
'model.15': 'backbone.stage2.1.main_conv',
'model.16': 'backbone.stage2.1.blocks.0.0',
'model.17': 'backbone.stage2.1.blocks.0.1',
'model.18': 'backbone.stage2.1.blocks.1.0',
'model.19': 'backbone.stage2.1.blocks.1.1',
'model.20': 'backbone.stage2.1.blocks.2.0',
'model.21': 'backbone.stage2.1.blocks.2.1',
'model.23': 'backbone.stage2.1.final_conv',
# stage3
'model.24.cv1': 'backbone.stage3.0.stride_conv_branches.0',
'model.24.cv2': 'backbone.stage3.0.stride_conv_branches.1',
'model.24.cv3': 'backbone.stage3.0.maxpool_branches.1',
# ELANBlock
'model.25': 'backbone.stage3.1.short_conv',
'model.26': 'backbone.stage3.1.main_conv',
'model.27': 'backbone.stage3.1.blocks.0.0',
'model.28': 'backbone.stage3.1.blocks.0.1',
'model.29': 'backbone.stage3.1.blocks.1.0',
'model.30': 'backbone.stage3.1.blocks.1.1',
'model.31': 'backbone.stage3.1.blocks.2.0',
'model.32': 'backbone.stage3.1.blocks.2.1',
'model.34': 'backbone.stage3.1.final_conv',
# stage4
'model.35.cv1': 'backbone.stage4.0.stride_conv_branches.0',
'model.35.cv2': 'backbone.stage4.0.stride_conv_branches.1',
'model.35.cv3': 'backbone.stage4.0.maxpool_branches.1',
# ELANBlock
'model.36': 'backbone.stage4.1.short_conv',
'model.37': 'backbone.stage4.1.main_conv',
'model.38': 'backbone.stage4.1.blocks.0.0',
'model.39': 'backbone.stage4.1.blocks.0.1',
'model.40': 'backbone.stage4.1.blocks.1.0',
'model.41': 'backbone.stage4.1.blocks.1.1',
'model.42': 'backbone.stage4.1.blocks.2.0',
'model.43': 'backbone.stage4.1.blocks.2.1',
'model.45': 'backbone.stage4.1.final_conv',
# stage5
'model.46.cv1': 'backbone.stage5.0.stride_conv_branches.0',
'model.46.cv2': 'backbone.stage5.0.stride_conv_branches.1',
'model.46.cv3': 'backbone.stage5.0.maxpool_branches.1',
# ELANBlock
'model.47': 'backbone.stage5.1.short_conv',
'model.48': 'backbone.stage5.1.main_conv',
'model.49': 'backbone.stage5.1.blocks.0.0',
'model.50': 'backbone.stage5.1.blocks.0.1',
'model.51': 'backbone.stage5.1.blocks.1.0',
'model.52': 'backbone.stage5.1.blocks.1.1',
'model.53': 'backbone.stage5.1.blocks.2.0',
'model.54': 'backbone.stage5.1.blocks.2.1',
'model.56': 'backbone.stage5.1.final_conv',
# neck SPPCSPBlock
'model.57.cv1': 'neck.reduce_layers.3.main_layers.0',
'model.57.cv3': 'neck.reduce_layers.3.main_layers.1',
'model.57.cv4': 'neck.reduce_layers.3.main_layers.2',
'model.57.cv5': 'neck.reduce_layers.3.fuse_layers.0',
'model.57.cv6': 'neck.reduce_layers.3.fuse_layers.1',
'model.57.cv2': 'neck.reduce_layers.3.short_layer',
'model.57.cv7': 'neck.reduce_layers.3.final_conv',
# neck
'model.58': 'neck.upsample_layers.0.0',
'model.60': 'neck.reduce_layers.2',
# neck ELANBlock
'model.62': 'neck.top_down_layers.0.short_conv',
'model.63': 'neck.top_down_layers.0.main_conv',
'model.64': 'neck.top_down_layers.0.blocks.0',
'model.65': 'neck.top_down_layers.0.blocks.1',
'model.66': 'neck.top_down_layers.0.blocks.2',
'model.67': 'neck.top_down_layers.0.blocks.3',
'model.68': 'neck.top_down_layers.0.blocks.4',
'model.69': 'neck.top_down_layers.0.blocks.5',
'model.71': 'neck.top_down_layers.0.final_conv',
'model.72': 'neck.upsample_layers.1.0',
'model.74': 'neck.reduce_layers.1',
# neck ELANBlock
'model.76': 'neck.top_down_layers.1.short_conv',
'model.77': 'neck.top_down_layers.1.main_conv',
'model.78': 'neck.top_down_layers.1.blocks.0',
'model.79': 'neck.top_down_layers.1.blocks.1',
'model.80': 'neck.top_down_layers.1.blocks.2',
'model.81': 'neck.top_down_layers.1.blocks.3',
'model.82': 'neck.top_down_layers.1.blocks.4',
'model.83': 'neck.top_down_layers.1.blocks.5',
'model.85': 'neck.top_down_layers.1.final_conv',
'model.86': 'neck.upsample_layers.2.0',
'model.88': 'neck.reduce_layers.0',
'model.90': 'neck.top_down_layers.2.short_conv',
'model.91': 'neck.top_down_layers.2.main_conv',
'model.92': 'neck.top_down_layers.2.blocks.0',
'model.93': 'neck.top_down_layers.2.blocks.1',
'model.94': 'neck.top_down_layers.2.blocks.2',
'model.95': 'neck.top_down_layers.2.blocks.3',
'model.96': 'neck.top_down_layers.2.blocks.4',
'model.97': 'neck.top_down_layers.2.blocks.5',
'model.99': 'neck.top_down_layers.2.final_conv',
'model.100.cv1': 'neck.downsample_layers.0.stride_conv_branches.0',
'model.100.cv2': 'neck.downsample_layers.0.stride_conv_branches.1',
'model.100.cv3': 'neck.downsample_layers.0.maxpool_branches.1',
# neck ELANBlock
'model.102': 'neck.bottom_up_layers.0.short_conv',
'model.103': 'neck.bottom_up_layers.0.main_conv',
'model.104': 'neck.bottom_up_layers.0.blocks.0',
'model.105': 'neck.bottom_up_layers.0.blocks.1',
'model.106': 'neck.bottom_up_layers.0.blocks.2',
'model.107': 'neck.bottom_up_layers.0.blocks.3',
'model.108': 'neck.bottom_up_layers.0.blocks.4',
'model.109': 'neck.bottom_up_layers.0.blocks.5',
'model.111': 'neck.bottom_up_layers.0.final_conv',
'model.112.cv1': 'neck.downsample_layers.1.stride_conv_branches.0',
'model.112.cv2': 'neck.downsample_layers.1.stride_conv_branches.1',
'model.112.cv3': 'neck.downsample_layers.1.maxpool_branches.1',
# neck ELANBlock
'model.114': 'neck.bottom_up_layers.1.short_conv',
'model.115': 'neck.bottom_up_layers.1.main_conv',
'model.116': 'neck.bottom_up_layers.1.blocks.0',
'model.117': 'neck.bottom_up_layers.1.blocks.1',
'model.118': 'neck.bottom_up_layers.1.blocks.2',
'model.119': 'neck.bottom_up_layers.1.blocks.3',
'model.120': 'neck.bottom_up_layers.1.blocks.4',
'model.121': 'neck.bottom_up_layers.1.blocks.5',
'model.123': 'neck.bottom_up_layers.1.final_conv',
'model.124.cv1': 'neck.downsample_layers.2.stride_conv_branches.0',
'model.124.cv2': 'neck.downsample_layers.2.stride_conv_branches.1',
'model.124.cv3': 'neck.downsample_layers.2.maxpool_branches.1',
# neck ELANBlock
'model.126': 'neck.bottom_up_layers.2.short_conv',
'model.127': 'neck.bottom_up_layers.2.main_conv',
'model.128': 'neck.bottom_up_layers.2.blocks.0',
'model.129': 'neck.bottom_up_layers.2.blocks.1',
'model.130': 'neck.bottom_up_layers.2.blocks.2',
'model.131': 'neck.bottom_up_layers.2.blocks.3',
'model.132': 'neck.bottom_up_layers.2.blocks.4',
'model.133': 'neck.bottom_up_layers.2.blocks.5',
'model.135': 'neck.bottom_up_layers.2.final_conv',
'model.136': 'bbox_head.head_module.main_convs_pred.0.0',
'model.137': 'bbox_head.head_module.main_convs_pred.1.0',
'model.138': 'bbox_head.head_module.main_convs_pred.2.0',
'model.139': 'bbox_head.head_module.main_convs_pred.3.0',
# head
'model.140.m.0': 'bbox_head.head_module.main_convs_pred.0.2',
'model.140.m.1': 'bbox_head.head_module.main_convs_pred.1.2',
'model.140.m.2': 'bbox_head.head_module.main_convs_pred.2.2',
'model.140.m.3': 'bbox_head.head_module.main_convs_pred.3.2'
}
# Layer-name mapping for the 'yolov7-e6e.pt' checkpoint (selected via
# ``convert_dicts``): keys are module prefixes in the official
# WongKinYiu/yolov7 state dict, values are the corresponding mmyolo paths.
convert_dict_e2e = {
    # stem
    'model.1': 'backbone.stem.conv',
    # stage1
    'model.2.cv1': 'backbone.stage1.0.stride_conv_branches.0',
    'model.2.cv2': 'backbone.stage1.0.stride_conv_branches.1',
    'model.2.cv3': 'backbone.stage1.0.maxpool_branches.1',
    # E-ELANBlock
    'model.3': 'backbone.stage1.1.e_elan_blocks.0.short_conv',
    'model.4': 'backbone.stage1.1.e_elan_blocks.0.main_conv',
    'model.5': 'backbone.stage1.1.e_elan_blocks.0.blocks.0.0',
    'model.6': 'backbone.stage1.1.e_elan_blocks.0.blocks.0.1',
    'model.7': 'backbone.stage1.1.e_elan_blocks.0.blocks.1.0',
    'model.8': 'backbone.stage1.1.e_elan_blocks.0.blocks.1.1',
    'model.9': 'backbone.stage1.1.e_elan_blocks.0.blocks.2.0',
    'model.10': 'backbone.stage1.1.e_elan_blocks.0.blocks.2.1',
    'model.12': 'backbone.stage1.1.e_elan_blocks.0.final_conv',
    'model.13': 'backbone.stage1.1.e_elan_blocks.1.short_conv',
    'model.14': 'backbone.stage1.1.e_elan_blocks.1.main_conv',
    'model.15': 'backbone.stage1.1.e_elan_blocks.1.blocks.0.0',
    'model.16': 'backbone.stage1.1.e_elan_blocks.1.blocks.0.1',
    'model.17': 'backbone.stage1.1.e_elan_blocks.1.blocks.1.0',
    'model.18': 'backbone.stage1.1.e_elan_blocks.1.blocks.1.1',
    'model.19': 'backbone.stage1.1.e_elan_blocks.1.blocks.2.0',
    'model.20': 'backbone.stage1.1.e_elan_blocks.1.blocks.2.1',
    'model.22': 'backbone.stage1.1.e_elan_blocks.1.final_conv',
    # stage2
    'model.24.cv1': 'backbone.stage2.0.stride_conv_branches.0',
    'model.24.cv2': 'backbone.stage2.0.stride_conv_branches.1',
    'model.24.cv3': 'backbone.stage2.0.maxpool_branches.1',
    # E-ELANBlock
    'model.25': 'backbone.stage2.1.e_elan_blocks.0.short_conv',
    'model.26': 'backbone.stage2.1.e_elan_blocks.0.main_conv',
    'model.27': 'backbone.stage2.1.e_elan_blocks.0.blocks.0.0',
    'model.28': 'backbone.stage2.1.e_elan_blocks.0.blocks.0.1',
    'model.29': 'backbone.stage2.1.e_elan_blocks.0.blocks.1.0',
    'model.30': 'backbone.stage2.1.e_elan_blocks.0.blocks.1.1',
    'model.31': 'backbone.stage2.1.e_elan_blocks.0.blocks.2.0',
    'model.32': 'backbone.stage2.1.e_elan_blocks.0.blocks.2.1',
    'model.34': 'backbone.stage2.1.e_elan_blocks.0.final_conv',
    'model.35': 'backbone.stage2.1.e_elan_blocks.1.short_conv',
    'model.36': 'backbone.stage2.1.e_elan_blocks.1.main_conv',
    'model.37': 'backbone.stage2.1.e_elan_blocks.1.blocks.0.0',
    'model.38': 'backbone.stage2.1.e_elan_blocks.1.blocks.0.1',
    'model.39': 'backbone.stage2.1.e_elan_blocks.1.blocks.1.0',
    'model.40': 'backbone.stage2.1.e_elan_blocks.1.blocks.1.1',
    'model.41': 'backbone.stage2.1.e_elan_blocks.1.blocks.2.0',
    'model.42': 'backbone.stage2.1.e_elan_blocks.1.blocks.2.1',
    'model.44': 'backbone.stage2.1.e_elan_blocks.1.final_conv',
    # stage3
    'model.46.cv1': 'backbone.stage3.0.stride_conv_branches.0',
    'model.46.cv2': 'backbone.stage3.0.stride_conv_branches.1',
    'model.46.cv3': 'backbone.stage3.0.maxpool_branches.1',
    # E-ELANBlock
    'model.47': 'backbone.stage3.1.e_elan_blocks.0.short_conv',
    'model.48': 'backbone.stage3.1.e_elan_blocks.0.main_conv',
    'model.49': 'backbone.stage3.1.e_elan_blocks.0.blocks.0.0',
    'model.50': 'backbone.stage3.1.e_elan_blocks.0.blocks.0.1',
    'model.51': 'backbone.stage3.1.e_elan_blocks.0.blocks.1.0',
    'model.52': 'backbone.stage3.1.e_elan_blocks.0.blocks.1.1',
    'model.53': 'backbone.stage3.1.e_elan_blocks.0.blocks.2.0',
    'model.54': 'backbone.stage3.1.e_elan_blocks.0.blocks.2.1',
    'model.56': 'backbone.stage3.1.e_elan_blocks.0.final_conv',
    'model.57': 'backbone.stage3.1.e_elan_blocks.1.short_conv',
    'model.58': 'backbone.stage3.1.e_elan_blocks.1.main_conv',
    'model.59': 'backbone.stage3.1.e_elan_blocks.1.blocks.0.0',
    'model.60': 'backbone.stage3.1.e_elan_blocks.1.blocks.0.1',
    'model.61': 'backbone.stage3.1.e_elan_blocks.1.blocks.1.0',
    'model.62': 'backbone.stage3.1.e_elan_blocks.1.blocks.1.1',
    'model.63': 'backbone.stage3.1.e_elan_blocks.1.blocks.2.0',
    'model.64': 'backbone.stage3.1.e_elan_blocks.1.blocks.2.1',
    'model.66': 'backbone.stage3.1.e_elan_blocks.1.final_conv',
    # stage4
    'model.68.cv1': 'backbone.stage4.0.stride_conv_branches.0',
    'model.68.cv2': 'backbone.stage4.0.stride_conv_branches.1',
    'model.68.cv3': 'backbone.stage4.0.maxpool_branches.1',
    # E-ELANBlock
    'model.69': 'backbone.stage4.1.e_elan_blocks.0.short_conv',
    'model.70': 'backbone.stage4.1.e_elan_blocks.0.main_conv',
    'model.71': 'backbone.stage4.1.e_elan_blocks.0.blocks.0.0',
    'model.72': 'backbone.stage4.1.e_elan_blocks.0.blocks.0.1',
    'model.73': 'backbone.stage4.1.e_elan_blocks.0.blocks.1.0',
    'model.74': 'backbone.stage4.1.e_elan_blocks.0.blocks.1.1',
    'model.75': 'backbone.stage4.1.e_elan_blocks.0.blocks.2.0',
    'model.76': 'backbone.stage4.1.e_elan_blocks.0.blocks.2.1',
    'model.78': 'backbone.stage4.1.e_elan_blocks.0.final_conv',
    'model.79': 'backbone.stage4.1.e_elan_blocks.1.short_conv',
    'model.80': 'backbone.stage4.1.e_elan_blocks.1.main_conv',
    'model.81': 'backbone.stage4.1.e_elan_blocks.1.blocks.0.0',
    'model.82': 'backbone.stage4.1.e_elan_blocks.1.blocks.0.1',
    'model.83': 'backbone.stage4.1.e_elan_blocks.1.blocks.1.0',
    'model.84': 'backbone.stage4.1.e_elan_blocks.1.blocks.1.1',
    'model.85': 'backbone.stage4.1.e_elan_blocks.1.blocks.2.0',
    'model.86': 'backbone.stage4.1.e_elan_blocks.1.blocks.2.1',
    'model.88': 'backbone.stage4.1.e_elan_blocks.1.final_conv',
    # stage5
    'model.90.cv1': 'backbone.stage5.0.stride_conv_branches.0',
    'model.90.cv2': 'backbone.stage5.0.stride_conv_branches.1',
    'model.90.cv3': 'backbone.stage5.0.maxpool_branches.1',
    # E-ELANBlock
    'model.91': 'backbone.stage5.1.e_elan_blocks.0.short_conv',
    'model.92': 'backbone.stage5.1.e_elan_blocks.0.main_conv',
    'model.93': 'backbone.stage5.1.e_elan_blocks.0.blocks.0.0',
    'model.94': 'backbone.stage5.1.e_elan_blocks.0.blocks.0.1',
    'model.95': 'backbone.stage5.1.e_elan_blocks.0.blocks.1.0',
    'model.96': 'backbone.stage5.1.e_elan_blocks.0.blocks.1.1',
    'model.97': 'backbone.stage5.1.e_elan_blocks.0.blocks.2.0',
    'model.98': 'backbone.stage5.1.e_elan_blocks.0.blocks.2.1',
    'model.100': 'backbone.stage5.1.e_elan_blocks.0.final_conv',
    'model.101': 'backbone.stage5.1.e_elan_blocks.1.short_conv',
    'model.102': 'backbone.stage5.1.e_elan_blocks.1.main_conv',
    'model.103': 'backbone.stage5.1.e_elan_blocks.1.blocks.0.0',
    'model.104': 'backbone.stage5.1.e_elan_blocks.1.blocks.0.1',
    'model.105': 'backbone.stage5.1.e_elan_blocks.1.blocks.1.0',
    'model.106': 'backbone.stage5.1.e_elan_blocks.1.blocks.1.1',
    'model.107': 'backbone.stage5.1.e_elan_blocks.1.blocks.2.0',
    'model.108': 'backbone.stage5.1.e_elan_blocks.1.blocks.2.1',
    'model.110': 'backbone.stage5.1.e_elan_blocks.1.final_conv',
    # neck SPPCSPBlock
    'model.112.cv1': 'neck.reduce_layers.3.main_layers.0',
    'model.112.cv3': 'neck.reduce_layers.3.main_layers.1',
    'model.112.cv4': 'neck.reduce_layers.3.main_layers.2',
    'model.112.cv5': 'neck.reduce_layers.3.fuse_layers.0',
    'model.112.cv6': 'neck.reduce_layers.3.fuse_layers.1',
    'model.112.cv2': 'neck.reduce_layers.3.short_layer',
    'model.112.cv7': 'neck.reduce_layers.3.final_conv',
    # neck
    'model.113': 'neck.upsample_layers.0.0',
    'model.115': 'neck.reduce_layers.2',
    # neck E-ELANBlock
    'model.117': 'neck.top_down_layers.0.e_elan_blocks.0.short_conv',
    'model.118': 'neck.top_down_layers.0.e_elan_blocks.0.main_conv',
    'model.119': 'neck.top_down_layers.0.e_elan_blocks.0.blocks.0',
    'model.120': 'neck.top_down_layers.0.e_elan_blocks.0.blocks.1',
    'model.121': 'neck.top_down_layers.0.e_elan_blocks.0.blocks.2',
    'model.122': 'neck.top_down_layers.0.e_elan_blocks.0.blocks.3',
    'model.123': 'neck.top_down_layers.0.e_elan_blocks.0.blocks.4',
    'model.124': 'neck.top_down_layers.0.e_elan_blocks.0.blocks.5',
    'model.126': 'neck.top_down_layers.0.e_elan_blocks.0.final_conv',
    'model.127': 'neck.top_down_layers.0.e_elan_blocks.1.short_conv',
    'model.128': 'neck.top_down_layers.0.e_elan_blocks.1.main_conv',
    'model.129': 'neck.top_down_layers.0.e_elan_blocks.1.blocks.0',
    'model.130': 'neck.top_down_layers.0.e_elan_blocks.1.blocks.1',
    'model.131': 'neck.top_down_layers.0.e_elan_blocks.1.blocks.2',
    'model.132': 'neck.top_down_layers.0.e_elan_blocks.1.blocks.3',
    'model.133': 'neck.top_down_layers.0.e_elan_blocks.1.blocks.4',
    'model.134': 'neck.top_down_layers.0.e_elan_blocks.1.blocks.5',
    'model.136': 'neck.top_down_layers.0.e_elan_blocks.1.final_conv',
    'model.138': 'neck.upsample_layers.1.0',
    'model.140': 'neck.reduce_layers.1',
    # neck E-ELANBlock
    'model.142': 'neck.top_down_layers.1.e_elan_blocks.0.short_conv',
    'model.143': 'neck.top_down_layers.1.e_elan_blocks.0.main_conv',
    'model.144': 'neck.top_down_layers.1.e_elan_blocks.0.blocks.0',
    'model.145': 'neck.top_down_layers.1.e_elan_blocks.0.blocks.1',
    'model.146': 'neck.top_down_layers.1.e_elan_blocks.0.blocks.2',
    'model.147': 'neck.top_down_layers.1.e_elan_blocks.0.blocks.3',
    'model.148': 'neck.top_down_layers.1.e_elan_blocks.0.blocks.4',
    'model.149': 'neck.top_down_layers.1.e_elan_blocks.0.blocks.5',
    'model.151': 'neck.top_down_layers.1.e_elan_blocks.0.final_conv',
    'model.152': 'neck.top_down_layers.1.e_elan_blocks.1.short_conv',
    'model.153': 'neck.top_down_layers.1.e_elan_blocks.1.main_conv',
    'model.154': 'neck.top_down_layers.1.e_elan_blocks.1.blocks.0',
    'model.155': 'neck.top_down_layers.1.e_elan_blocks.1.blocks.1',
    'model.156': 'neck.top_down_layers.1.e_elan_blocks.1.blocks.2',
    'model.157': 'neck.top_down_layers.1.e_elan_blocks.1.blocks.3',
    'model.158': 'neck.top_down_layers.1.e_elan_blocks.1.blocks.4',
    'model.159': 'neck.top_down_layers.1.e_elan_blocks.1.blocks.5',
    'model.161': 'neck.top_down_layers.1.e_elan_blocks.1.final_conv',
    'model.163': 'neck.upsample_layers.2.0',
    'model.165': 'neck.reduce_layers.0',
    # neck E-ELANBlock
    'model.167': 'neck.top_down_layers.2.e_elan_blocks.0.short_conv',
    'model.168': 'neck.top_down_layers.2.e_elan_blocks.0.main_conv',
    'model.169': 'neck.top_down_layers.2.e_elan_blocks.0.blocks.0',
    'model.170': 'neck.top_down_layers.2.e_elan_blocks.0.blocks.1',
    'model.171': 'neck.top_down_layers.2.e_elan_blocks.0.blocks.2',
    'model.172': 'neck.top_down_layers.2.e_elan_blocks.0.blocks.3',
    'model.173': 'neck.top_down_layers.2.e_elan_blocks.0.blocks.4',
    'model.174': 'neck.top_down_layers.2.e_elan_blocks.0.blocks.5',
    'model.176': 'neck.top_down_layers.2.e_elan_blocks.0.final_conv',
    'model.177': 'neck.top_down_layers.2.e_elan_blocks.1.short_conv',
    'model.178': 'neck.top_down_layers.2.e_elan_blocks.1.main_conv',
    'model.179': 'neck.top_down_layers.2.e_elan_blocks.1.blocks.0',
    'model.180': 'neck.top_down_layers.2.e_elan_blocks.1.blocks.1',
    'model.181': 'neck.top_down_layers.2.e_elan_blocks.1.blocks.2',
    'model.182': 'neck.top_down_layers.2.e_elan_blocks.1.blocks.3',
    'model.183': 'neck.top_down_layers.2.e_elan_blocks.1.blocks.4',
    'model.184': 'neck.top_down_layers.2.e_elan_blocks.1.blocks.5',
    'model.186': 'neck.top_down_layers.2.e_elan_blocks.1.final_conv',
    'model.188.cv1': 'neck.downsample_layers.0.stride_conv_branches.0',
    'model.188.cv2': 'neck.downsample_layers.0.stride_conv_branches.1',
    'model.188.cv3': 'neck.downsample_layers.0.maxpool_branches.1',
    # neck E-ELANBlock
    'model.190': 'neck.bottom_up_layers.0.e_elan_blocks.0.short_conv',
    'model.191': 'neck.bottom_up_layers.0.e_elan_blocks.0.main_conv',
    'model.192': 'neck.bottom_up_layers.0.e_elan_blocks.0.blocks.0',
    'model.193': 'neck.bottom_up_layers.0.e_elan_blocks.0.blocks.1',
    'model.194': 'neck.bottom_up_layers.0.e_elan_blocks.0.blocks.2',
    'model.195': 'neck.bottom_up_layers.0.e_elan_blocks.0.blocks.3',
    'model.196': 'neck.bottom_up_layers.0.e_elan_blocks.0.blocks.4',
    'model.197': 'neck.bottom_up_layers.0.e_elan_blocks.0.blocks.5',
    'model.199': 'neck.bottom_up_layers.0.e_elan_blocks.0.final_conv',
    'model.200': 'neck.bottom_up_layers.0.e_elan_blocks.1.short_conv',
    'model.201': 'neck.bottom_up_layers.0.e_elan_blocks.1.main_conv',
    'model.202': 'neck.bottom_up_layers.0.e_elan_blocks.1.blocks.0',
    'model.203': 'neck.bottom_up_layers.0.e_elan_blocks.1.blocks.1',
    'model.204': 'neck.bottom_up_layers.0.e_elan_blocks.1.blocks.2',
    'model.205': 'neck.bottom_up_layers.0.e_elan_blocks.1.blocks.3',
    'model.206': 'neck.bottom_up_layers.0.e_elan_blocks.1.blocks.4',
    'model.207': 'neck.bottom_up_layers.0.e_elan_blocks.1.blocks.5',
    'model.209': 'neck.bottom_up_layers.0.e_elan_blocks.1.final_conv',
    'model.211.cv1': 'neck.downsample_layers.1.stride_conv_branches.0',
    'model.211.cv2': 'neck.downsample_layers.1.stride_conv_branches.1',
    'model.211.cv3': 'neck.downsample_layers.1.maxpool_branches.1',
    # neck E-ELANBlock
    'model.213': 'neck.bottom_up_layers.1.e_elan_blocks.0.short_conv',
    'model.214': 'neck.bottom_up_layers.1.e_elan_blocks.0.main_conv',
    'model.215': 'neck.bottom_up_layers.1.e_elan_blocks.0.blocks.0',
    'model.216': 'neck.bottom_up_layers.1.e_elan_blocks.0.blocks.1',
    'model.217': 'neck.bottom_up_layers.1.e_elan_blocks.0.blocks.2',
    'model.218': 'neck.bottom_up_layers.1.e_elan_blocks.0.blocks.3',
    'model.219': 'neck.bottom_up_layers.1.e_elan_blocks.0.blocks.4',
    'model.220': 'neck.bottom_up_layers.1.e_elan_blocks.0.blocks.5',
    'model.222': 'neck.bottom_up_layers.1.e_elan_blocks.0.final_conv',
    'model.223': 'neck.bottom_up_layers.1.e_elan_blocks.1.short_conv',
    'model.224': 'neck.bottom_up_layers.1.e_elan_blocks.1.main_conv',
    'model.225': 'neck.bottom_up_layers.1.e_elan_blocks.1.blocks.0',
    'model.226': 'neck.bottom_up_layers.1.e_elan_blocks.1.blocks.1',
    'model.227': 'neck.bottom_up_layers.1.e_elan_blocks.1.blocks.2',
    'model.228': 'neck.bottom_up_layers.1.e_elan_blocks.1.blocks.3',
    'model.229': 'neck.bottom_up_layers.1.e_elan_blocks.1.blocks.4',
    'model.230': 'neck.bottom_up_layers.1.e_elan_blocks.1.blocks.5',
    'model.232': 'neck.bottom_up_layers.1.e_elan_blocks.1.final_conv',
    'model.234.cv1': 'neck.downsample_layers.2.stride_conv_branches.0',
    'model.234.cv2': 'neck.downsample_layers.2.stride_conv_branches.1',
    'model.234.cv3': 'neck.downsample_layers.2.maxpool_branches.1',
    # neck E-ELANBlock
    'model.236': 'neck.bottom_up_layers.2.e_elan_blocks.0.short_conv',
    'model.237': 'neck.bottom_up_layers.2.e_elan_blocks.0.main_conv',
    'model.238': 'neck.bottom_up_layers.2.e_elan_blocks.0.blocks.0',
    'model.239': 'neck.bottom_up_layers.2.e_elan_blocks.0.blocks.1',
    'model.240': 'neck.bottom_up_layers.2.e_elan_blocks.0.blocks.2',
    'model.241': 'neck.bottom_up_layers.2.e_elan_blocks.0.blocks.3',
    'model.242': 'neck.bottom_up_layers.2.e_elan_blocks.0.blocks.4',
    'model.243': 'neck.bottom_up_layers.2.e_elan_blocks.0.blocks.5',
    'model.245': 'neck.bottom_up_layers.2.e_elan_blocks.0.final_conv',
    'model.246': 'neck.bottom_up_layers.2.e_elan_blocks.1.short_conv',
    'model.247': 'neck.bottom_up_layers.2.e_elan_blocks.1.main_conv',
    'model.248': 'neck.bottom_up_layers.2.e_elan_blocks.1.blocks.0',
    'model.249': 'neck.bottom_up_layers.2.e_elan_blocks.1.blocks.1',
    'model.250': 'neck.bottom_up_layers.2.e_elan_blocks.1.blocks.2',
    'model.251': 'neck.bottom_up_layers.2.e_elan_blocks.1.blocks.3',
    'model.252': 'neck.bottom_up_layers.2.e_elan_blocks.1.blocks.4',
    'model.253': 'neck.bottom_up_layers.2.e_elan_blocks.1.blocks.5',
    'model.255': 'neck.bottom_up_layers.2.e_elan_blocks.1.final_conv',
    # per-level convs preceding the prediction layers
    'model.257': 'bbox_head.head_module.main_convs_pred.0.0',
    'model.258': 'bbox_head.head_module.main_convs_pred.1.0',
    'model.259': 'bbox_head.head_module.main_convs_pred.2.0',
    'model.260': 'bbox_head.head_module.main_convs_pred.3.0',
    # head
    'model.261.m.0': 'bbox_head.head_module.main_convs_pred.0.2',
    'model.261.m.1': 'bbox_head.head_module.main_convs_pred.1.2',
    'model.261.m.2': 'bbox_head.head_module.main_convs_pred.2.2',
    'model.261.m.3': 'bbox_head.head_module.main_convs_pred.3.2'
}
# Checkpoint basename -> layer-name mapping.  ``convert()`` selects the
# mapping by ``osp.basename(src)``; the tiny/w/e/l/x dicts are defined
# earlier in this file.
convert_dicts = {
    'yolov7-tiny.pt': convert_dict_tiny,
    'yolov7-w6.pt': convert_dict_w,
    'yolov7-e6.pt': convert_dict_e,
    'yolov7-e6e.pt': convert_dict_e2e,
    'yolov7.pt': convert_dict_l,
    'yolov7x.pt': convert_dict_x
}
def convert(src, dst):
    """Convert keys in official pretrained YOLOv7 models to mmyolo style.

    Args:
        src (str): Path to the official YOLOv7 checkpoint. Its basename
            (e.g. ``yolov7.pt``) selects the layer-name mapping.
        dst (str): Path where the converted mmyolo checkpoint is saved.

    Raises:
        ValueError: If the checkpoint basename is not a supported model.
        RuntimeError: If the official model code is not importable (the
            script must be placed under the WongKinYiu/yolov7 repo).
    """
    src_key = osp.basename(src)
    # The original code let an opaque KeyError escape here; fail with an
    # actionable message instead.
    if src_key not in convert_dicts:
        raise ValueError(
            f'Unsupported checkpoint name {src_key!r}, expected one of '
            f'{sorted(convert_dicts)}')
    convert_dict = convert_dicts[src_key]

    # indexes[0]: index of the first head module (keys at or beyond it are
    # mapped with a four-part prefix such as ``model.140.m.0``).
    # indexes[1]: module indices whose mapping keys carry a sub-module
    # suffix (e.g. ``model.13.cv1``).
    # in_channels: per-level input channels of the head convs.
    num_levels = 3
    if src_key == 'yolov7.pt':
        indexes = [102, 51]
        in_channels = [256, 512, 1024]
    elif src_key == 'yolov7x.pt':
        indexes = [121, 59]
        in_channels = [320, 640, 1280]
    elif src_key == 'yolov7-tiny.pt':
        indexes = [77, 1000]
        in_channels = [128, 256, 512]
    elif src_key == 'yolov7-w6.pt':
        indexes = [118, 47]
        in_channels = [256, 512, 768, 1024]
        num_levels = 4
    elif src_key == 'yolov7-e6.pt':
        indexes = [140, [2, 13, 24, 35, 46, 57, 100, 112, 124]]
        in_channels = 320, 640, 960, 1280
        num_levels = 4
    elif src_key == 'yolov7-e6e.pt':
        indexes = [261, [2, 24, 46, 68, 90, 112, 188, 211, 234]]
        in_channels = 320, 640, 960, 1280
        num_levels = 4
    if isinstance(indexes[1], int):
        indexes[1] = [indexes[1]]

    try:
        yolov7_model = torch.load(src)['model'].float()
        blobs = yolov7_model.state_dict()
    except ModuleNotFoundError:
        raise RuntimeError(
            'This script must be placed under the WongKinYiu/yolov7 repo,'
            ' because loading the official pretrained model need'
            ' `model.py` to build model.')

    state_dict = OrderedDict()
    for key, weight in blobs.items():
        # Anchor buffers have no counterpart in mmyolo; drop them.
        if key.find('anchors') >= 0 or key.find('anchor_grid') >= 0:
            continue
        num, module = key.split('.')[1:3]
        if int(num) < indexes[0] and int(num) not in indexes[1]:
            # Plain module: map the two-part prefix ``model.<num>``.
            prefix = f'model.{num}'
            new_key = key.replace(prefix, convert_dict[prefix])
        elif int(num) in indexes[1]:
            # Module mapped together with its first sub-module name.
            strs_key = key.split('.')[:3]
            new_key = key.replace('.'.join(strs_key),
                                  convert_dict['.'.join(strs_key)])
        else:
            # Head module: map the four-part prefix (e.g. ``model.140.m.0``).
            strs_key = key.split('.')[:4]
            new_key = key.replace('.'.join(strs_key),
                                  convert_dict['.'.join(strs_key)])
        state_dict[new_key] = weight
        print(f'Convert {key} to {new_key}')

    # Add ImplicitA and ImplicitM parameters, which are absent from the
    # official state dict.
    for i in range(num_levels):
        if num_levels == 3:
            implicit_a = f'bbox_head.head_module.' \
                         f'convs_pred.{i}.0.implicit'
            state_dict[implicit_a] = torch.zeros((1, in_channels[i], 1, 1))
            implicit_m = f'bbox_head.head_module.' \
                         f'convs_pred.{i}.2.implicit'
            # 3 * 85: presumably 3 anchors x (80 classes + 5) for COCO —
            # TODO confirm for other datasets.
            state_dict[implicit_m] = torch.ones((1, 3 * 85, 1, 1))
        else:
            implicit_a = f'bbox_head.head_module.' \
                         f'main_convs_pred.{i}.1.implicit'
            state_dict[implicit_a] = torch.zeros((1, in_channels[i], 1, 1))
            implicit_m = f'bbox_head.head_module.' \
                         f'main_convs_pred.{i}.3.implicit'
            state_dict[implicit_m] = torch.ones((1, 3 * 85, 1, 1))

    # save checkpoint
    checkpoint = dict()
    checkpoint['state_dict'] = state_dict
    torch.save(checkpoint, dst)
# Note: This script must be placed under the yolov7 repo to run.
def main():
    """Command-line entry point: parse src/dst paths and run the conversion."""
    arg_parser = argparse.ArgumentParser(description='Convert model keys')
    # NOTE(review): ``default`` on a positional without ``nargs='?'`` is
    # ignored by argparse — the arguments are effectively required.
    arg_parser.add_argument(
        'src', default='yolov7.pt', help='src yolov7 model path')
    arg_parser.add_argument('dst', default='mm_yolov7l.pt', help='save path')
    cli_args = arg_parser.parse_args()
    convert(cli_args.src, cli_args.dst)
    print('If your model weights are from P6 models, such as W6, E6, D6, \
        E6E, the auxiliary training module is not required to be loaded, \
        so it is normal for the weights of the auxiliary module \
        to be missing.')


if __name__ == '__main__':
    main()
| 50,022 | 44.724863 | 78 | py |
mmyolo | mmyolo-main/tools/model_converters/yolov5_to_mmyolo.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
from collections import OrderedDict
import torch
# Layer-name mapping for P5 YOLOv5 checkpoints: official ``model.<i>``
# prefixes -> mmyolo module paths.
convert_dict_p5 = {
    # backbone
    'model.0': 'backbone.stem',
    'model.1': 'backbone.stage1.0',
    'model.2': 'backbone.stage1.1',
    'model.3': 'backbone.stage2.0',
    'model.4': 'backbone.stage2.1',
    'model.5': 'backbone.stage3.0',
    'model.6': 'backbone.stage3.1',
    'model.7': 'backbone.stage4.0',
    'model.8': 'backbone.stage4.1',
    'model.9.cv1': 'backbone.stage4.2.conv1',
    'model.9.cv2': 'backbone.stage4.2.conv2',
    # neck
    'model.10': 'neck.reduce_layers.2',
    'model.13': 'neck.top_down_layers.0.0',
    'model.14': 'neck.top_down_layers.0.1',
    'model.17': 'neck.top_down_layers.1',
    'model.18': 'neck.downsample_layers.0',
    'model.20': 'neck.bottom_up_layers.0',
    'model.21': 'neck.downsample_layers.1',
    'model.23': 'neck.bottom_up_layers.1',
    # head
    'model.24.m': 'bbox_head.head_module.convs_pred',
}
# Layer-name mapping for P6 YOLOv5 checkpoints (basenames ending in
# ``6.pt``): official ``model.<i>`` prefixes -> mmyolo module paths.
convert_dict_p6 = {
    # backbone
    'model.0': 'backbone.stem',
    'model.1': 'backbone.stage1.0',
    'model.2': 'backbone.stage1.1',
    'model.3': 'backbone.stage2.0',
    'model.4': 'backbone.stage2.1',
    'model.5': 'backbone.stage3.0',
    'model.6': 'backbone.stage3.1',
    'model.7': 'backbone.stage4.0',
    'model.8': 'backbone.stage4.1',
    'model.9': 'backbone.stage5.0',
    'model.10': 'backbone.stage5.1',
    'model.11.cv1': 'backbone.stage5.2.conv1',
    'model.11.cv2': 'backbone.stage5.2.conv2',
    # neck
    'model.12': 'neck.reduce_layers.3',
    'model.15': 'neck.top_down_layers.0.0',
    'model.16': 'neck.top_down_layers.0.1',
    'model.19': 'neck.top_down_layers.1.0',
    'model.20': 'neck.top_down_layers.1.1',
    'model.23': 'neck.top_down_layers.2',
    'model.24': 'neck.downsample_layers.0',
    'model.26': 'neck.bottom_up_layers.0',
    'model.27': 'neck.downsample_layers.1',
    'model.29': 'neck.bottom_up_layers.1',
    'model.30': 'neck.downsample_layers.2',
    'model.32': 'neck.bottom_up_layers.2',
    # head
    'model.33.m': 'bbox_head.head_module.convs_pred',
}
def convert(src, dst):
"""Convert keys in pretrained YOLOv5 models to mmyolo style."""
if src.endswith('6.pt'):
convert_dict = convert_dict_p6
is_p6_model = True
print('Converting P6 model')
else:
convert_dict = convert_dict_p5
is_p6_model = False
print('Converting P5 model')
try:
yolov5_model = torch.load(src)['model']
blobs = yolov5_model.state_dict()
except ModuleNotFoundError:
raise RuntimeError(
'This script must be placed under the ultralytics/yolov5 repo,'
' because loading the official pretrained model need'
' `model.py` to build model.')
state_dict = OrderedDict()
for key, weight in blobs.items():
num, module = key.split('.')[1:3]
if (is_p6_model and
(num == '11' or num == '33')) or (not is_p6_model and
(num == '9' or num == '24')):
if module == 'anchors':
continue
prefix = f'model.{num}.{module}'
else:
prefix = f'model.{num}'
new_key = key.replace(prefix, convert_dict[prefix])
if '.m.' in new_key:
new_key = new_key.replace('.m.', '.blocks.')
new_key = new_key.replace('.cv', '.conv')
else:
new_key = new_key.replace('.cv1', '.main_conv')
new_key = new_key.replace('.cv2', '.short_conv')
new_key = new_key.replace('.cv3', '.final_conv')
state_dict[new_key] = weight
print(f'Convert {key} to {new_key}')
# save checkpoint
checkpoint = dict()
checkpoint['state_dict'] = state_dict
torch.save(checkpoint, dst)
# Note: This script must be placed under the yolov5 repo to run.
def main():
    """Command-line entry point: parse src/dst paths and run the conversion."""
    arg_parser = argparse.ArgumentParser(description='Convert model keys')
    arg_parser.add_argument(
        '--src', default='yolov5s.pt', help='src yolov5 model path')
    arg_parser.add_argument('--dst', default='mmyolov5s.pt', help='save path')
    cli_args = arg_parser.parse_args()
    convert(cli_args.src, cli_args.dst)


if __name__ == '__main__':
    main()
| 4,176 | 32.95935 | 75 | py |
mmyolo | mmyolo-main/tools/analysis_tools/benchmark.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import copy
import os
import time
import torch
from mmengine import Config, DictAction
from mmengine.dist import get_world_size, init_dist
from mmengine.logging import MMLogger, print_log
from mmengine.registry import init_default_scope
from mmengine.runner import Runner, load_checkpoint
from mmengine.utils import mkdir_or_exist
from mmengine.utils.dl_utils import set_multi_processing
from mmyolo.registry import MODELS
# TODO: Refactoring and improving
def parse_args():
    """Parse the command line arguments of the benchmark script."""
    benchmark_parser = argparse.ArgumentParser(
        description='MMYOLO benchmark a model')
    benchmark_parser.add_argument('config', help='test config file path')
    benchmark_parser.add_argument('checkpoint', help='checkpoint file')
    benchmark_parser.add_argument(
        '--repeat-num',
        type=int,
        default=1,
        help='number of repeat times of measurement for averaging the results')
    benchmark_parser.add_argument(
        '--max-iter', type=int, default=2000, help='num of max iter')
    benchmark_parser.add_argument(
        '--log-interval', type=int, default=50, help='interval of logging')
    benchmark_parser.add_argument(
        '--work-dir',
        help='the directory to save the file containing '
        'benchmark metrics')
    benchmark_parser.add_argument(
        '--fuse-conv-bn',
        action='store_true',
        help='Whether to fuse conv and bn, this will slightly increase'
        'the inference speed')
    benchmark_parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    benchmark_parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    benchmark_parser.add_argument('--local_rank', type=int, default=0)
    args = benchmark_parser.parse_args()

    # Expose the local rank to libraries that read it from the environment.
    os.environ.setdefault('LOCAL_RANK', str(args.local_rank))
    return args
def measure_inference_speed(cfg, checkpoint, max_iter, log_interval,
                            is_fuse_conv_bn):
    """Benchmark single-image inference speed of the model in ``cfg``.

    Builds the test dataloader and model from ``cfg``, loads ``checkpoint``,
    then times ``model.test_step`` on up to ``max_iter`` images (the first
    few iterations are excluded as warmup), logging running FPS every
    ``log_interval`` images.  Returns the last computed FPS (0 if fewer
    than ``log_interval`` post-warmup iterations ran).

    NOTE(review): ``is_fuse_conv_bn`` is accepted but never used — the
    ``--fuse-conv-bn`` CLI flag currently has no effect; confirm whether
    conv-bn fusion should be applied before benchmarking.
    """
    env_cfg = cfg.get('env_cfg')
    # assumes cfg always carries an 'env_cfg' section — .get(...) below
    # would raise AttributeError on None; TODO confirm.
    if env_cfg.get('cudnn_benchmark'):
        torch.backends.cudnn.benchmark = True
    mp_cfg: dict = env_cfg.get('mp_cfg', {})
    set_multi_processing(**mp_cfg, distributed=cfg.distributed)
    # Because multiple processes will occupy additional CPU resources,
    # FPS statistics will be more unstable when num_workers is not 0.
    # It is reasonable to set num_workers to 0.
    dataloader_cfg = cfg.test_dataloader
    dataloader_cfg['num_workers'] = 0
    dataloader_cfg['batch_size'] = 1
    dataloader_cfg['persistent_workers'] = False
    data_loader = Runner.build_dataloader(dataloader_cfg)
    # build the model and load checkpoint
    model = MODELS.build(cfg.model)
    load_checkpoint(model, checkpoint, map_location='cpu')
    model = model.cuda()
    model.eval()
    # the first several iterations may be very slow so skip them
    num_warmup = 5
    pure_inf_time = 0
    fps = 0
    # benchmark with 2000 image and take the average
    for i, data in enumerate(data_loader):
        # synchronize around the timed region so GPU work is fully counted
        torch.cuda.synchronize()
        start_time = time.perf_counter()
        with torch.no_grad():
            model.test_step(data)
        torch.cuda.synchronize()
        elapsed = time.perf_counter() - start_time
        if i >= num_warmup:
            pure_inf_time += elapsed
            if (i + 1) % log_interval == 0:
                fps = (i + 1 - num_warmup) / pure_inf_time
                print_log(
                    f'Done image [{i + 1:<3}/ {max_iter}], '
                    f'fps: {fps:.1f} img / s, '
                    f'times per image: {1000 / fps:.1f} ms / img', 'current')
        if (i + 1) == max_iter:
            fps = (i + 1 - num_warmup) / pure_inf_time
            print_log(
                f'Overall fps: {fps:.1f} img / s, '
                f'times per image: {1000 / fps:.1f} ms / img', 'current')
            break
    return fps
def repeat_measure_inference_speed(cfg,
                                   checkpoint,
                                   max_iter,
                                   log_interval,
                                   is_fuse_conv_bn,
                                   repeat_num=1):
    """Run the inference-speed benchmark ``repeat_num`` times.

    Each run uses a fresh deep copy of ``cfg``.  Returns the single FPS
    value when ``repeat_num == 1``, otherwise the list of FPS values
    (a summary is also logged in that case).
    """
    assert repeat_num >= 1

    fps_list = [
        measure_inference_speed(
            copy.deepcopy(cfg), checkpoint, max_iter, log_interval,
            is_fuse_conv_bn) for _ in range(repeat_num)
    ]

    if repeat_num == 1:
        return fps_list[0]

    rounded_fps = [round(fps, 1) for fps in fps_list]
    ms_per_image = [round(1000 / fps, 1) for fps in fps_list]
    mean_fps = sum(rounded_fps) / len(rounded_fps)
    mean_ms = sum(ms_per_image) / len(ms_per_image)
    print_log(
        f'Overall fps: {rounded_fps}[{mean_fps:.1f}] img / s, '
        f'times per image: '
        f'{ms_per_image}[{mean_ms:.1f}] ms / img', 'current')
    return fps_list
# TODO: refactoring
def main():
    """Entry point: load the config, set up logging and run the benchmark."""
    args = parse_args()

    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    init_default_scope(cfg.get('default_scope', 'mmyolo'))

    # A launcher other than 'none' means a distributed environment was
    # requested; the benchmark still only allows a single process.
    distributed = args.launcher != 'none'
    if distributed:
        init_dist(args.launcher, **cfg.get('env_cfg', {}).get('dist_cfg', {}))
        assert get_world_size(
        ) == 1, 'Inference benchmark does not allow distributed multi-GPU'
    cfg.distributed = distributed

    # Mirror console output to <work_dir>/benchmark.log when requested.
    log_file = None
    if args.work_dir:
        log_file = os.path.join(args.work_dir, 'benchmark.log')
        mkdir_or_exist(args.work_dir)
    MMLogger.get_instance('mmyolo', log_file=log_file, log_level='INFO')

    repeat_measure_inference_speed(cfg, args.checkpoint, args.max_iter,
                                   args.log_interval, args.fuse_conv_bn,
                                   args.repeat_num)


if __name__ == '__main__':
    main()
| 6,460 | 33.185185 | 79 | py |
mmyolo | mmyolo-main/tools/analysis_tools/optimize_anchors.py | # Copyright (c) OpenMMLab. All rights reserved.
"""Optimize anchor settings on a specific dataset.
This script provides three methods to optimize YOLO anchors including k-means
anchor cluster, differential evolution and v5-k-means. You can use
``--algorithm k-means``, ``--algorithm differential_evolution`` and
``--algorithm v5-k-means`` to switch those methods.
Example:
Use k-means anchor cluster::
python tools/analysis_tools/optimize_anchors.py ${CONFIG} \
--algorithm k-means --input-shape ${INPUT_SHAPE [WIDTH HEIGHT]} \
--out-dir ${OUT_DIR}
Use differential evolution to optimize anchors::
python tools/analysis_tools/optimize_anchors.py ${CONFIG} \
--algorithm differential_evolution \
--input-shape ${INPUT_SHAPE [WIDTH HEIGHT]} \
--out-dir ${OUT_DIR}
Use v5-k-means to optimize anchors::
python tools/analysis_tools/optimize_anchors.py ${CONFIG} \
--algorithm v5-k-means \
--input-shape ${INPUT_SHAPE [WIDTH HEIGHT]} \
--prior_match_thr ${PRIOR_MATCH_THR} \
--out-dir ${OUT_DIR}
"""
import argparse
import os.path as osp
import random
from typing import Tuple
import numpy as np
import torch
from mmdet.structures.bbox import (bbox_cxcywh_to_xyxy, bbox_overlaps,
bbox_xyxy_to_cxcywh)
from mmdet.utils import replace_cfg_vals, update_data_root
from mmengine.config import Config
from mmengine.fileio import dump
from mmengine.logging import MMLogger
from mmengine.registry import init_default_scope
from mmengine.utils import ProgressBar
from scipy.optimize import differential_evolution
from torch import Tensor
from mmyolo.registry import DATASETS
try:
from scipy.cluster.vq import kmeans
except ImportError:
kmeans = None
def parse_args():
    """Parse command line arguments for anchor optimization.

    Returns:
        argparse.Namespace: Parsed arguments. ``algorithm`` selects one of
        ``k-means``, ``DE`` (differential evolution, the default) or
        ``v5-k-means``.
    """
    parser = argparse.ArgumentParser(description='Optimize anchor parameters.')
    parser.add_argument('config', help='Train config file path.')
    parser.add_argument(
        '--input-shape',
        type=int,
        nargs='+',
        default=[640, 640],
        help='input image size, represent [width, height]')
    parser.add_argument(
        '--algorithm',
        default='DE',
        help='Algorithm used for anchor optimizing.'
        'Support k-means and differential_evolution for YOLO,'
        'and v5-k-means is special for YOLOV5.')
    parser.add_argument(
        '--iters',
        default=1000,
        type=int,
        help='Maximum iterations for optimizer.')
    parser.add_argument(
        '--prior-match-thr',
        default=4.0,
        type=float,
        help='anchor-label `gt_filter_sizes` ratio threshold '
        'hyperparameter used for training, default=4.0, this '
        'parameter is unique to v5-k-means')
    parser.add_argument(
        '--mutation-args',
        type=float,
        nargs='+',
        default=[0.9, 0.1],
        # Fixed typo in the help text: 'paramter' -> 'parameter'.
        help='parameter of anchor optimize method genetic algorithm, '
        'represent [prob, sigma], this parameter is unique to v5-k-means')
    parser.add_argument(
        '--augment-args',
        type=float,
        nargs='+',
        default=[0.9, 1.1],
        help='scale factor of box size augment when metric box and anchor, '
        'represent [min, max], this parameter is unique to v5-k-means')
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for calculating.')
    parser.add_argument(
        '--out-dir',
        default=None,
        type=str,
        help='Path to save anchor optimize result.')
    args = parser.parse_args()
    return args
class BaseAnchorOptimizer:
    """Base class for anchor optimizer.

    Collects all ground-truth box sizes from ``dataset`` at construction time
    and rescales them to the model input size; subclasses implement
    :meth:`optimize` to cluster these sizes into anchors.

    Args:
        dataset (obj:`Dataset`): Dataset object.
        input_shape (list[int]): Input image shape of the model.
            Format in [width, height].
        num_anchor_per_level (list[int]) : Number of anchors for each level.
        logger (obj:`logging.Logger`): The logger for logging.
        device (str, optional): Device used for calculating.
            Default: 'cuda:0'
        out_dir (str, optional): Path to save anchor optimize result.
            Default: None
    """

    def __init__(self,
                 dataset,
                 input_shape,
                 num_anchor_per_level,
                 logger,
                 device='cuda:0',
                 out_dir=None):
        self.dataset = dataset
        self.input_shape = input_shape
        self.num_anchor_per_level = num_anchor_per_level
        # Total anchor count across all feature levels.
        self.num_anchors = sum(num_anchor_per_level)
        self.logger = logger
        self.device = device
        self.out_dir = out_dir
        bbox_whs, img_shapes = self.get_whs_and_shapes()
        # Per-image scale factor: longer image side divided by the input
        # width/height. Broadcasts (N, 1) / (1, 2) -> (N, 2).
        ratios = img_shapes.max(1, keepdims=True) / np.array([input_shape])
        # resize to input shape
        self.bbox_whs = bbox_whs / ratios

    def get_whs_and_shapes(self):
        """Get widths and heights of bboxes and shapes of images.

        Returns:
            tuple[np.ndarray]: Array of bbox shapes and array of image
                shapes with shape (num_bboxes, 2) in [width, height] format.
        """
        self.logger.info('Collecting bboxes from annotation...')
        bbox_whs = []
        img_shapes = []
        prog_bar = ProgressBar(len(self.dataset))
        for idx in range(len(self.dataset)):
            data_info = self.dataset.get_data_info(idx)
            img_shape = np.array([data_info['width'], data_info['height']])
            gt_instances = data_info['instances']
            for instance in gt_instances:
                # bbox is [xmin, ymin, xmax, ymax]; width/height via diff.
                bbox = np.array(instance['bbox'])
                gt_filter_sizes = bbox[2:4] - bbox[0:2]
                # One image-shape entry per box so the arrays stay aligned.
                img_shapes.append(img_shape)
                bbox_whs.append(gt_filter_sizes)
            prog_bar.update()
        print('\n')
        bbox_whs = np.array(bbox_whs)
        img_shapes = np.array(img_shapes)
        self.logger.info(f'Collected {bbox_whs.shape[0]} bboxes.')
        return bbox_whs, img_shapes

    def get_zero_center_bbox_tensor(self):
        """Get a tensor of bboxes centered at (0, 0).

        Returns:
            Tensor: Tensor of bboxes with shape (num_bboxes, 4)
                in [xmin, ymin, xmax, ymax] format.
        """
        whs = torch.from_numpy(self.bbox_whs).to(
            self.device, dtype=torch.float32)
        # Center = (0, 0), size = (w, h) -> corner format for IoU computation.
        bboxes = bbox_cxcywh_to_xyxy(
            torch.cat([torch.zeros_like(whs), whs], dim=1))
        return bboxes

    def optimize(self):
        # Subclasses must implement the actual anchor optimization.
        raise NotImplementedError

    def save_result(self, anchors, path=None):
        """Log the optimized anchors, grouped per level, and optionally dump
        them to ``<path>/anchor_optimize_result.json``."""
        anchor_results = []
        start = 0
        # Slice the flat (sorted) anchor list back into per-level groups.
        for num in self.num_anchor_per_level:
            end = num + start
            anchor_results.append([(round(w), round(h))
                                   for w, h in anchors[start:end]])
            start = end
        self.logger.info(f'Anchor optimize result:{anchor_results}')
        if path:
            json_path = osp.join(path, 'anchor_optimize_result.json')
            dump(anchor_results, json_path)
            self.logger.info(f'Result saved in {json_path}')
class YOLOKMeansAnchorOptimizer(BaseAnchorOptimizer):
    r"""YOLO anchor optimizer using k-means. Code refer to `AlexeyAB/darknet.
    <https://github.com/AlexeyAB/darknet/blob/master/src/detector.c>`_.

    Clusters zero-centered ground-truth boxes with an EM-style k-means loop
    that uses IoU (rather than Euclidean distance) as the similarity measure.

    Args:
        iters (int): Maximum iterations for k-means.
    """

    def __init__(self, iters, **kwargs):
        super().__init__(**kwargs)
        self.iters = iters

    def optimize(self):
        anchors = self.kmeans_anchors()
        self.save_result(anchors, self.out_dir)

    def kmeans_anchors(self):
        """Run IoU-based k-means and return anchors sorted by area (small to
        large) as a list of (w, h) pairs."""
        self.logger.info(
            f'Start cluster {self.num_anchors} YOLO anchors with K-means...')
        bboxes = self.get_zero_center_bbox_tensor()
        # Initialize cluster centers from randomly chosen ground-truth boxes.
        cluster_center_idx = torch.randint(
            0, bboxes.shape[0], (self.num_anchors, )).to(self.device)
        assignments = torch.zeros((bboxes.shape[0], )).to(self.device)
        cluster_centers = bboxes[cluster_center_idx]
        if self.num_anchors == 1:
            # Single cluster: one maximization step (global mean) suffices.
            cluster_centers = self.kmeans_maximization(bboxes, assignments,
                                                       cluster_centers)
            anchors = bbox_xyxy_to_cxcywh(cluster_centers)[:, 2:].cpu().numpy()
            anchors = sorted(anchors, key=lambda x: x[0] * x[1])
            return anchors
        prog_bar = ProgressBar(self.iters)
        for i in range(self.iters):
            # E-step: reassign each box to its highest-IoU center.
            converged, assignments = self.kmeans_expectation(
                bboxes, assignments, cluster_centers)
            if converged:
                self.logger.info(f'K-means process has converged at iter {i}.')
                break
            # M-step: move each center to the mean of its assigned boxes.
            cluster_centers = self.kmeans_maximization(bboxes, assignments,
                                                       cluster_centers)
            prog_bar.update()
        print('\n')
        avg_iou = bbox_overlaps(bboxes,
                                cluster_centers).max(1)[0].mean().item()
        # Extract (w, h) from xyxy centers and sort by area.
        anchors = bbox_xyxy_to_cxcywh(cluster_centers)[:, 2:].cpu().numpy()
        anchors = sorted(anchors, key=lambda x: x[0] * x[1])
        self.logger.info(f'Anchor cluster finish. Average IOU: {avg_iou}')
        return anchors

    def kmeans_maximization(self, bboxes, assignments, centers):
        """Maximization part of EM algorithm(Expectation-Maximization)"""
        new_centers = torch.zeros_like(centers)
        for i in range(centers.shape[0]):
            mask = (assignments == i)
            # Empty clusters keep a zero center (not re-seeded).
            if mask.sum():
                new_centers[i, :] = bboxes[mask].mean(0)
        return new_centers

    def kmeans_expectation(self, bboxes, assignments, centers):
        """Expectation part of EM algorithm(Expectation-Maximization)"""
        ious = bbox_overlaps(bboxes, centers)
        closest = ious.argmax(1)
        # Converged when no box changed its assigned cluster.
        converged = (closest == assignments).all()
        return converged, closest
class YOLOV5KMeansAnchorOptimizer(BaseAnchorOptimizer):
    r"""YOLOv5 anchor optimizer using shape k-means.

    Code refer to `ultralytics/yolov5.
    <https://github.com/ultralytics/yolov5/blob/master/utils/autoanchor.py>`_.

    Initializes anchors with (whitened) scipy k-means when available and then
    refines them with a genetic algorithm driven by a width/height-ratio
    fitness, matching the YOLOv5 auto-anchor procedure.

    Args:
        iters (int): Maximum iterations for k-means.
        prior_match_thr (float): anchor-label width height
            ratio threshold hyperparameter.
    """

    def __init__(self,
                 iters,
                 prior_match_thr=4.0,
                 mutation_args=[0.9, 0.1],
                 augment_args=[0.9, 1.1],
                 **kwargs):
        super().__init__(**kwargs)
        self.iters = iters
        self.prior_match_thr = prior_match_thr
        # Genetic-algorithm mutation probability and noise scale.
        [self.mutation_prob, self.mutation_sigma] = mutation_args
        # Random size-jitter range used when evaluating the final anchors.
        [self.augment_min, self.augment_max] = augment_args

    def optimize(self):
        self.logger.info(
            f'Start cluster {self.num_anchors} YOLOv5 anchors with K-means...')
        bbox_whs = torch.from_numpy(self.bbox_whs).to(
            self.device, dtype=torch.float32)
        anchors = self.anchor_generate(
            bbox_whs,
            num=self.num_anchors,
            img_size=self.input_shape[0],
            prior_match_thr=self.prior_match_thr,
            iters=self.iters)
        best_ratio, mean_matched = self.anchor_metric(bbox_whs, anchors)
        self.logger.info(f'{mean_matched:.2f} anchors/target {best_ratio:.3f} '
                         'Best Possible Recall (BPR). ')
        self.save_result(anchors.tolist(), self.out_dir)

    def anchor_generate(self,
                        box_size: Tensor,
                        num: int = 9,
                        img_size: int = 640,
                        prior_match_thr: float = 4.0,
                        iters: int = 1000) -> Tensor:
        """cluster boxes metric with anchors.

        Args:
            box_size (Tensor): The size of the bxes, which shape is
                (box_num, 2),the number 2 means width and height.
            num (int): number of anchors.
            img_size (int): image size used for training
            prior_match_thr (float): width/height ratio threshold
                used for training
            iters (int): iterations to evolve anchors using genetic algorithm

        Returns:
            anchors (Tensor): kmeans evolved anchors
        """
        thr = 1 / prior_match_thr
        # step1: filter small bbox
        box_size = self._filter_box(box_size)
        assert num <= len(box_size)
        # step2: init anchors
        if kmeans:
            try:
                self.logger.info(
                    'beginning init anchors with scipy kmeans method')
                # sigmas for whitening
                sigmas = box_size.std(0).cpu().numpy()
                anchors = kmeans(
                    box_size.cpu().numpy() / sigmas, num, iter=30)[0] * sigmas
                # kmeans may return fewer points than requested
                # if width/height is insufficient or too similar
                assert num == len(anchors)
            except Exception:
                self.logger.warning(
                    'scipy kmeans method cannot get enough points '
                    'because of width/height is insufficient or too similar, '
                    'now switching strategies from kmeans to random init.')
                anchors = np.sort(np.random.rand(num * 2)).reshape(
                    num, 2) * img_size
        else:
            # scipy is not installed; fall back to sorted random sizes.
            self.logger.info(
                'cannot found scipy package, switching strategies from kmeans '
                'to random init, you can install scipy package to '
                'get better anchor init')
            anchors = np.sort(np.random.rand(num * 2)).reshape(num,
                                                               2) * img_size
        self.logger.info('init done, beginning evolve anchors...')
        # sort small to large
        anchors = torch.tensor(anchors[np.argsort(anchors.prod(1))]).to(
            box_size.device, dtype=torch.float32)
        # step3: evolve anchors use Genetic Algorithm
        prog_bar = ProgressBar(iters)
        fitness = self._anchor_fitness(box_size, anchors, thr)
        cluster_shape = anchors.shape
        for _ in range(iters):
            mutate_result = np.ones(cluster_shape)
            # mutate until a change occurs (prevent duplicates)
            while (mutate_result == 1).all():
                # mutate_result is scale factor of anchors, between 0.3 and 3
                mutate_result = (
                    (np.random.random(cluster_shape) < self.mutation_prob) *
                    random.random() * np.random.randn(*cluster_shape) *
                    self.mutation_sigma + 1).clip(0.3, 3.0)
            mutate_result = torch.from_numpy(mutate_result).to(box_size.device)
            new_anchors = (anchors.clone() * mutate_result).clip(min=2.0)
            new_fitness = self._anchor_fitness(box_size, new_anchors, thr)
            # Greedy hill-climbing: keep the mutation only if it improves.
            if new_fitness > fitness:
                fitness = new_fitness
                anchors = new_anchors.clone()
            prog_bar.update()
        print('\n')
        # sort small to large
        anchors = anchors[torch.argsort(anchors.prod(1))]
        self.logger.info(f'Anchor cluster finish. fitness = {fitness:.4f}')
        return anchors

    def anchor_metric(self,
                      box_size: Tensor,
                      anchors: Tensor,
                      threshold: float = 4.0) -> Tuple:
        """compute boxes metric with anchors.

        Args:
            box_size (Tensor): The size of the bxes, which shape
                is (box_num, 2), the number 2 means width and height.
            anchors (Tensor): The size of the bxes, which shape
                is (anchor_num, 2), the number 2 means width and height.
            threshold (float): the compare threshold of ratio

        Returns:
            Tuple: a tuple of metric result, best_ratio_mean and mean_matched
        """
        # step1: augment scale
        # According to the uniform distribution,the scaling scale between
        # augment_min and augment_max is randomly generated
        scale = np.random.uniform(
            self.augment_min, self.augment_max, size=(box_size.shape[0], 1))
        box_size = torch.tensor(
            np.array(
                [l[:, ] * s for s, l in zip(scale,
                                            box_size.cpu().numpy())])).to(
                                                box_size.device,
                                                dtype=torch.float32)
        # step2: calculate ratio
        min_ratio, best_ratio = self._metric(box_size, anchors)
        # mean number of anchors matching each box above the ratio threshold,
        # and fraction of boxes whose best anchor passes the threshold (BPR).
        mean_matched = (min_ratio > 1 / threshold).float().sum(1).mean()
        best_ratio_mean = (best_ratio > 1 / threshold).float().mean()
        return best_ratio_mean, mean_matched

    def _filter_box(self, box_size: Tensor) -> Tensor:
        # Warn about boxes under 3px, then drop boxes under 2px in both dims.
        small_cnt = (box_size < 3.0).any(1).sum()
        if small_cnt:
            self.logger.warning(
                f'Extremely small objects found: {small_cnt} '
                f'of {len(box_size)} labels are <3 pixels in size')
        # filter > 2 pixels
        filter_sizes = box_size[(box_size >= 2.0).any(1)]
        return filter_sizes

    def _anchor_fitness(self, box_size: Tensor, anchors: Tensor, thr: float):
        """mutation fitness."""
        _, best = self._metric(box_size, anchors)
        # Mean best-ratio over boxes whose best anchor exceeds the threshold.
        return (best * (best > thr).float()).mean()

    def _metric(self, box_size: Tensor, anchors: Tensor) -> Tuple:
        """compute boxes metric with anchors.

        Args:
            box_size (Tensor): The size of the bxes, which shape is
                (box_num, 2), the number 2 means width and height.
            anchors (Tensor): The size of the bxes, which shape is
                (anchor_num, 2), the number 2 means width and height.

        Returns:
            Tuple: a tuple of metric result, min_ratio and best_ratio
        """
        # ratio means the (width_1/width_2 and height_1/height_2) ratio of each
        # box and anchor, the ratio shape is torch.Size([box_num,anchor_num,2])
        ratio = box_size[:, None] / anchors[None]
        # min_ratio records the min ratio of each box with all anchor,
        # min_ratio.shape is torch.Size([box_num,anchor_num])
        # notice:
        # smaller ratio means worse shape-match between boxes and anchors
        min_ratio = torch.min(ratio, 1 / ratio).min(2)[0]
        # find the best shape-match ratio for each box
        # box_best_ratio.shape is torch.Size([box_num])
        best_ratio = min_ratio.max(1)[0]
        return min_ratio, best_ratio
class YOLODEAnchorOptimizer(BaseAnchorOptimizer):
    """YOLO anchor optimizer using differential evolution algorithm.

    Searches anchor widths/heights directly with
    :func:`scipy.optimize.differential_evolution`, minimizing
    ``1 - mean(max IoU)`` between ground-truth boxes and anchors.

    Args:
        iters (int): Maximum iterations for k-means.
        strategy (str): The differential evolution strategy to use.
            Should be one of:
                - 'best1bin'
                - 'best1exp'
                - 'rand1exp'
                - 'randtobest1exp'
                - 'currenttobest1exp'
                - 'best2exp'
                - 'rand2exp'
                - 'randtobest1bin'
                - 'currenttobest1bin'
                - 'best2bin'
                - 'rand2bin'
                - 'rand1bin'
            Default: 'best1bin'.
        population_size (int): Total population size of evolution algorithm.
            Default: 15.
        convergence_thr (float): Tolerance for convergence, the
            optimizing stops when ``np.std(pop) <= abs(convergence_thr)
            + convergence_thr * np.abs(np.mean(population_energies))``,
            respectively. Default: 0.0001.
        mutation (tuple[float]): Range of dithering randomly changes the
            mutation constant. Default: (0.5, 1).
        recombination (float): Recombination constant of crossover probability.
            Default: 0.7.
    """

    def __init__(self,
                 iters,
                 strategy='best1bin',
                 population_size=15,
                 convergence_thr=0.0001,
                 mutation=(0.5, 1),
                 recombination=0.7,
                 **kwargs):
        super().__init__(**kwargs)
        self.iters = iters
        self.strategy = strategy
        self.population_size = population_size
        self.convergence_thr = convergence_thr
        self.mutation = mutation
        self.recombination = recombination

    def optimize(self):
        anchors = self.differential_evolution()
        self.save_result(anchors, self.out_dir)

    def differential_evolution(self):
        """Run scipy's differential evolution over flattened (w, h) pairs and
        return area-sorted anchors as a list of (w, h) tuples."""
        bboxes = self.get_zero_center_bbox_tensor()
        # One (width, height) bound pair per anchor, flattened as
        # [w0, h0, w1, h1, ...], each bounded by the input shape.
        bounds = []
        for i in range(self.num_anchors):
            bounds.extend([(0, self.input_shape[0]), (0, self.input_shape[1])])
        result = differential_evolution(
            func=self.avg_iou_cost,
            bounds=bounds,
            args=(bboxes, ),
            strategy=self.strategy,
            maxiter=self.iters,
            popsize=self.population_size,
            tol=self.convergence_thr,
            mutation=self.mutation,
            recombination=self.recombination,
            updating='immediate',
            disp=True)
        # result.fun is the minimized cost = 1 - average best IoU.
        self.logger.info(
            f'Anchor evolution finish. Average IOU: {1 - result.fun}')
        anchors = [(w, h) for w, h in zip(result.x[::2], result.x[1::2])]
        anchors = sorted(anchors, key=lambda x: x[0] * x[1])
        return anchors

    @staticmethod
    def avg_iou_cost(anchor_params, bboxes):
        """Cost function: 1 - mean over boxes of their best IoU with any
        anchor (lower is better)."""
        assert len(anchor_params) % 2 == 0
        anchor_whs = torch.tensor(
            [[w, h]
             for w, h in zip(anchor_params[::2], anchor_params[1::2])]).to(
                 bboxes.device, dtype=bboxes.dtype)
        # Zero-centered xyxy anchors so IoU depends only on shape.
        anchor_boxes = bbox_cxcywh_to_xyxy(
            torch.cat([torch.zeros_like(anchor_whs), anchor_whs], dim=1))
        ious = bbox_overlaps(bboxes, anchor_boxes)
        max_ious, _ = ious.max(1)
        cost = 1 - max_ious.mean().item()
        return cost
def main():
    """Entry point: build the dataset, select the requested anchor optimizer
    and run it.

    Supported ``--algorithm`` values are ``k-means``, ``DE`` (differential
    evolution) and ``v5-k-means``.
    """
    logger = MMLogger.get_current_instance()
    args = parse_args()
    cfg = args.config
    cfg = Config.fromfile(cfg)

    # replace the ${key} with the value of cfg.key
    cfg = replace_cfg_vals(cfg)

    # update data root according to MMDET_DATASETS
    update_data_root(cfg)

    init_default_scope(cfg.get('default_scope', 'mmyolo'))

    input_shape = args.input_shape
    assert len(input_shape) == 2

    anchor_type = cfg.model.bbox_head.prior_generator.type
    assert anchor_type == 'mmdet.YOLOAnchorGenerator', \
        f'Only support optimize YOLOAnchor, but get {anchor_type}.'

    base_sizes = cfg.model.bbox_head.prior_generator.base_sizes
    num_anchor_per_level = [len(sizes) for sizes in base_sizes]

    # Unwrap nested dataset wrappers (e.g. RepeatDataset) until the innermost
    # dataset config is reached.
    train_data_cfg = cfg.train_dataloader
    while 'dataset' in train_data_cfg:
        train_data_cfg = train_data_cfg['dataset']
    dataset = DATASETS.build(train_data_cfg)

    if args.algorithm == 'k-means':
        optimizer = YOLOKMeansAnchorOptimizer(
            dataset=dataset,
            input_shape=input_shape,
            device=args.device,
            num_anchor_per_level=num_anchor_per_level,
            iters=args.iters,
            logger=logger,
            out_dir=args.out_dir)
    elif args.algorithm == 'DE':
        optimizer = YOLODEAnchorOptimizer(
            dataset=dataset,
            input_shape=input_shape,
            device=args.device,
            num_anchor_per_level=num_anchor_per_level,
            iters=args.iters,
            logger=logger,
            out_dir=args.out_dir)
    elif args.algorithm == 'v5-k-means':
        optimizer = YOLOV5KMeansAnchorOptimizer(
            dataset=dataset,
            input_shape=input_shape,
            device=args.device,
            num_anchor_per_level=num_anchor_per_level,
            iters=args.iters,
            prior_match_thr=args.prior_match_thr,
            mutation_args=args.mutation_args,
            augment_args=args.augment_args,
            logger=logger,
            out_dir=args.out_dir)
    else:
        # Fixed: the previous message named 'differential_evolution' and
        # omitted 'v5-k-means', but the accepted values are the ones below.
        raise NotImplementedError(
            f'Only support k-means, DE and v5-k-means, '
            f'but get {args.algorithm}')

    optimizer.optimize()


if __name__ == '__main__':
    main()
| 24,296 | 36.49537 | 79 | py |
mmyolo | mmyolo-main/tools/analysis_tools/get_flops.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import tempfile
from pathlib import Path
import torch
from mmdet.registry import MODELS
from mmengine.analysis import get_model_complexity_info
from mmengine.config import Config, DictAction
from mmengine.logging import MMLogger
from mmengine.model import revert_sync_batchnorm
from mmengine.registry import init_default_scope
from mmyolo.utils import switch_to_deploy
def parse_args():
    """Parse command line arguments for the FLOPs calculator.

    Returns:
        argparse.Namespace: Parsed arguments with ``config``, ``shape``,
        ``show_arch``, ``not_show_table`` and ``cfg_options``.
    """
    parser = argparse.ArgumentParser(description='Get a detector flops')
    parser.add_argument('config', help='train config file path')
    parser.add_argument(
        '--shape',
        type=int,
        nargs='+',
        default=[640, 640],
        help='input image size')
    parser.add_argument(
        '--show-arch',
        action='store_true',
        help='whether return the statistics in the form of network layers')
    parser.add_argument(
        '--not-show-table',
        action='store_true',
        # Fixed: removed a stray trailing comma after this call which
        # created a pointless one-element tuple expression.
        help='whether return the statistics in the form of table')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    return parser.parse_args()
def inference(args, logger):
    """Build the model from config and measure its complexity on one input.

    Args:
        args (argparse.Namespace): Parsed CLI arguments (``config``, ``shape``,
            ``show_arch``, ``not_show_table``, ``cfg_options``).
        logger: Logger used to report a missing config file.

    Returns:
        dict: With keys ``ori_shape``, ``pad_shape``, ``flops``, ``params``,
        ``out_table`` and ``out_arch``.
    """
    config_name = Path(args.config)
    if not config_name.exists():
        logger.error(f'{config_name} not found.')

    cfg = Config.fromfile(args.config)
    # NOTE(review): the TemporaryDirectory object is not kept alive, so the
    # directory may be cleaned up as soon as it is garbage-collected; only the
    # path string is stored here — confirm work_dir is never actually written.
    cfg.work_dir = tempfile.TemporaryDirectory().name
    cfg.log_level = 'WARN'
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    init_default_scope(cfg.get('default_scope', 'mmyolo'))

    # A single int means a square input; two ints are (height, width).
    if len(args.shape) == 1:
        h = w = args.shape[0]
    elif len(args.shape) == 2:
        h, w = args.shape
    else:
        raise ValueError('invalid input shape')

    # model
    model = MODELS.build(cfg.model)
    if torch.cuda.is_available():
        model.cuda()
    model = revert_sync_batchnorm(model)
    model.eval()
    switch_to_deploy(model)

    # input tensor
    # automatically generate a input tensor with the given input_shape.
    data_batch = {'inputs': [torch.rand(3, h, w)], 'batch_samples': [None]}
    # The preprocessor may pad/resize, so pad_shape can differ from (h, w).
    data = model.data_preprocessor(data_batch)
    result = {'ori_shape': (h, w), 'pad_shape': data['inputs'].shape[-2:]}
    outputs = get_model_complexity_info(
        model,
        input_shape=None,
        inputs=data['inputs'],  # the input tensor of the model
        show_table=not args.not_show_table,  # show the complexity table
        show_arch=args.show_arch)  # show the complexity arch
    result['flops'] = outputs['flops_str']
    result['params'] = outputs['params_str']
    result['out_table'] = outputs['out_table']
    result['out_arch'] = outputs['out_arch']
    return result
def main():
    """Compute and print FLOPs/parameter statistics for the configured
    model."""
    args = parse_args()
    logger = MMLogger.get_instance(name='MMLogger')
    result = inference(args, logger)

    divider = '=' * 30
    ori_shape, pad_shape = result['ori_shape'], result['pad_shape']

    # Detailed breakdowns first, then the summary banner.
    print(result['out_table'])  # per-module complexity table
    print(result['out_arch'])  # complexity annotated on network layers
    if pad_shape != ori_shape:
        print(f'{divider}\nUse size divisor set input shape '
              f'from {ori_shape} to {pad_shape}')
    print(f'{divider}\n'
          f"Input shape: {pad_shape}\nModel Flops: {result['flops']}\n"
          f"Model Parameters: {result['params']}\n{divider}")
    print('!!!Please be cautious if you use the results in papers. '
          'You may need to check if all ops are supported and verify '
          'that the flops computation is correct.')


if __name__ == '__main__':
    main()
| 4,085 | 31.951613 | 78 | py |
mmyolo | mmyolo-main/tools/analysis_tools/vis_scheduler.py | # Copyright (c) OpenMMLab. All rights reserved.
"""Hyper-parameter Scheduler Visualization.
This tool aims to help the user to check
the hyper-parameter scheduler of the optimizer(without training),
which support the "learning rate", "momentum", and "weight_decay".
Example:
```shell
python tools/analysis_tools/vis_scheduler.py \
configs/rtmdet/rtmdet_s_syncbn_fast_8xb32-300e_coco.py \
--dataset-size 118287 \
--ngpus 8 \
--out-dir ./output
```
Modified from: https://github.com/open-mmlab/mmclassification/blob/1.x/tools/visualizations/vis_scheduler.py # noqa
"""
import argparse
import json
import os.path as osp
import re
from pathlib import Path
from unittest.mock import MagicMock
import matplotlib.pyplot as plt
import rich
import torch.nn as nn
from mmengine.config import Config, DictAction
from mmengine.hooks import Hook
from mmengine.model import BaseModel
from mmengine.registry import init_default_scope
from mmengine.runner import Runner
from mmengine.utils.path import mkdir_or_exist
from mmengine.visualization import Visualizer
from rich.progress import BarColumn, MofNCompleteColumn, Progress, TextColumn
def parse_args():
    """Parse command line arguments for the scheduler visualizer and validate
    the ``--window-size`` format."""
    parser = argparse.ArgumentParser(
        description='Visualize a hyper-parameter scheduler')
    add = parser.add_argument
    add('config', help='config file path')
    add('-p',
        '--parameter',
        type=str,
        default='lr',
        choices=['lr', 'momentum', 'wd'],
        help='The parameter to visualize its change curve, choose from'
        '"lr", "wd" and "momentum". Defaults to "lr".')
    add('-d',
        '--dataset-size',
        type=int,
        help='The size of the dataset. If specify, `DATASETS.build` will '
        'be skipped and use this size as the dataset size.')
    add('-n',
        '--ngpus',
        type=int,
        default=1,
        help='The number of GPUs used in training.')
    add('-o', '--out-dir', type=Path, help='Path to output file')
    add('--log-level',
        default='WARNING',
        help='The log level of the handler and logger. Defaults to '
        'WARNING.')
    add('--title', type=str, help='title of figure')
    add('--style', type=str, default='whitegrid', help='style of plt')
    add('--not-show', default=False, action='store_true')
    add('--window-size',
        default='12*7',
        help='Size of the window to display images, in format of "$W*$H".')
    add('--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    args = parser.parse_args()

    # A non-empty window size must look like "<width>*<height>".
    if args.window_size != '':
        assert re.match(r'\d+\*\d+', args.window_size), \
            "'window-size' must be in format 'W*H'."
    return args
class SimpleModel(BaseModel):
    """simple model that do nothing in train_step.

    Used as a stand-in so the optimizer/scheduler machinery can run without
    doing any real forward or backward computation.
    """

    def __init__(self):
        super().__init__()
        self.data_preprocessor = nn.Identity()
        # A single tiny layer so the optimizer has a parameter to manage.
        self.conv = nn.Conv2d(1, 1, 1)

    def forward(self, inputs, data_samples, mode='tensor'):
        # Intentionally a no-op for the simulated run.
        pass

    def train_step(self, data, optim_wrapper):
        # Intentionally empty: no loss, no backward, no optimizer step.
        pass
class ParamRecordHook(Hook):
    """Hook that records lr/momentum/weight-decay at every training iteration
    and drives a rich progress bar.

    Args:
        by_epoch (bool): Whether the training loop is epoch-based; controls
            whether the progress bar advances per epoch or per iteration.
    """

    def __init__(self, by_epoch):
        super().__init__()
        self.by_epoch = by_epoch
        # Per-iteration histories, filled in after_train_iter.
        self.lr_list = []
        self.momentum_list = []
        self.wd_list = []
        self.task_id = 0
        self.progress = Progress(BarColumn(), MofNCompleteColumn(),
                                 TextColumn('{task.description}'))

    def before_train(self, runner):
        # Size the progress bar by epochs or iterations depending on the loop.
        if self.by_epoch:
            total = runner.train_loop.max_epochs
            self.task_id = self.progress.add_task(
                'epochs', start=True, total=total)
        else:
            total = runner.train_loop.max_iters
            self.task_id = self.progress.add_task(
                'iters', start=True, total=total)
        self.progress.start()

    def after_train_epoch(self, runner):
        if self.by_epoch:
            self.progress.update(self.task_id, advance=1)

    # TODO: Support multiple schedulers
    def after_train_iter(self, runner, batch_idx, data_batch, outputs):
        if not self.by_epoch:
            self.progress.update(self.task_id, advance=1)
        # Only the first parameter group is recorded.
        self.lr_list.append(runner.optim_wrapper.get_lr()['lr'][0])
        self.momentum_list.append(
            runner.optim_wrapper.get_momentum()['momentum'][0])
        self.wd_list.append(
            runner.optim_wrapper.param_groups[0]['weight_decay'])

    def after_train(self, runner):
        self.progress.stop()
def plot_curve(lr_list, args, param_name, iters_per_epoch, by_epoch=True):
    """Plot learning rate vs iter graph.

    Draws the per-iteration values of ``lr_list`` onto the current matplotlib
    figure; saving/showing is left to the caller.

    Args:
        lr_list (list[float]): One recorded value per training iteration.
        args (argparse.Namespace): Parsed CLI arguments (uses ``style``,
            ``window_size``, ``title``, ``config``).
        param_name (str): Y-axis label, e.g. 'Learning Rate'.
        iters_per_epoch (int): Used to add a secondary epoch axis.
        by_epoch (bool): Whether to show the secondary epoch axis.
    """
    # seaborn is optional; silently keep matplotlib defaults without it.
    try:
        import seaborn as sns
        sns.set_style(args.style)
    except ImportError:
        pass

    wind_w, wind_h = args.window_size.split('*')
    wind_w, wind_h = int(wind_w), int(wind_h)
    plt.figure(figsize=(wind_w, wind_h))

    ax: plt.Axes = plt.subplot()
    ax.plot(lr_list, linewidth=1)

    if by_epoch:
        # Iterations on the top axis, epochs on a secondary bottom axis
        # linked via iters_per_epoch.
        ax.xaxis.tick_top()
        ax.set_xlabel('Iters')
        ax.xaxis.set_label_position('top')
        sec_ax = ax.secondary_xaxis(
            'bottom',
            functions=(lambda x: x / iters_per_epoch,
                       lambda y: y * iters_per_epoch))
        sec_ax.set_xlabel('Epochs')
    else:
        plt.xlabel('Iters')
    plt.ylabel(param_name)

    if args.title is None:
        plt.title(f'{osp.basename(args.config)} {param_name} curve')
    else:
        plt.title(args.title)
def simulate_train(data_loader, cfg, by_epoch):
    """Run a dummy training loop to record the scheduler's parameter values.

    Uses :class:`SimpleModel` (no real computation) so only the optimizer and
    param-scheduler hooks execute; a :class:`ParamRecordHook` captures lr,
    momentum and weight decay per iteration.

    Returns:
        dict: ``{'lr': [...], 'momentum': [...], 'wd': [...]}`` with one
        entry per training iteration.
    """
    model = SimpleModel()
    param_record_hook = ParamRecordHook(by_epoch=by_epoch)
    # Disable all default hooks except the param scheduler; add the recorder.
    default_hooks = dict(
        param_scheduler=cfg.default_hooks['param_scheduler'],
        runtime_info=None,
        timer=None,
        logger=None,
        checkpoint=None,
        sampler_seed=None,
        param_record=param_record_hook)

    runner = Runner(
        model=model,
        work_dir=cfg.work_dir,
        train_dataloader=data_loader,
        train_cfg=cfg.train_cfg,
        log_level=cfg.log_level,
        optim_wrapper=cfg.optim_wrapper,
        param_scheduler=cfg.param_scheduler,
        default_scope=cfg.default_scope,
        default_hooks=default_hooks,
        # A mocked visualizer suppresses any real visualization backend.
        visualizer=MagicMock(spec=Visualizer),
        custom_hooks=cfg.get('custom_hooks', None))
    runner.train()

    param_dict = dict(
        lr=param_record_hook.lr_list,
        momentum=param_record_hook.momentum_list,
        wd=param_record_hook.wd_list)
    return param_dict
def main():
    """Entry point: simulate training from the config and plot the requested
    hyper-parameter curve (lr / momentum / weight decay)."""
    args = parse_args()
    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    if cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(args.config))[0])
    cfg.log_level = args.log_level
    init_default_scope(cfg.get('default_scope', 'mmyolo'))

    # init logger
    print('Param_scheduler :')
    rich.print_json(json.dumps(cfg.param_scheduler))

    # prepare data loader
    batch_size = cfg.train_dataloader.batch_size * args.ngpus

    # Determine whether the train loop is epoch-based from the config.
    if 'by_epoch' in cfg.train_cfg:
        by_epoch = cfg.train_cfg.get('by_epoch')
    elif 'type' in cfg.train_cfg:
        by_epoch = cfg.train_cfg.get('type') == 'EpochBasedTrainLoop'
    else:
        raise ValueError('please set `train_cfg`.')

    # Build the real dataset only when its size is actually needed.
    if args.dataset_size is None and by_epoch:
        from mmyolo.registry import DATASETS
        dataset_size = len(DATASETS.build(cfg.train_dataloader.dataset))
    else:
        dataset_size = args.dataset_size or batch_size

    # A list of dummy batches whose length gives iterations per epoch;
    # the mocked `dataset` attribute satisfies the Runner's interface.
    class FakeDataloader(list):
        dataset = MagicMock(metainfo=None)

    data_loader = FakeDataloader(range(dataset_size // batch_size))
    dataset_info = (
        f'\nDataset infos:'
        f'\n - Dataset size: {dataset_size}'
        f'\n - Batch size per GPU: {cfg.train_dataloader.batch_size}'
        f'\n - Number of GPUs: {args.ngpus}'
        f'\n - Total batch size: {batch_size}')
    if by_epoch:
        dataset_info += f'\n - Iterations per epoch: {len(data_loader)}'
    rich.print(dataset_info + '\n')

    # simulation training process
    param_dict = simulate_train(data_loader, cfg, by_epoch)
    param_list = param_dict[args.parameter]

    if args.parameter == 'lr':
        param_name = 'Learning Rate'
    elif args.parameter == 'momentum':
        param_name = 'Momentum'
    else:
        param_name = 'Weight Decay'
    plot_curve(param_list, args, param_name, len(data_loader), by_epoch)

    if args.out_dir:
        # make dir for output
        mkdir_or_exist(args.out_dir)
        # save the graph
        out_file = osp.join(
            args.out_dir, f'{osp.basename(args.config)}-{args.parameter}.jpg')
        plt.savefig(out_file)
        print(f'\nThe {param_name} graph is saved at {out_file}')

    if not args.not_show:
        plt.show()


if __name__ == '__main__':
    main()
| 9,574 | 31.347973 | 115 | py |
mmyolo | mmyolo-main/projects/assigner_visualization/assigner_visualization.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import sys
import warnings
import mmcv
import numpy as np
import torch
from mmengine import ProgressBar
from mmengine.config import Config, DictAction
from mmengine.dataset import COLLATE_FUNCTIONS
from mmengine.runner.checkpoint import load_checkpoint
from numpy import random
from mmyolo.registry import DATASETS, MODELS
from mmyolo.utils import register_all_modules
from projects.assigner_visualization.dense_heads import (RTMHeadAssigner,
YOLOv5HeadAssigner,
YOLOv7HeadAssigner,
YOLOv8HeadAssigner)
from projects.assigner_visualization.visualization import \
YOLOAssignerVisualizer
def parse_args():
    """Build the CLI parser for the assigner-visualization script and parse
    ``sys.argv``.

    Returns:
        argparse.Namespace: the parsed command-line options.
    """
    arg_parser = argparse.ArgumentParser(
        description='MMYOLO show the positive sample assigning'
        ' results.')
    # Required positional: path to the model/dataset config.
    arg_parser.add_argument('config', help='config file path')
    arg_parser.add_argument(
        '--checkpoint', '-c', type=str, help='checkpoint file')
    arg_parser.add_argument(
        '--show-number',
        '-n',
        type=int,
        default=sys.maxsize,
        help='number of images selected to save, '
        'must bigger than 0. if the number is bigger than length '
        'of dataset, show all the images in dataset; '
        'default "sys.maxsize", show all images in dataset')
    arg_parser.add_argument(
        '--output-dir',
        default='assigned_results',
        type=str,
        help='The name of the folder where the image is saved.')
    arg_parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference.')
    # Two boolean switches controlling what gets rendered.
    arg_parser.add_argument(
        '--show-prior',
        default=False,
        action='store_true',
        help='Whether to show prior on image.')
    arg_parser.add_argument(
        '--not-show-label',
        default=False,
        action='store_true',
        help='Whether to show label on image.')
    arg_parser.add_argument(
        '--seed', default=-1, type=int, help='random seed')
    # DictAction parses key=value overrides into a dict merged onto the config.
    arg_parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    return arg_parser.parse_args()
def main():
    """Visualize positive-sample assigning results.

    Builds the model and the train dataset from the given config, runs the
    head's label assignment on each prepared image, renders the matched
    priors/gt boxes with ``YOLOAssignerVisualizer`` and writes one image per
    sample into ``args.output_dir``.
    """
    args = parse_args()
    register_all_modules()
    # set random seed
    seed = int(args.seed)
    if seed != -1:
        print(f'Set the global seed: {seed}')
        random.seed(int(args.seed))
    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # build model
    model = MODELS.build(cfg.model)
    if args.checkpoint is not None:
        load_checkpoint(model, args.checkpoint)
    elif isinstance(model.bbox_head, (YOLOv7HeadAssigner, YOLOv8HeadAssigner,
                                      RTMHeadAssigner)):
        # Dynamic assigners depend on the model's predictions, so results are
        # meaningless with random weights. YOLOv8HeadAssigner added here: it
        # is also a dynamic assigner (its assignment consumes cls/bbox
        # predictions) and the warning text below already names YOLOv8.
        warnings.warn(
            'if you use dynamic_assignment methods such as YOLOv7 or '
            'YOLOv8 or RTMDet assigner, please load the checkpoint.')
    assert isinstance(model.bbox_head, (YOLOv5HeadAssigner,
                                        YOLOv7HeadAssigner,
                                        YOLOv8HeadAssigner,
                                        RTMHeadAssigner)), \
        'Now, this script only support YOLOv5, YOLOv7, YOLOv8 and RTMdet, ' \
        'and bbox_head must use ' \
        '`YOLOv5HeadAssigner or YOLOv7HeadAssigne or YOLOv8HeadAssigner ' \
        'or RTMHeadAssigner`. Please use `' \
        'yolov5_s-v61_syncbn_fast_8xb16-300e_coco_assignervisualization.py' \
        'or yolov7_tiny_syncbn_fast_8x16b-300e_coco_assignervisualization.py' \
        'or yolov8_s_syncbn_fast_8xb16-500e_coco_assignervisualization.py' \
        'or rtmdet_s_syncbn_fast_8xb32-300e_coco_assignervisualization.py' \
        """` as config file."""
    model.eval()
    model.to(args.device)
    # build dataset
    dataset_cfg = cfg.get('train_dataloader').get('dataset')
    dataset = DATASETS.build(dataset_cfg)
    # get collate_fn
    collate_fn_cfg = cfg.get('train_dataloader').pop(
        'collate_fn', dict(type='pseudo_collate'))
    collate_fn_type = collate_fn_cfg.pop('type')
    collate_fn = COLLATE_FUNCTIONS.get(collate_fn_type)
    # init visualizer
    visualizer = YOLOAssignerVisualizer(
        vis_backends=[{
            'type': 'LocalVisBackend'
        }], name='visualizer')
    visualizer.dataset_meta = dataset.metainfo
    # need priors size to draw priors
    if hasattr(model.bbox_head.prior_generator, 'base_anchors'):
        visualizer.priors_size = model.bbox_head.prior_generator.base_anchors
    # make output dir
    os.makedirs(args.output_dir, exist_ok=True)
    print('Results will save to ', args.output_dir)
    # init visualization image number
    assert args.show_number > 0
    display_number = min(args.show_number, len(dataset))
    progress_bar = ProgressBar(display_number)
    for ind_img in range(display_number):
        data = dataset.prepare_data(ind_img)
        if data is None:
            # Strong augmentations (e.g. mosaic with no remaining gt) can
            # yield nothing to visualize for this sample.
            print('Unable to visualize {} due to strong data augmentations'.
                  format(dataset[ind_img]['data_samples'].img_path))
            continue
        # convert data to batch format
        batch_data = collate_fn([data])
        with torch.no_grad():
            assign_results = model.assign(batch_data)
        # CHW tensor -> HWC uint8 image for drawing.
        img = data['inputs'].cpu().numpy().astype(np.uint8).transpose(
            (1, 2, 0))
        # bgr2rgb
        img = mmcv.bgr2rgb(img)
        gt_instances = data['data_samples'].gt_instances
        img_show = visualizer.draw_assign(img, assign_results, gt_instances,
                                          args.show_prior, args.not_show_label)
        if hasattr(data['data_samples'], 'img_path'):
            filename = osp.basename(data['data_samples'].img_path)
        else:
            # some dataset have not image path
            filename = f'{ind_img}.jpg'
        out_file = osp.join(args.output_dir, filename)
        # convert rgb 2 bgr and save img
        mmcv.imwrite(mmcv.rgb2bgr(img_show), out_file)
        progress_bar.update()
# Script entry point: render and save assigner visualizations.
if __name__ == '__main__':
    main()
| 6,558 | 35.848315 | 79 | py |
mmyolo | mmyolo-main/projects/assigner_visualization/visualization/assigner_visualizer.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
from typing import List, Union
import mmcv
import numpy as np
import torch
from mmdet.structures.bbox import HorizontalBoxes
from mmdet.visualization import DetLocalVisualizer
from mmdet.visualization.palette import _get_adaptive_scales, get_palette
from mmengine.structures import InstanceData
from torch import Tensor
from mmyolo.registry import VISUALIZERS
@VISUALIZERS.register_module()
class YOLOAssignerVisualizer(DetLocalVisualizer):
    """MMYOLO Detection Assigner Visualizer.

    Renders, on top of an input image, the feature-map grid, the gt boxes
    that received positive samples, the positive sample centers and
    (optionally) the matched priors.
    This class is provided to the `assigner_visualization.py` script.

    Args:
        name (str): Name of the instance. Defaults to 'visualizer'.
    """
    def __init__(self, name: str = 'visualizer', *args, **kwargs):
        super().__init__(name=name, *args, **kwargs)
        # Per-level base prior sizes; the driver script assigns this from
        # `model.bbox_head.prior_generator.base_anchors` when available.
        # Stays None for anchor-free heads (a stride-sized fallback is used).
        self.priors_size = None
    def draw_grid(self,
                  stride: int = 8,
                  line_styles: Union[str, List[str]] = ':',
                  colors: Union[str, tuple, List[str],
                                List[tuple]] = (180, 180, 180),
                  line_widths: Union[Union[int, float],
                                     List[Union[int, float]]] = 1):
        """Draw grids on image.
        Args:
            stride (int): Downsample factor of feature map.
            line_styles (Union[str, List[str]]): The linestyle
                of lines. ``line_styles`` can have the same length with
                texts or just single value. If ``line_styles`` is single
                value, all the lines will have the same linestyle.
                Reference to
                https://matplotlib.org/stable/api/collections_api.html?highlight=collection#matplotlib.collections.AsteriskPolygonCollection.set_linestyle
                for more details. Defaults to ':'.
            colors (Union[str, tuple, List[str], List[tuple]]): The colors of
                lines. ``colors`` can have the same length with lines or just
                single value. If ``colors`` is single value, all the lines
                will have the same colors. Reference to
                https://matplotlib.org/stable/gallery/color/named_colors.html
                for more details. Defaults to (180, 180, 180).
            line_widths (Union[Union[int, float], List[Union[int, float]]]):
                The linewidth of lines. ``line_widths`` can have
                the same length with lines or just single value.
                If ``line_widths`` is single value, all the lines will
                have the same linewidth. Defaults to 1.
        """
        assert self._image is not None, 'Please set image using `set_image`'
        # draw vertical lines: one line every `stride` pixels, each a
        # (x, x) pair spanning the full image height
        x_datas_vertical = ((np.arange(self.width // stride - 1) + 1) *
                            stride).reshape((-1, 1)).repeat(
                                2, axis=1)
        y_datas_vertical = np.array([[0, self.height - 1]]).repeat(
            self.width // stride - 1, axis=0)
        self.draw_lines(
            x_datas_vertical,
            y_datas_vertical,
            colors=colors,
            line_styles=line_styles,
            line_widths=line_widths)
        # draw horizontal lines
        x_datas_horizontal = np.array([[0, self.width - 1]]).repeat(
            self.height // stride - 1, axis=0)
        y_datas_horizontal = ((np.arange(self.height // stride - 1) + 1) *
                              stride).reshape((-1, 1)).repeat(
                                  2, axis=1)
        self.draw_lines(
            x_datas_horizontal,
            y_datas_horizontal,
            colors=colors,
            line_styles=line_styles,
            line_widths=line_widths)
    def draw_instances_assign(self,
                              instances: InstanceData,
                              retained_gt_inds: Tensor,
                              not_show_label: bool = False):
        """Draw instances of GT.
        Args:
            instances (:obj:`InstanceData`): gt_instance. It usually
                includes ``bboxes`` and ``labels`` attributes.
            retained_gt_inds (Tensor): The gt indexes assigned as the
                positive sample in the current prior.
            not_show_label (bool): Whether to show gt labels on images.
        """
        assert self.dataset_meta is not None
        classes = self.dataset_meta['classes']
        palette = self.dataset_meta['palette']
        if len(retained_gt_inds) == 0:
            return self.get_image()
        # Deduplicate: the same gt may be matched by several positive samples,
        # but each gt box only needs to be drawn once.
        draw_gt_inds = torch.from_numpy(
            np.array(
                list(set(retained_gt_inds.cpu().numpy())), dtype=np.int64))
        bboxes = instances.bboxes[draw_gt_inds]
        labels = instances.labels[draw_gt_inds]
        if not isinstance(bboxes, Tensor):
            # e.g. HorizontalBoxes -> raw tensor
            bboxes = bboxes.tensor
        edge_colors = [palette[i] for i in labels]
        max_label = int(max(labels) if len(labels) > 0 else 0)
        text_palette = get_palette(self.text_color, max_label + 1)
        text_colors = [text_palette[label] for label in labels]
        self.draw_bboxes(
            bboxes,
            edge_colors=edge_colors,
            alpha=self.alpha,
            line_widths=self.line_width)
        if not not_show_label:
            # Render the class name at each box's top-left corner, with a
            # font size adapted to the box area.
            positions = bboxes[:, :2] + self.line_width
            areas = (bboxes[:, 3] - bboxes[:, 1]) * (
                bboxes[:, 2] - bboxes[:, 0])
            scales = _get_adaptive_scales(areas)
            for i, (pos, label) in enumerate(zip(positions, labels)):
                label_text = classes[
                    label] if classes is not None else f'class {label}'
                self.draw_texts(
                    label_text,
                    pos,
                    colors=text_colors[i],
                    font_sizes=int(13 * scales[i]),
                    bboxes=[{
                        'facecolor': 'black',
                        'alpha': 0.8,
                        'pad': 0.7,
                        'edgecolor': 'none'
                    }])
    def draw_positive_assign(self,
                             grid_x_inds: Tensor,
                             grid_y_inds: Tensor,
                             class_inds: Tensor,
                             stride: int,
                             bboxes: Union[Tensor, HorizontalBoxes],
                             retained_gt_inds: Tensor,
                             offset: float = 0.5):
        """Draw a filled circle at the center of every positive sample cell.
        Args:
            grid_x_inds (Tensor): The X-axis indexes of the positive sample
                in current prior.
            grid_y_inds (Tensor): The Y-axis indexes of the positive sample
                in current prior.
            class_inds (Tensor): The classes indexes of the positive sample
                in current prior.
            stride (int): Downsample factor of feature map.
            bboxes (Union[Tensor, HorizontalBoxes]): Bounding boxes of GT.
            retained_gt_inds (Tensor): The gt indexes assigned as the
                positive sample in the current prior.
            offset (float): The offset of points, the value is normalized
                with corresponding stride. Defaults to 0.5.
        """
        if not isinstance(bboxes, Tensor):
            # Convert HorizontalBoxes to Tensor
            bboxes = bboxes.tensor
        # The PALETTE in the dataset_meta is required
        assert self.dataset_meta is not None
        palette = self.dataset_meta['palette']
        # Grid index -> image pixel coordinate of the cell center.
        x = ((grid_x_inds + offset) * stride).long()
        y = ((grid_y_inds + offset) * stride).long()
        center = torch.stack((x, y), dim=-1)
        retained_bboxes = bboxes[retained_gt_inds]
        bbox_wh = retained_bboxes[:, 2:] - retained_bboxes[:, :2]
        bbox_area = bbox_wh[:, 0] * bbox_wh[:, 1]
        # Marker radius scales with the matched gt's area.
        radius = _get_adaptive_scales(bbox_area) * 4
        colors = [palette[i] for i in class_inds]
        self.draw_circles(
            center,
            radius,
            colors,
            line_widths=0,
            face_colors=colors,
            alpha=1.0)
    def draw_prior(self,
                   grid_x_inds: Tensor,
                   grid_y_inds: Tensor,
                   class_inds: Tensor,
                   stride: int,
                   feat_ind: int,
                   prior_ind: int,
                   offset: float = 0.5):
        """Draw priors on image.
        Args:
            grid_x_inds (Tensor): The X-axis indexes of the positive sample
                in current prior.
            grid_y_inds (Tensor): The Y-axis indexes of the positive sample
                in current prior.
            class_inds (Tensor): The classes indexes of the positive sample
                in current prior.
            stride (int): Downsample factor of feature map.
            feat_ind (int): Index of featmap.
            prior_ind (int): Index of prior in current featmap.
            offset (float): The offset of points, the value is normalized
                with corresponding stride. Defaults to 0.5.
        """
        palette = self.dataset_meta['palette']
        center_x = ((grid_x_inds + offset) * stride)
        center_y = ((grid_y_inds + offset) * stride)
        # Degenerate (point) box at each center, then grown by the prior size.
        xyxy = torch.stack((center_x, center_y, center_x, center_y), dim=1)
        device = xyxy.device
        if self.priors_size is not None:
            xyxy += self.priors_size[feat_ind][prior_ind].to(device)
        else:
            # No predefined priors (anchor-free): use a stride-sized box.
            xyxy += torch.tensor(
                [[-stride / 2, -stride / 2, stride / 2, stride / 2]],
                device=device)
        colors = [palette[i] for i in class_inds]
        self.draw_bboxes(
            xyxy,
            edge_colors=colors,
            alpha=self.alpha,
            line_styles='--',
            line_widths=math.ceil(self.line_width * 0.3))
    def draw_assign(self,
                    image: np.ndarray,
                    assign_results: List[List[dict]],
                    gt_instances: InstanceData,
                    show_prior: bool = False,
                    not_show_label: bool = False) -> np.ndarray:
        """Draw assigning results.
        Args:
            image (np.ndarray): The image to draw.
            assign_results (list): The assigning results, one inner list per
                feature level, one dict per prior.
            gt_instances (:obj:`InstanceData`): Data structure for
                instance-level annotations or predictions.
            show_prior (bool): Whether to show prior on image.
            not_show_label (bool): Whether to show gt labels on images.
        Returns:
            np.ndarray: the drawn image which channel is RGB.
        """
        img_show_list = []
        for feat_ind, assign_results_feat in enumerate(assign_results):
            img_show_list_feat = []
            for prior_ind, assign_results_prior in enumerate(
                    assign_results_feat):
                # Each (level, prior) pair gets its own copy of the image.
                self.set_image(image)
                h, w = image.shape[:2]
                # draw grid
                stride = assign_results_prior['stride']
                self.draw_grid(stride)
                # draw prior on matched gt
                grid_x_inds = assign_results_prior['grid_x_inds']
                grid_y_inds = assign_results_prior['grid_y_inds']
                class_inds = assign_results_prior['class_inds']
                prior_ind = assign_results_prior['prior_ind']
                offset = assign_results_prior.get('offset', 0.5)
                if show_prior:
                    self.draw_prior(grid_x_inds, grid_y_inds, class_inds,
                                    stride, feat_ind, prior_ind, offset)
                # draw matched gt
                retained_gt_inds = assign_results_prior['retained_gt_inds']
                self.draw_instances_assign(gt_instances, retained_gt_inds,
                                           not_show_label)
                # draw positive
                self.draw_positive_assign(grid_x_inds, grid_y_inds, class_inds,
                                          stride, gt_instances.bboxes,
                                          retained_gt_inds, offset)
                # draw title
                if self.priors_size is not None:
                    base_prior = self.priors_size[feat_ind][prior_ind]
                else:
                    base_prior = [stride, stride, stride * 2, stride * 2]
                prior_size = (base_prior[2] - base_prior[0],
                              base_prior[3] - base_prior[1])
                pos = np.array((20, 20))
                text = f'feat_ind: {feat_ind} ' \
                       f'prior_ind: {prior_ind} ' \
                       f'prior_size: ({prior_size[0]}, {prior_size[1]})'
                scales = _get_adaptive_scales(np.array([h * w / 16]))
                font_sizes = int(13 * scales)
                self.draw_texts(
                    text,
                    pos,
                    colors=self.text_color,
                    font_sizes=font_sizes,
                    bboxes=[{
                        'facecolor': 'black',
                        'alpha': 0.8,
                        'pad': 0.7,
                        'edgecolor': 'none'
                    }])
                img_show = self.get_image()
                img_show = mmcv.impad(img_show, padding=(5, 5, 5, 5))
                img_show_list_feat.append(img_show)
            img_show_list.append(np.concatenate(img_show_list_feat, axis=1))
        # Merge all images into one image
        # setting axis is to beautify the merged image
        axis = 0 if len(assign_results[0]) > 1 else 1
        return np.concatenate(img_show_list, axis=axis)
| 13,691 | 40.87156 | 154 | py |
mmyolo | mmyolo-main/projects/assigner_visualization/dense_heads/yolov5_head_assigner.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Sequence, Union
import torch
from mmdet.models.utils import unpack_gt_instances
from mmengine.structures import InstanceData
from torch import Tensor
from mmyolo.models import YOLOv5Head
from mmyolo.registry import MODELS
@MODELS.register_module()
class YOLOv5HeadAssigner(YOLOv5Head):
    """YOLOv5 head exposing its static (anchor-shape-based) label assignment.

    Provided to the `assigner_visualization.py` script: instead of computing
    losses, it returns per-level / per-prior dictionaries describing which
    grid cells and gt boxes became positive samples.
    """
    def assign_by_gt_and_feat(
        self,
        batch_gt_instances: Sequence[InstanceData],
        batch_img_metas: Sequence[dict],
        inputs_hw: Union[Tensor, tuple] = (640, 640)
    ) -> dict:
        """Calculate the assigning results based on the gt and features
        extracted by the detection head.
        Args:
            batch_gt_instances (Sequence[InstanceData]): Batch of
                gt_instance. It usually includes ``bboxes`` and ``labels``
                attributes.
            batch_img_metas (Sequence[dict]): Meta information of each image,
                e.g., image size, scaling factor, etc.
            inputs_hw (Union[Tensor, tuple]): Height and width of inputs size.
        Returns:
            dict[str, Tensor]: A dictionary of assigning results.
        """
        # 1. Convert gt to norm format
        batch_targets_normed = self._convert_gt_to_norm_format(
            batch_gt_instances, batch_img_metas)
        device = batch_targets_normed.device
        scaled_factor = torch.ones(7, device=device)
        # One gt-index row per base prior so indexes survive the same
        # masking/repeating applied to the targets below.
        gt_inds = torch.arange(
            batch_targets_normed.shape[1],
            dtype=torch.long,
            device=device,
            requires_grad=False).unsqueeze(0).repeat((self.num_base_priors, 1))
        assign_results = []
        for i in range(self.num_levels):
            assign_results_feat = []
            h = inputs_hw[0] // self.featmap_strides[i]
            w = inputs_hw[1] // self.featmap_strides[i]
            # empty gt bboxes: emit empty assignment dicts for every prior
            if batch_targets_normed.shape[1] == 0:
                for k in range(self.num_base_priors):
                    assign_results_feat.append({
                        'stride':
                        self.featmap_strides[i],
                        'grid_x_inds':
                        torch.zeros([0], dtype=torch.int64).to(device),
                        'grid_y_inds':
                        torch.zeros([0], dtype=torch.int64).to(device),
                        'img_inds':
                        torch.zeros([0], dtype=torch.int64).to(device),
                        'class_inds':
                        torch.zeros([0], dtype=torch.int64).to(device),
                        'retained_gt_inds':
                        torch.zeros([0], dtype=torch.int64).to(device),
                        'prior_ind':
                        k
                    })
                assign_results.append(assign_results_feat)
                continue
            priors_base_sizes_i = self.priors_base_sizes[i]
            # feature map scale whwh
            scaled_factor[2:6] = torch.tensor([w, h, w, h])
            # Scale batch_targets from range 0-1 to range 0-features_maps size.
            # (num_base_priors, num_bboxes, 7)
            batch_targets_scaled = batch_targets_normed * scaled_factor
            # 2. Shape match: keep (prior, gt) pairs whose w/h ratio is
            # within prior_match_thr in both directions
            wh_ratio = batch_targets_scaled[...,
                                            4:6] / priors_base_sizes_i[:, None]
            match_inds = torch.max(
                wh_ratio, 1 / wh_ratio).max(2)[0] < self.prior_match_thr
            batch_targets_scaled = batch_targets_scaled[match_inds]
            match_gt_inds = gt_inds[match_inds]
            # no gt bbox matches anchor
            if batch_targets_scaled.shape[0] == 0:
                for k in range(self.num_base_priors):
                    assign_results_feat.append({
                        'stride':
                        self.featmap_strides[i],
                        'grid_x_inds':
                        torch.zeros([0], dtype=torch.int64).to(device),
                        'grid_y_inds':
                        torch.zeros([0], dtype=torch.int64).to(device),
                        'img_inds':
                        torch.zeros([0], dtype=torch.int64).to(device),
                        'class_inds':
                        torch.zeros([0], dtype=torch.int64).to(device),
                        'retained_gt_inds':
                        torch.zeros([0], dtype=torch.int64).to(device),
                        'prior_ind':
                        k
                    })
                assign_results.append(assign_results_feat)
                continue
            # 3. Positive samples with additional neighbors
            # check the left, up, right, bottom sides of the
            # targets grid, and determine whether assigned
            # them as positive samples as well.
            batch_targets_cxcy = batch_targets_scaled[:, 2:4]
            grid_xy = scaled_factor[[2, 3]] - batch_targets_cxcy
            left, up = ((batch_targets_cxcy % 1 < self.near_neighbor_thr) &
                        (batch_targets_cxcy > 1)).T
            right, bottom = ((grid_xy % 1 < self.near_neighbor_thr) &
                             (grid_xy > 1)).T
            offset_inds = torch.stack(
                (torch.ones_like(left), left, up, right, bottom))
            # Replicate targets for the (center + 4 neighbor) candidates and
            # keep only the selected ones; gt indexes replicated in lockstep.
            batch_targets_scaled = batch_targets_scaled.repeat(
                (5, 1, 1))[offset_inds]
            retained_gt_inds = match_gt_inds.repeat((5, 1))[offset_inds]
            retained_offsets = self.grid_offset.repeat(1, offset_inds.shape[1],
                                                       1)[offset_inds]
            # prepare pred results and positive sample indexes to
            # calculate class loss and bbox loss
            _chunk_targets = batch_targets_scaled.chunk(4, 1)
            img_class_inds, grid_xy, grid_wh, priors_inds = _chunk_targets
            priors_inds, (img_inds, class_inds) = priors_inds.long().view(
                -1), img_class_inds.long().T
            grid_xy_long = (grid_xy -
                            retained_offsets * self.near_neighbor_thr).long()
            grid_x_inds, grid_y_inds = grid_xy_long.T
            # Split the flat results back out per base prior.
            for k in range(self.num_base_priors):
                retained_inds = priors_inds == k
                assign_results_prior = {
                    'stride': self.featmap_strides[i],
                    'grid_x_inds': grid_x_inds[retained_inds],
                    'grid_y_inds': grid_y_inds[retained_inds],
                    'img_inds': img_inds[retained_inds],
                    'class_inds': class_inds[retained_inds],
                    'retained_gt_inds': retained_gt_inds[retained_inds],
                    'prior_ind': k
                }
                assign_results_feat.append(assign_results_prior)
            assign_results.append(assign_results_feat)
        return assign_results
    def assign(self, batch_data_samples: Union[list, dict],
               inputs_hw: Union[tuple, torch.Size]) -> dict:
        """Calculate assigning results. This function is provided to the
        `assigner_visualization.py` script.
        Args:
            batch_data_samples (List[:obj:`DetDataSample`], dict): The Data
                Samples. It usually includes information such as
                `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
            inputs_hw: Height and width of inputs size
        Returns:
            dict: A dictionary of assigning components.
        """
        if isinstance(batch_data_samples, list):
            outputs = unpack_gt_instances(batch_data_samples)
            (batch_gt_instances, batch_gt_instances_ignore,
             batch_img_metas) = outputs
            assign_inputs = (batch_gt_instances, batch_img_metas,
                             batch_gt_instances_ignore, inputs_hw)
        else:
            # Fast version
            assign_inputs = (batch_data_samples['bboxes_labels'],
                             batch_data_samples['img_metas'], inputs_hw)
        assign_results = self.assign_by_gt_and_feat(*assign_inputs)
        return assign_results
| 8,364 | 43.259259 | 79 | py |
mmyolo | mmyolo-main/projects/assigner_visualization/dense_heads/yolov8_head_assigner.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Union
import torch
from mmdet.utils import InstanceList
from torch import Tensor
from mmyolo.models import YOLOv8Head
from mmyolo.models.utils import gt_instances_preprocess
from mmyolo.registry import MODELS
@MODELS.register_module()
class YOLOv8HeadAssigner(YOLOv8Head):
    """YOLOv8 head exposing its dynamic (prediction-aware) label assignment.

    Provided to the `assigner_visualization.py` script: runs the head's
    assigner on the predictions and returns per-level dictionaries describing
    which grid cells and gt boxes became positive samples.
    """
    def assign_by_gt_and_feat(
        self,
        cls_scores: List[Tensor],
        bbox_preds: List[Tensor],
        batch_gt_instances: InstanceList,
        batch_img_metas: List[dict],
        inputs_hw: Union[Tensor, tuple] = (640, 640)
    ) -> dict:
        """Calculate the assigning results based on the gt and features
        extracted by the detection head.
        Args:
            cls_scores (Sequence[Tensor]): Box scores for each scale level,
                each is a 4D-tensor, the channel number is
                num_priors * num_classes.
            bbox_preds (Sequence[Tensor]): Box energies / deltas for each scale
                level, each is a 4D-tensor, the channel number is
                num_priors * 4.
            batch_gt_instances (list[:obj:`InstanceData`]): Batch of
                gt_instance. It usually includes ``bboxes`` and ``labels``
                attributes.
            batch_img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            inputs_hw (Union[Tensor, tuple]): Height and width of inputs size.
        Returns:
            dict[str, Tensor]: A dictionary of assigning results.
        """
        num_imgs = len(batch_img_metas)
        device = cls_scores[0].device
        current_featmap_sizes = [
            cls_score.shape[2:] for cls_score in cls_scores
        ]
        # If the shape does not equal, generate new one
        # (priors are cached on self between calls with identical sizes)
        if current_featmap_sizes != self.featmap_sizes_train:
            self.featmap_sizes_train = current_featmap_sizes
            mlvl_priors_with_stride = self.prior_generator.grid_priors(
                self.featmap_sizes_train,
                dtype=cls_scores[0].dtype,
                device=device,
                with_stride=True)
            self.num_level_priors = [len(n) for n in mlvl_priors_with_stride]
            self.flatten_priors_train = torch.cat(
                mlvl_priors_with_stride, dim=0)
            self.stride_tensor = self.flatten_priors_train[..., [2]]
        # gt info
        gt_info = gt_instances_preprocess(batch_gt_instances, num_imgs)
        gt_labels = gt_info[:, :, :1]
        gt_bboxes = gt_info[:, :, 1:]  # xyxy
        # All-zero rows are padding, not real gt boxes.
        pad_bbox_flag = (gt_bboxes.sum(-1, keepdim=True) > 0).float()
        # pred info
        flatten_cls_preds = [
            cls_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1,
                                                 self.num_classes)
            for cls_pred in cls_scores
        ]
        flatten_pred_bboxes = [
            bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4)
            for bbox_pred in bbox_preds
        ]
        # (bs, n, 4 * reg_max)
        flatten_cls_preds = torch.cat(flatten_cls_preds, dim=1)
        flatten_pred_bboxes = torch.cat(flatten_pred_bboxes, dim=1)
        flatten_pred_bboxes = self.bbox_coder.decode(
            self.flatten_priors_train[..., :2], flatten_pred_bboxes,
            self.stride_tensor[..., 0])
        assigned_result = self.assigner(
            (flatten_pred_bboxes.detach()).type(gt_bboxes.dtype),
            flatten_cls_preds.detach().sigmoid(), self.flatten_priors_train,
            gt_labels, gt_bboxes, pad_bbox_flag)
        labels = assigned_result['assigned_labels'].reshape(-1)
        bbox_targets = assigned_result['assigned_bboxes'].reshape(-1, 4)
        fg_mask_pre_prior = assigned_result['fg_mask_pre_prior'].squeeze(0)
        pos_inds = fg_mask_pre_prior.nonzero().squeeze(1)
        targets = bbox_targets[pos_inds]
        gt_bboxes = gt_bboxes.squeeze(0)
        # Recover the gt index of each positive prediction by matching its
        # assigned target box back against the gt boxes (all 4 coords equal).
        matched_gt_inds = torch.tensor(
            [((t == gt_bboxes).sum(dim=1) == t.shape[0]).nonzero()[0]
             for t in targets],
            device=device)
        # Map every flattened prior index to its feature level.
        level_inds = torch.zeros_like(labels)
        img_inds = torch.zeros_like(labels)
        level_nums = [0] + self.num_level_priors
        for i in range(len(level_nums) - 1):
            level_nums[i + 1] = level_nums[i] + level_nums[i + 1]
            level_inds[level_nums[i]:level_nums[i + 1]] = i
        level_inds_pos = level_inds[pos_inds]
        img_inds = img_inds[pos_inds]
        labels = labels[pos_inds]
        assign_results = []
        for i in range(self.num_levels):
            retained_inds = level_inds_pos == i
            if not retained_inds.any():
                # No positive sample on this level: emit an empty dict.
                assign_results_prior = {
                    'stride':
                    self.featmap_strides[i],
                    'grid_x_inds':
                    torch.zeros([0], dtype=torch.int64).to(device),
                    'grid_y_inds':
                    torch.zeros([0], dtype=torch.int64).to(device),
                    'img_inds':
                    torch.zeros([0], dtype=torch.int64).to(device),
                    'class_inds':
                    torch.zeros([0], dtype=torch.int64).to(device),
                    'retained_gt_inds':
                    torch.zeros([0], dtype=torch.int64).to(device),
                    'prior_ind':
                    0
                }
            else:
                w = inputs_hw[1] // self.featmap_strides[i]
                # Decompose the level-local flat prior index into (row, col).
                retained_pos_inds = pos_inds[retained_inds] - level_nums[i]
                grid_y_inds = retained_pos_inds // w
                grid_x_inds = retained_pos_inds - retained_pos_inds // w * w
                assign_results_prior = {
                    'stride': self.featmap_strides[i],
                    'grid_x_inds': grid_x_inds,
                    'grid_y_inds': grid_y_inds,
                    'img_inds': img_inds[retained_inds],
                    'class_inds': labels[retained_inds],
                    'retained_gt_inds': matched_gt_inds[retained_inds],
                    'prior_ind': 0
                }
            assign_results.append([assign_results_prior])
        return assign_results
    def assign(self, batch_data_samples: Union[list, dict],
               inputs_hw: Union[tuple, torch.Size]) -> dict:
        """Calculate assigning results.
        This function is provided to the
        `assigner_visualization.py` script.
        Args:
            batch_data_samples (List[:obj:`DetDataSample`], dict): The Data
                Samples. It usually includes information such as
                `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
            inputs_hw: Height and width of inputs size
        Returns:
            dict: A dictionary of assigning components.
        """
        if isinstance(batch_data_samples, list):
            raise NotImplementedError(
                'assigning results_list is not implemented')
        else:
            # Fast version
            cls_scores, bbox_preds = self(batch_data_samples['feats'])
            assign_inputs = (cls_scores, bbox_preds,
                             batch_data_samples['bboxes_labels'],
                             batch_data_samples['img_metas'], inputs_hw)
        assign_results = self.assign_by_gt_and_feat(*assign_inputs)
        return assign_results
| 7,515 | 40.524862 | 79 | py |
mmyolo | mmyolo-main/projects/assigner_visualization/dense_heads/yolov7_head_assigner.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Union
import torch
from mmdet.utils import InstanceList
from torch import Tensor
from mmyolo.models import YOLOv7Head
from mmyolo.registry import MODELS
@MODELS.register_module()
class YOLOv7HeadAssigner(YOLOv7Head):
    """YOLOv7 head exposing its dynamic label assignment for visualization.

    Provided to the `assigner_visualization.py` script: runs the head's
    assigner on the predictions and returns per-level / per-prior
    dictionaries describing which grid cells and gt boxes became positive
    samples.
    """
    def assign_by_gt_and_feat(
        self,
        cls_scores: List[Tensor],
        bbox_preds: List[Tensor],
        objectnesses: List[Tensor],
        batch_gt_instances: InstanceList,
        batch_img_metas: List[dict],
        inputs_hw: Union[Tensor, tuple],
    ) -> dict:
        """Calculate the assigning results based on the gt and features
        extracted by the detection head.
        Args:
            cls_scores (Sequence[Tensor]): Box scores for each scale level,
                each is a 4D-tensor, the channel number is
                num_priors * num_classes.
            bbox_preds (Sequence[Tensor]): Box energies / deltas for each scale
                level, each is a 4D-tensor, the channel number is
                num_priors * 4.
            objectnesses (Sequence[Tensor]): Score factor for
                all scale level, each is a 4D-tensor, has shape
                (batch_size, 1, H, W)
            batch_gt_instances (list[:obj:`InstanceData`]): Batch of
                gt_instance. It usually includes ``bboxes`` and ``labels``
                attributes.
            batch_img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            inputs_hw (Union[Tensor, tuple]): Height and width of inputs size.
        Returns:
            dict[str, Tensor]: A dictionary of assigning results.
        """
        device = cls_scores[0][0].device
        head_preds = self._merge_predict_results(bbox_preds, objectnesses,
                                                 cls_scores)
        batch_targets_normed = self._convert_gt_to_norm_format(
            batch_gt_instances, batch_img_metas)
        # yolov5_assign and simota_assign
        assigner_results = self.assigner(
            head_preds,
            batch_targets_normed,
            batch_img_metas[0]['batch_input_shape'],
            self.priors_base_sizes,
            self.grid_offset,
            near_neighbor_thr=self.near_neighbor_thr)
        # multi-level positive sample position.
        mlvl_positive_infos = assigner_results['mlvl_positive_infos']
        # assigned results with label and bboxes information.
        mlvl_targets_normed = assigner_results['mlvl_targets_normed']
        assign_results = []
        for i in range(self.num_levels):
            assign_results_feat = []
            # no gt bbox matches anchor: emit empty dicts for every prior
            if mlvl_positive_infos[i].shape[0] == 0:
                for k in range(self.num_base_priors):
                    assign_results_feat.append({
                        'stride':
                        self.featmap_strides[i],
                        'grid_x_inds':
                        torch.zeros([0], dtype=torch.int64).to(device),
                        'grid_y_inds':
                        torch.zeros([0], dtype=torch.int64).to(device),
                        'img_inds':
                        torch.zeros([0], dtype=torch.int64).to(device),
                        'class_inds':
                        torch.zeros([0], dtype=torch.int64).to(device),
                        'retained_gt_inds':
                        torch.zeros([0], dtype=torch.int64).to(device),
                        'prior_ind':
                        k
                    })
                assign_results.append(assign_results_feat)
                continue
            # (batch_idx, prior_idx, x_scaled, y_scaled)
            positive_info = mlvl_positive_infos[i]
            targets_normed = mlvl_targets_normed[i]
            priors_inds = positive_info[:, 1]
            grid_x_inds = positive_info[:, 2]
            grid_y_inds = positive_info[:, 3]
            img_inds = targets_normed[:, 0]
            class_inds = targets_normed[:, 1].long()
            retained_gt_inds = self.get_gt_inds(
                targets_normed, batch_targets_normed[0]).long()
            # Split the flat results back out per base prior.
            for k in range(self.num_base_priors):
                retained_inds = priors_inds == k
                assign_results_prior = {
                    'stride': self.featmap_strides[i],
                    'grid_x_inds': grid_x_inds[retained_inds],
                    'grid_y_inds': grid_y_inds[retained_inds],
                    'img_inds': img_inds[retained_inds],
                    'class_inds': class_inds[retained_inds],
                    'retained_gt_inds': retained_gt_inds[retained_inds],
                    'prior_ind': k
                }
                assign_results_feat.append(assign_results_prior)
            assign_results.append(assign_results_feat)
        return assign_results
    def get_gt_inds(self, assigned_target, gt_instance):
        """Find which gt index each assigned target came from by comparing
        the assigned targets against the original (normalized) targets.
        Args:
            assigned_target (Tensor(assign_nums,7)): YOLOv7 assigning results.
            gt_instance (Tensor(gt_nums,7)): Normalized gt_instance, It
                usually includes ``bboxes`` and ``labels`` attributes.
        Returns:
            gt_inds (Tensor): the index of the gt each target was assigned to.
        """
        gt_inds = torch.zeros(assigned_target.shape[0])
        for i in range(assigned_target.shape[0]):
            # A gt row matches when all 7 target fields are equal.
            gt_inds[i] = ((assigned_target[i] == gt_instance).sum(
                dim=1) == 7).nonzero().squeeze()
        return gt_inds
    def assign(self, batch_data_samples: Union[list, dict],
               inputs_hw: Union[tuple, torch.Size]) -> dict:
        """Calculate assigning results.
        This function is provided to the
        `assigner_visualization.py` script.
        Args:
            batch_data_samples (List[:obj:`DetDataSample`], dict): The Data
                Samples. It usually includes information such as
                `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
            inputs_hw: Height and width of inputs size
        Returns:
            dict: A dictionary of assigning components.
        """
        if isinstance(batch_data_samples, list):
            raise NotImplementedError(
                'assigning results_list is not implemented')
        else:
            # Fast version
            cls_scores, bbox_preds, objectnesses = self(
                batch_data_samples['feats'])
            assign_inputs = (cls_scores, bbox_preds, objectnesses,
                             batch_data_samples['bboxes_labels'],
                             batch_data_samples['img_metas'], inputs_hw)
        assign_results = self.assign_by_gt_and_feat(*assign_inputs)
        return assign_results
| 6,836 | 41.73125 | 79 | py |
mmyolo | mmyolo-main/projects/assigner_visualization/dense_heads/rtmdet_head_assigner.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Union
import torch
from mmdet.structures.bbox import distance2bbox
from mmdet.utils import InstanceList
from torch import Tensor
from mmyolo.models import RTMDetHead
from mmyolo.models.utils import gt_instances_preprocess
from mmyolo.registry import MODELS
@MODELS.register_module()
class RTMHeadAssigner(RTMDetHead):
    """RTMDet head variant that exposes raw label-assignment results.

    Used by the ``assigner_visualization.py`` script to inspect which
    priors were matched to which ground truths, instead of computing a
    loss.
    """
    def assign_by_gt_and_feat(
        self,
        cls_scores: List[Tensor],
        bbox_preds: List[Tensor],
        batch_gt_instances: InstanceList,
        batch_img_metas: List[dict],
        inputs_hw: Union[Tensor, tuple] = (640, 640)
    ) -> dict:
        """Calculate the assigning results based on the gt and features
        extracted by the detection head.
        Args:
            cls_scores (list[Tensor]): Box scores for each scale level
                Has shape (N, num_anchors * num_classes, H, W)
            bbox_preds (list[Tensor]): Decoded box for each scale
                level with shape (N, num_anchors * 4, H, W) in
                [tl_x, tl_y, br_x, br_y] format.
            batch_gt_instances (list[:obj:`InstanceData`]): Batch of
                gt_instance. It usually includes ``bboxes`` and ``labels``
                attributes.
            batch_img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            inputs_hw (Union[Tensor, tuple]): Height and width of inputs size.
        Returns:
            dict[str, Tensor]: A dictionary of assigning results.
        """
        num_imgs = len(batch_img_metas)
        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        assert len(featmap_sizes) == self.prior_generator.num_levels
        # rtmdet's prior offset differs from others
        prior_offset = self.prior_generator.offset
        # gt_info is (num_imgs, max_gts, 5): label followed by xyxy box.
        gt_info = gt_instances_preprocess(batch_gt_instances, num_imgs)
        gt_labels = gt_info[:, :, :1]
        gt_bboxes = gt_info[:, :, 1:]  # xyxy
        # Zero-padded gt rows sum to 0; flag marks the real boxes.
        pad_bbox_flag = (gt_bboxes.sum(-1, keepdim=True) > 0).float()
        device = cls_scores[0].device
        # If the shape does not equal, generate new one
        if featmap_sizes != self.featmap_sizes_train:
            self.featmap_sizes_train = featmap_sizes
            mlvl_priors_with_stride = self.prior_generator.grid_priors(
                featmap_sizes, device=device, with_stride=True)
            self.flatten_priors_train = torch.cat(
                mlvl_priors_with_stride, dim=0)
        # Flatten per-level maps to (num_imgs, total_priors, C) / (..., 4).
        flatten_cls_scores = torch.cat([
            cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1,
                                                  self.cls_out_channels)
            for cls_score in cls_scores
        ], 1).contiguous()
        flatten_bboxes = torch.cat([
            bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4)
            for bbox_pred in bbox_preds
        ], 1)
        # Distances are predicted in stride units; rescale, then turn the
        # (l, t, r, b) distances into absolute xyxy boxes.
        flatten_bboxes = flatten_bboxes * self.flatten_priors_train[..., -1,
                                                                    None]
        flatten_bboxes = distance2bbox(self.flatten_priors_train[..., :2],
                                       flatten_bboxes)
        assigned_result = self.assigner(flatten_bboxes.detach(),
                                        flatten_cls_scores.detach(),
                                        self.flatten_priors_train, gt_labels,
                                        gt_bboxes, pad_bbox_flag)
        labels = assigned_result['assigned_labels'].reshape(-1)
        bbox_targets = assigned_result['assigned_bboxes'].reshape(-1, 4)
        # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
        bg_class_ind = self.num_classes
        pos_inds = ((labels >= 0)
                    & (labels < bg_class_ind)).nonzero().squeeze(1)
        targets = bbox_targets[pos_inds]
        gt_bboxes = gt_bboxes.squeeze(0)
        # Recover the gt index of each positive target by matching its
        # assigned box back against the gt boxes (exact value match).
        matched_gt_inds = torch.tensor(
            [((t == gt_bboxes).sum(dim=1) == t.shape[0]).nonzero()[0]
             for t in targets],
            device=device)
        level_inds = torch.zeros_like(labels)
        img_inds = torch.zeros_like(labels)
        # Prefix-sum of per-level prior counts -> level id per flat index.
        level_nums = [0] + [f[0] * f[1] for f in featmap_sizes]
        for i in range(len(level_nums) - 1):
            level_nums[i + 1] = level_nums[i] + level_nums[i + 1]
            level_inds[level_nums[i]:level_nums[i + 1]] = i
        level_inds_pos = level_inds[pos_inds]
        img_inds = img_inds[pos_inds]
        labels = labels[pos_inds]
        inputs_hw = batch_img_metas[0]['batch_input_shape']
        assign_results = []
        for i in range(self.num_levels):
            retained_inds = level_inds_pos == i
            if not retained_inds.any():
                # No positives on this level: emit empty index tensors so
                # downstream visualization code can iterate uniformly.
                assign_results_prior = {
                    'stride':
                    self.featmap_strides[i],
                    'grid_x_inds':
                    torch.zeros([0], dtype=torch.int64).to(device),
                    'grid_y_inds':
                    torch.zeros([0], dtype=torch.int64).to(device),
                    'img_inds':
                    torch.zeros([0], dtype=torch.int64).to(device),
                    'class_inds':
                    torch.zeros([0], dtype=torch.int64).to(device),
                    'retained_gt_inds':
                    torch.zeros([0], dtype=torch.int64).to(device),
                    'prior_ind':
                    0,
                    'offset':
                    prior_offset
                }
            else:
                # Convert flat per-level prior indices to (x, y) grid cells.
                w = inputs_hw[1] // self.featmap_strides[i]
                retained_pos_inds = pos_inds[retained_inds] - level_nums[i]
                grid_y_inds = retained_pos_inds // w
                grid_x_inds = retained_pos_inds - retained_pos_inds // w * w
                assign_results_prior = {
                    'stride': self.featmap_strides[i],
                    'grid_x_inds': grid_x_inds,
                    'grid_y_inds': grid_y_inds,
                    'img_inds': img_inds[retained_inds],
                    'class_inds': labels[retained_inds],
                    'retained_gt_inds': matched_gt_inds[retained_inds],
                    'prior_ind': 0,
                    'offset': prior_offset
                }
            assign_results.append([assign_results_prior])
        return assign_results
    def assign(self, batch_data_samples: Union[list, dict],
               inputs_hw: Union[tuple, torch.Size]) -> dict:
        """Calculate assigning results. This function is provided to the
        `assigner_visualization.py` script.
        Args:
            batch_data_samples (List[:obj:`DetDataSample`], dict): The Data
                Samples. It usually includes information such as
                `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
            inputs_hw: Height and width of inputs size
        Returns:
            dict: A dictionary of assigning components.
        """
        if isinstance(batch_data_samples, list):
            raise NotImplementedError(
                'assigning results_list is not implemented')
        else:
            # Fast version
            cls_scores, bbox_preds = self(batch_data_samples['feats'])
            assign_inputs = (cls_scores, bbox_preds,
                             batch_data_samples['bboxes_labels'],
                             batch_data_samples['img_metas'], inputs_hw)
        assign_results = self.assign_by_gt_and_feat(*assign_inputs)
        return assign_results
| 7,517 | 41.715909 | 79 | py |
mmyolo | mmyolo-main/projects/easydeploy/backbone/common.py | import torch
import torch.nn as nn
from torch import Tensor
class DeployC2f(nn.Module):
    """Deploy-friendly drop-in for ``CSPLayerWithTwoConv`` (C2f).

    Instances are never constructed directly: an existing layer has its
    ``__class__`` swapped to this one, so ``main_conv``, ``mid_channels``,
    ``blocks`` and ``final_conv`` come from the original module.
    """
    def __init__(self, *args, **kwargs):
        super().__init__()
    def forward(self, x: Tensor) -> Tensor:
        stem = self.main_conv(x)
        # Chain each bottleneck block over the second half of the channels,
        # collecting every intermediate output for the final concat.
        branch = stem[:, self.mid_channels:, ...]
        outs = [stem]
        for block in self.blocks:
            branch = block(branch)
            outs.append(branch)
        return self.final_conv(torch.cat(outs, 1))
| 444 | 25.176471 | 67 | py |
mmyolo | mmyolo-main/projects/easydeploy/backbone/focus.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
class DeployFocus(nn.Module):
    """Export-friendly replacement for mmdet's ``Focus`` stem.

    Re-expresses the strided pixel slicing of Focus with reshape/permute
    ops, which export cleanly to ONNX/TensorRT. All weights and buffers
    are taken over from the original module via ``__dict__`` update.
    """
    def __init__(self, orin_Focus: nn.Module):
        super().__init__()
        self.__dict__.update(orin_Focus.__dict__)
    def forward(self, x: Tensor) -> Tensor:
        batch, channels, height, width = x.shape
        # Split H and W into (half, 2) pairs: (B, C, H/2, 2, W/2, 2).
        x = x.reshape(batch, channels, height // 2, 2, width // 2, 2)
        # Move the two 2x2 offset axes in front of the channel axis so the
        # subsequent reshape stacks the four sub-grids along channels.
        x = x.permute(0, 5, 3, 1, 2, 4)
        x = x.reshape(batch, channels * 4, height // 2, width // 2)
        return self.conv(x)
class NcnnFocus(nn.Module):
    """Focus stem rewritten so ncnn can fuse it into ShuffleChannel ops.

    State is copied from the original Focus module via ``__dict__``
    update; only ``forward`` differs.
    """
    def __init__(self, orin_Focus: nn.Module):
        super().__init__()
        self.__dict__.update(orin_Focus.__dict__)
    def forward(self, x: Tensor) -> Tensor:
        batch_size, c, h, w = x.shape
        assert h % 2 == 0 and w % 2 == 0, f'focus for yolox needs even feature\
            height and width, got {(h, w)}.'

        def channel_shuffle(t: Tensor) -> Tensor:
            # fuse to ncnn's shufflechannel
            n, ch, hh, ww = t.shape
            t = t.view(n, ch // 2, 2, hh, ww)
            t = t.transpose(1, 2).contiguous()
            return t.view(n, -1, hh, ww)

        # Two reshape+shuffle rounds reorder the pixels so the final
        # reshape yields the (c*4, h/2, w/2) Focus layout.
        x = channel_shuffle(x.reshape(batch_size, c * h, 1, w))
        x = channel_shuffle(x.reshape(batch_size, c * h * w, 1, 1))
        x = x.reshape(batch_size, c * 4, h // 2, w // 2)
        return self.conv(x)
class GConvFocus(nn.Module):
    """Focus stem expressed as four stride-2 grouped convolutions.

    Each fixed 2x2 one-hot kernel picks one pixel of every 2x2 patch, so
    concatenating the four conv outputs along channels reproduces the
    Focus slicing. Remaining state comes from the original module.
    """
    def __init__(self, orin_Focus: nn.Module):
        super().__init__()
        device = next(orin_Focus.parameters()).device
        # One-hot kernels selecting (top-left, bottom-left, top-right,
        # bottom-right) of each 2x2 patch, one per input channel (groups=3).
        kernels = ([[1., 0], [0, 0]], [[0, 0], [1., 0]],
                   [[0, 1.], [0, 0]], [[0, 0], [0, 1.]])
        self.weight1, self.weight2, self.weight3, self.weight4 = (
            torch.tensor(k).expand(3, 1, 2, 2).to(device) for k in kernels)
        self.__dict__.update(orin_Focus.__dict__)
    def forward(self, x: Tensor) -> Tensor:
        weights = (self.weight1, self.weight2, self.weight3, self.weight4)
        outs = [F.conv2d(x, w, stride=2, groups=3) for w in weights]
        return self.conv(torch.cat(outs, dim=1))
| 2,834 | 34.4375 | 79 | py |
mmyolo | mmyolo-main/projects/easydeploy/tools/build_engine.py | import argparse
from pathlib import Path
from typing import List, Optional, Tuple, Union
try:
import tensorrt as trt
except Exception:
trt = None
import warnings
import numpy as np
import torch
warnings.filterwarnings(action='ignore', category=DeprecationWarning)
class EngineBuilder:
    """Build a serialized TensorRT engine from an ONNX checkpoint.

    The engine is written next to the checkpoint with an ``.engine``
    suffix. Dynamic-shape inputs get an optimization profile derived
    from ``opt_shape`` (or an explicit ``scale`` list of min/opt/max
    shapes).
    """
    def __init__(
            self,
            checkpoint: Union[str, Path],
            opt_shape: Union[Tuple, List] = (1, 3, 640, 640),
            device: Optional[Union[str, int, torch.device]] = None) -> None:
        checkpoint = Path(checkpoint) if isinstance(checkpoint,
                                                    str) else checkpoint
        assert checkpoint.exists() and checkpoint.suffix == '.onnx'
        if isinstance(device, str):
            device = torch.device(device)
        elif isinstance(device, int):
            device = torch.device(f'cuda:{device}')
        self.checkpoint = checkpoint
        # Kept as float so profile scales can be applied multiplicatively.
        self.opt_shape = np.array(opt_shape, dtype=np.float32)
        self.device = device
    def __build_engine(self,
                       scale: Optional[List[List]] = None,
                       fp16: bool = True,
                       with_profiling: bool = True) -> None:
        # Parse the ONNX graph, configure the builder, then serialize the
        # resulting engine to ``<checkpoint>.engine``.
        logger = trt.Logger(trt.Logger.WARNING)
        trt.init_libnvinfer_plugins(logger, namespace='')
        builder = trt.Builder(logger)
        config = builder.create_builder_config()
        # Allow TensorRT to use up to the device's total memory as workspace.
        config.max_workspace_size = torch.cuda.get_device_properties(
            self.device).total_memory
        flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
        network = builder.create_network(flag)
        parser = trt.OnnxParser(network, logger)
        if not parser.parse_from_file(str(self.checkpoint)):
            raise RuntimeError(
                f'failed to load ONNX file: {str(self.checkpoint)}')
        inputs = [network.get_input(i) for i in range(network.num_inputs)]
        outputs = [network.get_output(i) for i in range(network.num_outputs)]
        profile = None
        # A -1 in the first input's shape marks a dynamic-shape network.
        dshape = -1 in network.get_input(0).shape
        if dshape:
            profile = builder.create_optimization_profile()
            if scale is None:
                # Default min/opt/max profile relative to opt_shape.
                scale = np.array(
                    [[1, 1, 0.5, 0.5], [1, 1, 1, 1], [4, 1, 1.5, 1.5]],
                    dtype=np.float32)
                scale = (self.opt_shape * scale).astype(np.int32)
            elif isinstance(scale, List):
                scale = np.array(scale, dtype=np.int32)
                assert scale.shape[0] == 3, 'Input a wrong scale list'
            else:
                raise NotImplementedError
        for inp in inputs:
            logger.log(
                trt.Logger.WARNING,
                f'input "{inp.name}" with shape{inp.shape} {inp.dtype}')
            if dshape:
                profile.set_shape(inp.name, *scale)
        for out in outputs:
            logger.log(
                trt.Logger.WARNING,
                f'output "{out.name}" with shape{out.shape} {out.dtype}')
        if fp16 and builder.platform_has_fast_fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        self.weight = self.checkpoint.with_suffix('.engine')
        if dshape:
            config.add_optimization_profile(profile)
        if with_profiling:
            config.profiling_verbosity = trt.ProfilingVerbosity.DETAILED
        with builder.build_engine(network, config) as engine:
            self.weight.write_bytes(engine.serialize())
        logger.log(
            trt.Logger.WARNING, f'Build tensorrt engine finish.\n'
            f'Save in {str(self.weight.absolute())}')
    def build(self,
              scale: Optional[List[List]] = None,
              fp16: bool = True,
              with_profiling=True):
        """Public entry point; see ``__build_engine`` for parameters."""
        self.__build_engine(scale, fp16, with_profiling)
def parse_args():
    """Parse command-line options for building a TensorRT engine."""
    parser = argparse.ArgumentParser()
    parser.add_argument('checkpoint', help='Checkpoint file')
    parser.add_argument(
        '--img-size',
        nargs='+',
        type=int,
        default=[640, 640],
        help='Image size of height and width')
    parser.add_argument(
        '--device', type=str, default='cuda:0', help='TensorRT builder device')
    parser.add_argument(
        '--scales',
        type=str,
        default='[[1,3,640,640],[1,3,640,640],[1,3,640,640]]',
        help='Input scales for build dynamic input shape engine')
    parser.add_argument(
        '--fp16', action='store_true', help='Build model with fp16 mode')
    args = parser.parse_args()
    # A single value means a square input: repeat it for (height, width).
    if len(args.img_size) == 1:
        args.img_size = args.img_size * 2
    return args
def main(args):
    """Build a TensorRT engine from the ONNX checkpoint given in ``args``.

    Args:
        args (argparse.Namespace): Parsed CLI options; uses ``checkpoint``,
            ``img_size``, ``device``, ``scales`` and ``fp16``.
    """
    import ast
    img_size = (1, 3, *args.img_size)
    try:
        # literal_eval parses the documented nested-list format safely,
        # unlike eval() which would execute arbitrary code from the CLI.
        scales = ast.literal_eval(args.scales)
    except Exception:
        print('Input scales is not a python variable')
        print('Set scales default None')
        scales = None
    builder = EngineBuilder(args.checkpoint, img_size, args.device)
    builder.build(scales, fp16=args.fp16)
# Script entry point: parse options, then build the TensorRT engine.
if __name__ == '__main__':
    args = parse_args()
    main(args)
| 5,007 | 35.554745 | 79 | py |
mmyolo | mmyolo-main/projects/easydeploy/tools/export.py | import argparse
import os
import warnings
from io import BytesIO
import onnx
import torch
from mmdet.apis import init_detector
from mmengine.config import ConfigDict
from mmengine.utils.path import mkdir_or_exist
from mmyolo.utils import register_all_modules
from projects.easydeploy.model import DeployModel
warnings.filterwarnings(action='ignore', category=torch.jit.TracerWarning)
warnings.filterwarnings(action='ignore', category=torch.jit.ScriptWarning)
warnings.filterwarnings(action='ignore', category=UserWarning)
warnings.filterwarnings(action='ignore', category=FutureWarning)
warnings.filterwarnings(action='ignore', category=ResourceWarning)
def parse_args():
    """Parse command-line options for the ONNX export script."""
    parser = argparse.ArgumentParser()
    # Model / output locations.
    parser.add_argument('config', help='Config file')
    parser.add_argument('checkpoint', help='Checkpoint file')
    parser.add_argument(
        '--model-only', action='store_true', help='Export model only')
    parser.add_argument(
        '--work-dir', default='./work_dir', help='Path to save export model')
    # Export-time shape and device.
    parser.add_argument(
        '--img-size',
        nargs='+',
        type=int,
        default=[640, 640],
        help='Image size of height and width')
    parser.add_argument('--batch-size', type=int, default=1, help='Batch size')
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    # ONNX-level options.
    parser.add_argument(
        '--simplify',
        action='store_true',
        help='Simplify onnx model by onnx-sim')
    parser.add_argument(
        '--opset', type=int, default=11, help='ONNX opset version')
    parser.add_argument(
        '--backend', type=int, default=1, help='Backend for export onnx')
    # Post-processing (NMS) options baked into the exported graph.
    parser.add_argument(
        '--pre-topk',
        type=int,
        default=1000,
        help='Postprocess pre topk bboxes feed into NMS')
    parser.add_argument(
        '--keep-topk',
        type=int,
        default=100,
        help='Postprocess keep topk bboxes out of NMS')
    parser.add_argument(
        '--iou-threshold',
        type=float,
        default=0.65,
        help='IoU threshold for NMS')
    parser.add_argument(
        '--score-threshold',
        type=float,
        default=0.25,
        help='Score threshold for NMS')
    args = parser.parse_args()
    # A single value means a square input: repeat it for (height, width).
    if len(args.img_size) == 1:
        args.img_size = args.img_size * 2
    return args
def build_model_from_cfg(config_path, checkpoint_path, device):
    """Initialize a detector from config/checkpoint and put it in eval mode."""
    detector = init_detector(config_path, checkpoint_path, device=device)
    detector.eval()
    return detector
def main():
    """Export a mmyolo detector to ONNX, optionally with NMS baked in."""
    args = parse_args()
    register_all_modules()
    mkdir_or_exist(args.work_dir)
    if args.model_only:
        # No post-processing graph: export the bare network outputs.
        postprocess_cfg = None
        output_names = None
    else:
        postprocess_cfg = ConfigDict(
            pre_top_k=args.pre_topk,
            keep_top_k=args.keep_topk,
            iou_threshold=args.iou_threshold,
            score_threshold=args.score_threshold,
            backend=args.backend)
        output_names = ['num_dets', 'boxes', 'scores', 'labels']
    baseModel = build_model_from_cfg(args.config, args.checkpoint, args.device)
    deploy_model = DeployModel(
        baseModel=baseModel, postprocess_cfg=postprocess_cfg)
    deploy_model.eval()
    fake_input = torch.randn(args.batch_size, 3,
                             *args.img_size).to(args.device)
    # dry run
    deploy_model(fake_input)
    save_onnx_path = os.path.join(args.work_dir, 'end2end.onnx')
    # export onnx
    with BytesIO() as f:
        torch.onnx.export(
            deploy_model,
            fake_input,
            f,
            input_names=['images'],
            output_names=output_names,
            opset_version=args.opset)
        f.seek(0)
        onnx_model = onnx.load(f)
        onnx.checker.check_model(onnx_model)
    # Fix tensorrt onnx output shape, just for view
    if args.backend in (2, 3):
        # Overwrite the symbolic output dims with the concrete shapes the
        # TensorRT NMS plugins will produce, purely for readability.
        shapes = [
            args.batch_size, 1, args.batch_size, args.keep_topk, 4,
            args.batch_size, args.keep_topk, args.batch_size,
            args.keep_topk
        ]
        for i in onnx_model.graph.output:
            for j in i.type.tensor_type.shape.dim:
                j.dim_param = str(shapes.pop(0))
    if args.simplify:
        # Best-effort graph simplification; failure is non-fatal.
        try:
            import onnxsim
            onnx_model, check = onnxsim.simplify(onnx_model)
            assert check, 'assert check failed'
        except Exception as e:
            print(f'Simplify failure: {e}')
    onnx.save(onnx_model, save_onnx_path)
    print(f'ONNX export success, save into {save_onnx_path}')
# Run the ONNX export when invoked as a script.
if __name__ == '__main__':
    main()
| 4,623 | 31.335664 | 79 | py |
mmyolo | mmyolo-main/projects/easydeploy/tools/image-demo.py | # Copyright (c) OpenMMLab. All rights reserved.
from projects.easydeploy.model import ORTWrapper, TRTWrapper # isort:skip
import os
import random
from argparse import ArgumentParser
import cv2
import mmcv
import numpy as np
import torch
from mmcv.transforms import Compose
from mmdet.utils import get_test_pipeline_cfg
from mmengine.config import Config, ConfigDict
from mmengine.utils import ProgressBar, path
from mmyolo.utils import register_all_modules
from mmyolo.utils.misc import get_file_list
def parse_args():
    """Parse CLI options for the deployed-model image demo."""
    parser = ArgumentParser()
    parser.add_argument(
        'img', help='Image path, include image file, dir and URL.')
    parser.add_argument('config', help='Config file')
    parser.add_argument('checkpoint', help='Checkpoint file')
    parser.add_argument(
        '--out-dir', default='./output', help='Path to output file')
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    parser.add_argument(
        '--show', action='store_true', help='Show the detection results')
    return parser.parse_args()
def preprocess(config):
    """Build a normalization module from a model config.

    Reads ``model.data_preprocessor.mean/std`` (defaulting to identity
    normalization) and returns an eval-mode module that adds a batch
    dimension, casts to float and applies ``(x - mean) / std``.

    Args:
        config: Mapping with optional ``model.data_preprocessor`` entry.
    Returns:
        torch.nn.Module: The normalization module.
    """
    data_preprocess = config.get('model', {}).get('data_preprocessor', {})
    mean = data_preprocess.get('mean', [0., 0., 0.])
    std = data_preprocess.get('std', [1., 1., 1.])
    mean = torch.tensor(mean, dtype=torch.float32).reshape(1, 3, 1, 1)
    std = torch.tensor(std, dtype=torch.float32).reshape(1, 3, 1, 1)
    class PreProcess(torch.nn.Module):
        def __init__(self):
            super().__init__()
        def forward(self, x):
            # BUGFIX: for a float32 input, x[None].float() is a no-copy
            # view of the caller's tensor, so the previous in-place
            # `-=`/`/=` silently mutated the input. Normalize out-of-place.
            x = x[None].float()
            x = (x - mean.to(x.device)) / std.to(x.device)
            return x
    return PreProcess().eval()
def main():
    """Run a deployed (ONNX/TensorRT) detector over images and draw boxes."""
    args = parse_args()
    # register all modules in mmdet into the registries
    register_all_modules()
    # Fixed random palette, one color per possible label index.
    colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(1000)]
    # build the model from a config file and a checkpoint file
    if args.checkpoint.endswith('.onnx'):
        model = ORTWrapper(args.checkpoint, args.device)
    elif args.checkpoint.endswith('.engine') or args.checkpoint.endswith(
            '.plan'):
        model = TRTWrapper(args.checkpoint, args.device)
    else:
        raise NotImplementedError
    model.to(args.device)
    cfg = Config.fromfile(args.config)
    class_names = cfg.get('class_name')
    # Reuse the config's test pipeline, but feed in-memory ndarrays.
    test_pipeline = get_test_pipeline_cfg(cfg)
    test_pipeline[0] = ConfigDict({'type': 'mmdet.LoadImageFromNDArray'})
    test_pipeline = Compose(test_pipeline)
    pre_pipeline = preprocess(cfg)
    if not args.show:
        path.mkdir_or_exist(args.out_dir)
    # get file list
    files, source_type = get_file_list(args.img)
    # start detector inference
    progress_bar = ProgressBar(len(files))
    for i, file in enumerate(files):
        bgr = mmcv.imread(file)
        rgb = mmcv.imconvert(bgr, 'bgr', 'rgb')
        data, samples = test_pipeline(dict(img=rgb, img_id=i)).values()
        # pad_param: presumably (top, bottom, left, right) from the
        # letterbox transform; the (left, top) offsets are subtracted from
        # both box corners below — TODO confirm against the pipeline.
        pad_param = samples.get('pad_param',
                                np.array([0, 0, 0, 0], dtype=np.float32))
        h, w = samples.get('ori_shape', rgb.shape[:2])
        pad_param = torch.asarray(
            [pad_param[2], pad_param[0], pad_param[2], pad_param[0]],
            device=args.device)
        scale_factor = samples.get('scale_factor', [1., 1])
        scale_factor = torch.asarray(scale_factor * 2, device=args.device)
        data = pre_pipeline(data).to(args.device)
        result = model(data)
        if source_type['is_dir']:
            filename = os.path.relpath(file, args.img).replace('/', '_')
        else:
            filename = os.path.basename(file)
        out_file = None if args.show else os.path.join(args.out_dir, filename)
        # Get candidate predict info by num_dets
        num_dets, bboxes, scores, labels = result
        scores = scores[0, :num_dets]
        bboxes = bboxes[0, :num_dets]
        labels = labels[0, :num_dets]
        # Undo letterbox padding and resize to map boxes back to the
        # original image, then clamp to image bounds.
        bboxes -= pad_param
        bboxes /= scale_factor
        bboxes[:, 0::2].clamp_(0, w)
        bboxes[:, 1::2].clamp_(0, h)
        bboxes = bboxes.round().int()
        for (bbox, score, label) in zip(bboxes, scores, labels):
            bbox = bbox.tolist()
            color = colors[label]
            if class_names is not None:
                label_name = class_names[label]
                name = f'cls:{label_name}_score:{score:0.4f}'
            else:
                name = f'cls:{label}_score:{score:0.4f}'
            cv2.rectangle(bgr, bbox[:2], bbox[2:], color, 2)
            cv2.putText(
                bgr,
                name, (bbox[0], bbox[1] - 2),
                cv2.FONT_HERSHEY_SIMPLEX,
                2.0, [225, 255, 255],
                thickness=3)
        if args.show:
            mmcv.imshow(bgr, 'result', 0)
        else:
            mmcv.imwrite(bgr, out_file)
        progress_bar.update()
# Run the demo when invoked as a script.
if __name__ == '__main__':
    main()
| 4,968 | 31.477124 | 78 | py |
mmyolo | mmyolo-main/projects/easydeploy/bbox_code/bbox_coder.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch
from torch import Tensor
def yolov5_bbox_decoder(priors: Tensor, bbox_preds: Tensor,
                        stride: Tensor) -> Tensor:
    """Decode YOLOv5-style regressions against xyxy anchor priors.

    Args:
        priors: Anchor boxes in (x1, y1, x2, y2) format.
        bbox_preds: Raw regressions; sigmoided before decoding.
        stride: Per-prior feature-map stride.
    Returns:
        Tensor: Decoded boxes in (cx, cy, w, h) format.
    """
    preds = bbox_preds.sigmoid()
    x1, y1, x2, y2 = (priors[..., i] for i in range(4))
    anchor_cx = (x1 + x2) * 0.5
    anchor_cy = (y1 + y2) * 0.5
    anchor_w = x2 - x1
    anchor_h = y2 - y1
    # Center offsets live in [-stride, stride]; sizes scale the anchor
    # by up to 4x, per the YOLOv5 decoding scheme.
    cx = (preds[..., 0] - 0.5) * 2 * stride + anchor_cx
    cy = (preds[..., 1] - 0.5) * 2 * stride + anchor_cy
    w = (preds[..., 2] * 2)**2 * anchor_w
    h = (preds[..., 3] * 2)**2 * anchor_h
    return torch.stack([cx, cy, w, h], dim=-1)
def rtmdet_bbox_decoder(priors: Tensor, bbox_preds: Tensor,
                        stride: Optional[Tensor]) -> Tensor:
    """Decode RTMDet distance predictions (l, t, r, b) into xyxy boxes.

    Args:
        priors: Anchor points; ``priors[..., 0]``/``[..., 1]`` are the
            (x, y) centers.
        bbox_preds: Predicted distances to the four box sides, in stride
            units.
        stride: Per-prior stride used to rescale the distances.
    Returns:
        Tensor: Decoded boxes as (tl_x, tl_y, br_x, br_y).
    """
    stride = stride[None, :, None]
    # BUGFIX: scale out-of-place — the original `bbox_preds *= stride`
    # mutated the caller's tensor in place.
    bbox_preds = bbox_preds * stride
    tl_x = (priors[..., 0] - bbox_preds[..., 0])
    tl_y = (priors[..., 1] - bbox_preds[..., 1])
    br_x = (priors[..., 0] + bbox_preds[..., 2])
    br_y = (priors[..., 1] + bbox_preds[..., 3])
    decoded_bboxes = torch.stack([tl_x, tl_y, br_x, br_y], -1)
    return decoded_bboxes
def yolox_bbox_decoder(priors: Tensor, bbox_preds: Tensor,
                       stride: Optional[Tensor]) -> Tensor:
    """Decode YOLOX regressions into (cx, cy, w, h) boxes.

    Centers are offsets from the prior points in stride units; sizes are
    exponentiated and scaled by the stride.
    """
    per_prior_stride = stride[None, :, None]
    centers = bbox_preds[..., :2] * per_prior_stride + priors
    sizes = torch.exp(bbox_preds[..., 2:]) * per_prior_stride
    return torch.cat([centers, sizes], dim=-1)
| 1,608 | 33.234043 | 70 | py |
mmyolo | mmyolo-main/projects/easydeploy/model/backendwrapper.py | import warnings
from collections import namedtuple
from functools import partial
from pathlib import Path
from typing import List, Optional, Union
import numpy as np
import onnxruntime
try:
import tensorrt as trt
except Exception:
trt = None
import torch
warnings.filterwarnings(action='ignore', category=DeprecationWarning)
class TRTWrapper(torch.nn.Module):
    """``nn.Module`` wrapper around a serialized TensorRT engine.

    Loads a ``.engine``/``.plan`` file, records its input/output bindings
    and runs inference asynchronously on a dedicated CUDA stream.
    """
    # Maps TensorRT dtypes to torch dtypes. Populated lazily in
    # __update_mapping because ``trt`` may be None at import time.
    dtype_mapping = {}
    def __init__(self, weight: Union[str, Path],
                 device: Optional[torch.device]):
        super().__init__()
        weight = Path(weight) if isinstance(weight, str) else weight
        assert weight.exists() and weight.suffix in ('.engine', '.plan')
        if isinstance(device, str):
            device = torch.device(device)
        elif isinstance(device, int):
            device = torch.device(f'cuda:{device}')
        self.weight = weight
        self.device = device
        self.stream = torch.cuda.Stream(device=device)
        self.__update_mapping()
        self.__init_engine()
        self.__init_bindings()
    def __update_mapping(self):
        """Fill the trt -> torch dtype table (requires tensorrt)."""
        self.dtype_mapping.update({
            trt.bool: torch.bool,
            trt.int8: torch.int8,
            trt.int32: torch.int32,
            trt.float16: torch.float16,
            trt.float32: torch.float32
        })
    def __init_engine(self):
        """Deserialize the engine and split its bindings into inputs/outputs."""
        logger = trt.Logger(trt.Logger.ERROR)
        self.log = partial(logger.log, trt.Logger.ERROR)
        trt.init_libnvinfer_plugins(logger, namespace='')
        self.logger = logger
        with trt.Runtime(logger) as runtime:
            model = runtime.deserialize_cuda_engine(self.weight.read_bytes())
        context = model.create_execution_context()
        names = [model.get_binding_name(i) for i in range(model.num_bindings)]
        num_inputs, num_outputs = 0, 0
        for i in range(model.num_bindings):
            if model.binding_is_input(i):
                num_inputs += 1
            else:
                num_outputs += 1
        # A -1 dim in the first input marks a dynamic-shape engine.
        self.is_dynamic = -1 in model.get_binding_shape(0)
        self.model = model
        self.context = context
        self.input_names = names[:num_inputs]
        self.output_names = names[num_inputs:]
        self.num_inputs = num_inputs
        self.num_outputs = num_outputs
        self.num_bindings = num_inputs + num_outputs
        # Raw device pointers handed to execute_async_v2.
        self.bindings: List[int] = [0] * self.num_bindings
    def __init_bindings(self):
        """Record (name, dtype, shape) per binding; pre-allocate outputs
        when every shape is static."""
        Binding = namedtuple('Binding', ('name', 'dtype', 'shape'))
        inputs_info = []
        outputs_info = []
        for i, name in enumerate(self.input_names):
            assert self.model.get_binding_name(i) == name
            dtype = self.dtype_mapping[self.model.get_binding_dtype(i)]
            shape = tuple(self.model.get_binding_shape(i))
            inputs_info.append(Binding(name, dtype, shape))
        for i, name in enumerate(self.output_names):
            i += self.num_inputs
            assert self.model.get_binding_name(i) == name
            dtype = self.dtype_mapping[self.model.get_binding_dtype(i)]
            shape = tuple(self.model.get_binding_shape(i))
            outputs_info.append(Binding(name, dtype, shape))
        self.inputs_info = inputs_info
        self.outputs_info = outputs_info
        if not self.is_dynamic:
            # Static shapes: reuse one output tensor per binding.
            self.output_tensor = [
                torch.empty(o.shape, dtype=o.dtype, device=self.device)
                for o in outputs_info
            ]
    def forward(self, *inputs):
        """Run the engine on ``inputs`` and return its outputs as a tuple."""
        assert len(inputs) == self.num_inputs
        contiguous_inputs: List[torch.Tensor] = [
            i.contiguous() for i in inputs
        ]
        for i in range(self.num_inputs):
            self.bindings[i] = contiguous_inputs[i].data_ptr()
            if self.is_dynamic:
                self.context.set_binding_shape(
                    i, tuple(contiguous_inputs[i].shape))
        # create output tensors
        outputs: List[torch.Tensor] = []
        for i in range(self.num_outputs):
            j = i + self.num_inputs
            if self.is_dynamic:
                shape = tuple(self.context.get_binding_shape(j))
                # BUGFIX: the dtype was read from ``self.output_dtypes``,
                # which is never assigned anywhere in this class and raised
                # AttributeError on any dynamic-shape engine. Use the dtype
                # recorded per binding in __init_bindings instead.
                output = torch.empty(
                    size=shape,
                    dtype=self.outputs_info[i].dtype,
                    device=self.device)
            else:
                output = self.output_tensor[i]
            outputs.append(output)
            self.bindings[j] = output.data_ptr()
        self.context.execute_async_v2(self.bindings, self.stream.cuda_stream)
        self.stream.synchronize()
        return tuple(outputs)
class ORTWrapper(torch.nn.Module):
    """``nn.Module`` wrapper around an ONNX Runtime inference session.

    Tensors are moved to CPU numpy arrays for the session and the outputs
    are returned as torch tensors on ``self.device``.
    """
    def __init__(self, weight: Union[str, Path],
                 device: Optional[torch.device]):
        super().__init__()
        weight = Path(weight) if isinstance(weight, str) else weight
        assert weight.exists() and weight.suffix == '.onnx'
        if isinstance(device, str):
            device = torch.device(device)
        elif isinstance(device, int):
            device = torch.device(f'cuda:{device}')
        self.weight = weight
        self.device = device
        self.__init_session()
        self.__init_bindings()
    def __init_session(self):
        """Create the ORT session, preferring CUDA when requested."""
        providers = ['CPUExecutionProvider']
        if 'cuda' in self.device.type:
            providers.insert(0, 'CUDAExecutionProvider')
        session = onnxruntime.InferenceSession(
            str(self.weight), providers=providers)
        self.session = session
    def __init_bindings(self):
        """Record (name, dtype, shape) for the model's inputs/outputs and
        detect dynamic input shapes (non-integer dims)."""
        Binding = namedtuple('Binding', ('name', 'dtype', 'shape'))
        inputs_info = []
        outputs_info = []
        self.is_dynamic = False
        for i, tensor in enumerate(self.session.get_inputs()):
            if any(not isinstance(i, int) for i in tensor.shape):
                self.is_dynamic = True
            inputs_info.append(
                Binding(tensor.name, tensor.type, tuple(tensor.shape)))
        for i, tensor in enumerate(self.session.get_outputs()):
            outputs_info.append(
                Binding(tensor.name, tensor.type, tuple(tensor.shape)))
        self.inputs_info = inputs_info
        self.outputs_info = outputs_info
        self.num_inputs = len(inputs_info)
    def forward(self, *inputs):
        """Run the session on ``inputs`` and return torch tensors."""
        assert len(inputs) == self.num_inputs
        contiguous_inputs: List[np.ndarray] = [
            i.contiguous().cpu().numpy() for i in inputs
        ]
        if not self.is_dynamic:
            # make sure input shape is right for static input shape
            for i in range(self.num_inputs):
                assert contiguous_inputs[i].shape == self.inputs_info[i].shape
        outputs = self.session.run([o.name for o in self.outputs_info], {
            j.name: contiguous_inputs[i]
            for i, j in enumerate(self.inputs_info)
        })
        return tuple(torch.from_numpy(o).to(self.device) for o in outputs)
| 6,885 | 32.921182 | 78 | py |
mmyolo | mmyolo-main/projects/easydeploy/model/model.py | # Copyright (c) OpenMMLab. All rights reserved.
from functools import partial
from typing import List, Optional
import torch
import torch.nn as nn
from mmdet.models.backbones.csp_darknet import Focus
from mmengine.config import ConfigDict
from torch import Tensor
from mmyolo.models import RepVGGBlock
from mmyolo.models.dense_heads import (RTMDetHead, YOLOv5Head, YOLOv7Head,
YOLOXHead)
from mmyolo.models.layers import CSPLayerWithTwoConv
from ..backbone import DeployC2f, DeployFocus, GConvFocus, NcnnFocus
from ..bbox_code import (rtmdet_bbox_decoder, yolov5_bbox_decoder,
yolox_bbox_decoder)
from ..nms import batched_nms, efficient_nms, onnx_nms
class DeployModel(nn.Module):
    """Wrap a mmyolo detector so it exports cleanly to deployment backends.

    Replaces export-unfriendly layers (RepVGG blocks, Focus stem, C2f)
    with deploy-time equivalents and, when ``postprocess_cfg`` is given,
    appends box decoding + NMS so the exported graph emits final
    detections.

    Backend codes (from the comments in ``__switch_deploy`` /
    ``select_nms``): 1 = onnxruntime, 2 = TensorRT8, 3 = TensorRT7,
    4 = ncnn, anything else falls back to grouped-conv Focus.
    """
    def __init__(self,
                 baseModel: nn.Module,
                 postprocess_cfg: Optional[ConfigDict] = None):
        super().__init__()
        self.baseModel = baseModel
        if postprocess_cfg is None:
            self.with_postprocess = False
            # BUGFIX: __switch_deploy reads self.backend even in the
            # model-only path (whenever the backbone contains a Focus
            # stem), so it must always be set. 1 mirrors the default used
            # in the branch below.
            self.backend = 1
        else:
            self.with_postprocess = True
            self.baseHead = baseModel.bbox_head
            self.__init_sub_attributes()
            self.detector_type = type(self.baseHead)
            self.pre_top_k = postprocess_cfg.get('pre_top_k', 1000)
            self.keep_top_k = postprocess_cfg.get('keep_top_k', 100)
            self.iou_threshold = postprocess_cfg.get('iou_threshold', 0.65)
            self.score_threshold = postprocess_cfg.get('score_threshold', 0.25)
            self.backend = postprocess_cfg.get('backend', 1)
        self.__switch_deploy()
    def __init_sub_attributes(self):
        """Cache head attributes needed by ``pred_by_feat``."""
        self.bbox_decoder = self.baseHead.bbox_coder.decode
        self.prior_generate = self.baseHead.prior_generator.grid_priors
        self.num_base_priors = self.baseHead.num_base_priors
        self.featmap_strides = self.baseHead.featmap_strides
        self.num_classes = self.baseHead.num_classes
    def __switch_deploy(self):
        """Swap training-time modules for export-friendly variants."""
        for layer in self.baseModel.modules():
            if isinstance(layer, RepVGGBlock):
                layer.switch_to_deploy()
            elif isinstance(layer, Focus):
                # onnxruntime tensorrt8 tensorrt7
                if self.backend in (1, 2, 3):
                    self.baseModel.backbone.stem = DeployFocus(layer)
                # ncnn
                elif self.backend == 4:
                    self.baseModel.backbone.stem = NcnnFocus(layer)
                # switch focus to group conv
                else:
                    self.baseModel.backbone.stem = GConvFocus(layer)
            elif isinstance(layer, CSPLayerWithTwoConv):
                # Keep the layer's state but use DeployC2f.forward.
                setattr(layer, '__class__', DeployC2f)
    def pred_by_feat(self,
                     cls_scores: List[Tensor],
                     bbox_preds: List[Tensor],
                     objectnesses: Optional[List[Tensor]] = None,
                     **kwargs):
        """Decode multi-level head outputs and apply backend-specific NMS.

        Args:
            cls_scores: Per-level classification maps (N, A*C, H, W).
            bbox_preds: Per-level regression maps (N, A*4, H, W).
            objectnesses: Optional per-level objectness maps.
        Returns:
            The outputs of the selected NMS op
            (num_dets, boxes, scores, labels).
        """
        assert len(cls_scores) == len(bbox_preds)
        dtype = cls_scores[0].dtype
        device = cls_scores[0].device
        nms_func = self.select_nms()
        # Pick the decoder matching the head family; fall back to the
        # head's own bbox coder.
        if self.detector_type in (YOLOv5Head, YOLOv7Head):
            bbox_decoder = yolov5_bbox_decoder
        elif self.detector_type is RTMDetHead:
            bbox_decoder = rtmdet_bbox_decoder
        elif self.detector_type is YOLOXHead:
            bbox_decoder = yolox_bbox_decoder
        else:
            bbox_decoder = self.bbox_decoder
        num_imgs = cls_scores[0].shape[0]
        featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores]
        mlvl_priors = self.prior_generate(
            featmap_sizes, dtype=dtype, device=device)
        flatten_priors = torch.cat(mlvl_priors)
        # One stride value per flattened prior.
        mlvl_strides = [
            flatten_priors.new_full(
                (featmap_size[0] * featmap_size[1] * self.num_base_priors, ),
                stride) for featmap_size, stride in zip(
                    featmap_sizes, self.featmap_strides)
        ]
        flatten_stride = torch.cat(mlvl_strides)
        # flatten cls_scores, bbox_preds and objectness
        flatten_cls_scores = [
            cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1,
                                                  self.num_classes)
            for cls_score in cls_scores
        ]
        cls_scores = torch.cat(flatten_cls_scores, dim=1).sigmoid()
        flatten_bbox_preds = [
            bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4)
            for bbox_pred in bbox_preds
        ]
        flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1)
        if objectnesses is not None:
            flatten_objectness = [
                objectness.permute(0, 2, 3, 1).reshape(num_imgs, -1)
                for objectness in objectnesses
            ]
            flatten_objectness = torch.cat(flatten_objectness, dim=1).sigmoid()
            cls_scores = cls_scores * (flatten_objectness.unsqueeze(-1))
        scores = cls_scores
        bboxes = bbox_decoder(flatten_priors[None], flatten_bbox_preds,
                              flatten_stride)
        return nms_func(bboxes, scores, self.keep_top_k, self.iou_threshold,
                        self.score_threshold, self.pre_top_k, self.keep_top_k)
    def select_nms(self):
        """Return the NMS op matching ``self.backend``; box_coding=1 marks
        (cx, cy, w, h) boxes for the YOLOv5/v7/X decoders."""
        if self.backend == 1:
            nms_func = onnx_nms
        elif self.backend == 2:
            nms_func = efficient_nms
        elif self.backend == 3:
            nms_func = batched_nms
        else:
            raise NotImplementedError
        if type(self.baseHead) in (YOLOv5Head, YOLOv7Head, YOLOXHead):
            nms_func = partial(nms_func, box_coding=1)
        return nms_func
    def forward(self, inputs: Tensor):
        """Run the detector; post-process only when configured to."""
        neck_outputs = self.baseModel(inputs)
        if self.with_postprocess:
            return self.pred_by_feat(*neck_outputs)
        else:
            return neck_outputs
| 5,871 | 37.887417 | 79 | py |
mmyolo | mmyolo-main/projects/easydeploy/nms/ort_nms.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from torch import Tensor
# Right-multiplication matrix converting (cx, cy, w, h) boxes to
# (x1, y1, x2, y2): boxes_xyxy = boxes_cxcywh @ _XYWH2XYXY, since
# x1 = cx - 0.5*w, x2 = cx + 0.5*w (and likewise for y).
_XYWH2XYXY = torch.tensor([[1.0, 0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0],
                           [-0.5, 0.0, 0.5, 0.0], [0.0, -0.5, 0.0, 0.5]],
                          dtype=torch.float32)
def select_nms_index(scores: Tensor,
                     boxes: Tensor,
                     nms_index: Tensor,
                     batch_size: int,
                     keep_top_k: int = -1):
    """Gather NonMaxSuppression survivors back into dense per-image tensors.

    Args:
        scores (Tensor): (batch, num_classes, num_boxes) class scores.
        boxes (Tensor): (batch, num_boxes, 4) boxes.
        nms_index (Tensor): (num_selected, 3) rows of
            [batch_index, class_index, box_index] as emitted by the ONNX
            ``NonMaxSuppression`` op.
        batch_size (int): Number of images in the batch.
        keep_top_k (int): Kept for interface parity.
            # NOTE(review): not read anywhere in this body — confirm.

    Returns:
        tuple: ``(num_dets, boxes, scores, labels)`` where invalid slots
        carry all-zero boxes and label -1.
    """
    batch_inds, cls_inds = nms_index[:, 0], nms_index[:, 1]
    box_inds = nms_index[:, 2]
    # Flat gather of every kept (box, score) pair across the whole batch.
    scores = scores[batch_inds, cls_inds, box_inds].unsqueeze(1)
    boxes = boxes[batch_inds, box_inds, ...]
    dets = torch.cat([boxes, scores], dim=1)
    # Broadcast the flat list to every image, then zero out rows whose
    # batch index does not match that image.
    batched_dets = dets.unsqueeze(0).repeat(batch_size, 1, 1)
    batch_template = torch.arange(
        0, batch_size, dtype=batch_inds.dtype, device=batch_inds.device)
    batched_dets = batched_dets.where(
        (batch_inds == batch_template.unsqueeze(1)).unsqueeze(-1),
        batched_dets.new_zeros(1))
    batched_labels = cls_inds.unsqueeze(0).repeat(batch_size, 1)
    batched_labels = batched_labels.where(
        (batch_inds == batch_template.unsqueeze(1)),
        batched_labels.new_ones(1) * -1)
    N = batched_dets.shape[0]
    # Append one zero-score sentinel row (label -1) per image so the sort
    # below is well defined even when an image keeps no boxes.
    batched_dets = torch.cat((batched_dets, batched_dets.new_zeros((N, 1, 5))),
                             1)
    batched_labels = torch.cat((batched_labels, -batched_labels.new_ones(
        (N, 1))), 1)
    # Sort each image's rows by score (last det column) so valid detections
    # come first; reorder labels with the same permutation.
    _, topk_inds = batched_dets[:, :, -1].sort(dim=1, descending=True)
    topk_batch_inds = torch.arange(
        batch_size, dtype=topk_inds.dtype,
        device=topk_inds.device).view(-1, 1)
    batched_dets = batched_dets[topk_batch_inds, topk_inds, ...]
    batched_labels = batched_labels[topk_batch_inds, topk_inds, ...]
    batched_dets, batched_scores = batched_dets.split([4, 1], 2)
    batched_scores = batched_scores.squeeze(-1)
    # A slot is a valid detection iff its masked score is still positive.
    num_dets = (batched_scores > 0).sum(1, keepdim=True)
    return num_dets, batched_dets, batched_scores, batched_labels
class ONNXNMSop(torch.autograd.Function):
    """Autograd stub that exports to the ONNX ``NonMaxSuppression`` node.

    ``forward`` only runs while tracing: it fabricates a plausible
    (num_det, 3) ``selected_indices`` tensor of [batch, class, box] rows so
    shape propagation works; the backend executes the real NMS.
    """
    @staticmethod
    def forward(
        ctx,
        boxes: Tensor,
        scores: Tensor,
        max_output_boxes_per_class: Tensor = torch.tensor([100]),
        iou_threshold: Tensor = torch.tensor([0.5]),
        score_threshold: Tensor = torch.tensor([0.05])
    ) -> Tensor:
        device = boxes.device
        batch = scores.shape[0]
        num_det = 20
        # Random batch indices sorted ascending to mimic real NMS output
        # ordering; class index fixed at 0, box indices arbitrary.
        batches = torch.randint(0, batch, (num_det, )).sort()[0].to(device)
        idxs = torch.arange(100, 100 + num_det).to(device)
        zeros = torch.zeros((num_det, ), dtype=torch.int64).to(device)
        selected_indices = torch.cat([batches[None], zeros[None], idxs[None]],
                                     0).T.contiguous()
        selected_indices = selected_indices.to(torch.int64)
        return selected_indices
    @staticmethod
    def symbolic(
        g,
        boxes: Tensor,
        scores: Tensor,
        max_output_boxes_per_class: Tensor = torch.tensor([100]),
        iou_threshold: Tensor = torch.tensor([0.5]),
        score_threshold: Tensor = torch.tensor([0.05]),
    ):
        # Lower directly to the standard ONNX operator; one output tensor
        # of selected [batch, class, box] index triples.
        return g.op(
            'NonMaxSuppression',
            boxes,
            scores,
            max_output_boxes_per_class,
            iou_threshold,
            score_threshold,
            outputs=1)
def onnx_nms(
    boxes: torch.Tensor,
    scores: torch.Tensor,
    max_output_boxes_per_class: int = 100,
    iou_threshold: float = 0.5,
    score_threshold: float = 0.05,
    pre_top_k: int = -1,
    keep_top_k: int = 100,
    box_coding: int = 0,
):
    """Export-friendly NMS that lowers to the ONNX ``NonMaxSuppression`` op.

    ``box_coding=1`` means the incoming boxes are (cx, cy, w, h) and are
    converted to (x1, y1, x2, y2) first.  Returns the batched
    ``(num_dets, boxes, scores, int32 labels)`` tuple assembled by
    :func:`select_nms_index`.
    """
    # The ONNX op takes its limits/thresholds as one-element tensors.
    max_boxes_t = torch.tensor([max_output_boxes_per_class])
    iou_thr_t = torch.tensor([iou_threshold])
    score_thr_t = torch.tensor([score_threshold])
    batch_size = scores.shape[0]
    if box_coding == 1:
        boxes = boxes @ (_XYWH2XYXY.to(boxes.device))
    # NonMaxSuppression expects scores shaped (batch, classes, boxes).
    scores = scores.transpose(1, 2).contiguous()
    nms_idx = ONNXNMSop.apply(boxes, scores, max_boxes_t, iou_thr_t,
                              score_thr_t)
    num_dets, batched_dets, batched_scores, batched_labels = \
        select_nms_index(
            scores, boxes, nms_idx, batch_size, keep_top_k=keep_top_k)
    return num_dets, batched_dets, batched_scores, batched_labels.to(
        torch.int32)
| 4,445 | 35.146341 | 79 | py |
mmyolo | mmyolo-main/projects/easydeploy/nms/trt_nms.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from torch import Tensor
# Right-multiplication matrix converting (cx, cy, w, h) boxes to
# (x1, y1, x2, y2): boxes_xyxy = boxes_xywh @ _XYWH2XYXY
# (e.g. column 0 yields cx - 0.5 * w = x1).
_XYWH2XYXY = torch.tensor([[1.0, 0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0],
                           [-0.5, 0.0, 0.5, 0.0], [0.0, -0.5, 0.0, 0.5]],
                          dtype=torch.float32)
class TRTEfficientNMSop(torch.autograd.Function):
    """Stub op that exports to the TensorRT ``EfficientNMS_TRT`` plugin.

    ``forward`` only fabricates randomly-filled outputs with the correct
    shapes/dtypes so tracing succeeds; the plugin performs the real NMS
    when the engine runs.
    """
    @staticmethod
    def forward(
        ctx,
        boxes: Tensor,
        scores: Tensor,
        background_class: int = -1,
        box_coding: int = 0,
        iou_threshold: float = 0.45,
        max_output_boxes: int = 100,
        plugin_version: str = '1',
        score_activation: int = 0,
        score_threshold: float = 0.25,
    ):
        batch_size, _, num_classes = scores.shape
        # Dummy outputs: shapes/dtypes match the plugin's contract.
        num_det = torch.randint(
            0, max_output_boxes, (batch_size, 1), dtype=torch.int32)
        det_boxes = torch.randn(batch_size, max_output_boxes, 4)
        det_scores = torch.randn(batch_size, max_output_boxes)
        det_classes = torch.randint(
            0, num_classes, (batch_size, max_output_boxes), dtype=torch.int32)
        return num_det, det_boxes, det_scores, det_classes
    @staticmethod
    def symbolic(g,
                 boxes: Tensor,
                 scores: Tensor,
                 background_class: int = -1,
                 box_coding: int = 0,
                 iou_threshold: float = 0.45,
                 max_output_boxes: int = 100,
                 plugin_version: str = '1',
                 score_activation: int = 0,
                 score_threshold: float = 0.25):
        # Emit the TensorRT plugin node with its attributes
        # (suffixes: _i int, _f float, _s string).
        out = g.op(
            'TRT::EfficientNMS_TRT',
            boxes,
            scores,
            background_class_i=background_class,
            box_coding_i=box_coding,
            iou_threshold_f=iou_threshold,
            max_output_boxes_i=max_output_boxes,
            plugin_version_s=plugin_version,
            score_activation_i=score_activation,
            score_threshold_f=score_threshold,
            outputs=4)
        num_det, det_boxes, det_scores, det_classes = out
        return num_det, det_boxes, det_scores, det_classes
class TRTbatchedNMSop(torch.autograd.Function):
    """Stub op that exports to the TensorRT ``BatchedNMSDynamic_TRT`` plugin.

    Like ``TRTEfficientNMSop``, ``forward`` only produces dummy outputs of
    the right shapes/dtypes for tracing.
    """
    @staticmethod
    def forward(
        ctx,
        boxes: Tensor,
        scores: Tensor,
        plugin_version: str = '1',
        shareLocation: int = 1,
        backgroundLabelId: int = -1,
        numClasses: int = 80,
        topK: int = 1000,
        keepTopK: int = 100,
        scoreThreshold: float = 0.25,
        iouThreshold: float = 0.45,
        isNormalized: int = 0,
        clipBoxes: int = 0,
        scoreBits: int = 16,
        caffeSemantics: int = 1,
    ):
        batch_size, _, numClasses = scores.shape
        # Dummy outputs; note det_classes is float here (plugin convention),
        # callers cast to int afterwards.
        num_det = torch.randint(
            0, keepTopK, (batch_size, 1), dtype=torch.int32)
        det_boxes = torch.randn(batch_size, keepTopK, 4)
        det_scores = torch.randn(batch_size, keepTopK)
        det_classes = torch.randint(0, numClasses,
                                    (batch_size, keepTopK)).float()
        return num_det, det_boxes, det_scores, det_classes
    @staticmethod
    def symbolic(
        g,
        boxes: Tensor,
        scores: Tensor,
        plugin_version: str = '1',
        shareLocation: int = 1,
        backgroundLabelId: int = -1,
        numClasses: int = 80,
        topK: int = 1000,
        keepTopK: int = 100,
        scoreThreshold: float = 0.25,
        iouThreshold: float = 0.45,
        isNormalized: int = 0,
        clipBoxes: int = 0,
        scoreBits: int = 16,
        caffeSemantics: int = 1,
    ):
        # Emit the TensorRT plugin node with its attributes
        # (suffixes: _i int, _f float, _s string).
        out = g.op(
            'TRT::BatchedNMSDynamic_TRT',
            boxes,
            scores,
            shareLocation_i=shareLocation,
            plugin_version_s=plugin_version,
            backgroundLabelId_i=backgroundLabelId,
            numClasses_i=numClasses,
            topK_i=topK,
            keepTopK_i=keepTopK,
            scoreThreshold_f=scoreThreshold,
            iouThreshold_f=iouThreshold,
            isNormalized_i=isNormalized,
            clipBoxes_i=clipBoxes,
            scoreBits_i=scoreBits,
            caffeSemantics_i=caffeSemantics,
            outputs=4)
        num_det, det_boxes, det_scores, det_classes = out
        return num_det, det_boxes, det_scores, det_classes
def _efficient_nms(
    boxes: Tensor,
    scores: Tensor,
    max_output_boxes_per_class: int = 1000,
    iou_threshold: float = 0.5,
    score_threshold: float = 0.05,
    pre_top_k: int = -1,
    keep_top_k: int = 100,
    box_coding: int = 0,
):
    """Wrapper for `efficient_nms` with TensorRT.
    Args:
        boxes (Tensor): The bounding boxes of shape [N, num_boxes, 4].
        scores (Tensor): The detection scores of shape
            [N, num_boxes, num_classes].
        max_output_boxes_per_class (int): Maximum number of output
            boxes per class of nms. Defaults to 1000.
        iou_threshold (float): IOU threshold of nms. Defaults to 0.5.
        score_threshold (float): score threshold of nms.
            Defaults to 0.05.
        pre_top_k (int): Number of top K boxes to keep before nms.
            Defaults to -1.
        keep_top_k (int): Number of top K boxes to keep after nms.
            Defaults to 100.
        box_coding (int): Bounding boxes format for nms.
            Defaults to 0 means [x1, y1 ,x2, y2].
            Set to 1 means [x, y, w, h].
    Returns:
        tuple[Tensor, Tensor, Tensor, Tensor]:
            (num_det, det_boxes, det_scores, det_classes),
            `num_det` of shape [N, 1]
            `det_boxes` of shape [N, num_det, 4]
            `det_scores` of shape [N, num_det]
            `det_classes` of shape [N, num_det]
    """
    # NOTE(review): `max_output_boxes_per_class` and `pre_top_k` are accepted
    # for interface parity but never forwarded; the plugin's max_output_boxes
    # is driven by `keep_top_k` — confirm this is intended.
    num_det, det_boxes, det_scores, det_classes = TRTEfficientNMSop.apply(
        boxes, scores, -1, box_coding, iou_threshold, keep_top_k, '1', 0,
        score_threshold)
    return num_det, det_boxes, det_scores, det_classes
def _batched_nms(
    boxes: Tensor,
    scores: Tensor,
    max_output_boxes_per_class: int = 1000,
    iou_threshold: float = 0.5,
    score_threshold: float = 0.05,
    pre_top_k: int = -1,
    keep_top_k: int = 100,
    box_coding: int = 0,
):
    """Wrapper for `batched_nms` with TensorRT.
    Args:
        boxes (Tensor): The bounding boxes of shape [N, num_boxes, 4].
        scores (Tensor): The detection scores of shape
            [N, num_boxes, num_classes].
        max_output_boxes_per_class (int): Maximum number of output
            boxes per class of nms. Defaults to 1000.
        iou_threshold (float): IOU threshold of nms. Defaults to 0.5.
        score_threshold (float): score threshold of nms.
            Defaults to 0.05.
        pre_top_k (int): Number of top K boxes to keep before nms.
            Defaults to -1.
        keep_top_k (int): Number of top K boxes to keep after nms.
            Defaults to 100.
        box_coding (int): Bounding boxes format for nms.
            Defaults to 0 means [x1, y1 ,x2, y2].
            Set to 1 means [x, y, w, h].
    Returns:
        tuple[Tensor, Tensor, Tensor, Tensor]:
            (num_det, det_boxes, det_scores, det_classes),
            `num_det` of shape [N, 1]
            `det_boxes` of shape [N, num_det, 4]
            `det_scores` of shape [N, num_det]
            `det_classes` of shape [N, num_det]
    """
    if box_coding == 1:
        boxes = boxes @ (_XYWH2XYXY.to(boxes.device))
    # BatchedNMSDynamic_TRT expects a class axis on boxes; insert a
    # singleton one for the shared-location case.
    boxes = boxes if boxes.dim() == 4 else boxes.unsqueeze(2)
    _, _, numClasses = scores.shape
    # NOTE(review): `min(pre_top_k, 4096)` passes topK=-1 through when
    # pre_top_k is the default -1 — verify the plugin treats -1 as
    # "unlimited" rather than an error.
    num_det, det_boxes, det_scores, det_classes = TRTbatchedNMSop.apply(
        boxes, scores, '1', 1, -1, int(numClasses), min(pre_top_k, 4096),
        keep_top_k, score_threshold, iou_threshold, 0, 0, 16, 1)
    # Plugin emits float class ids; downstream expects integers.
    det_classes = det_classes.int()
    return num_det, det_boxes, det_scores, det_classes
def efficient_nms(*args, **kwargs):
    """Public entry point delegating to :func:`_efficient_nms`
    (TensorRT ``EfficientNMS_TRT`` plugin)."""
    return _efficient_nms(*args, **kwargs)
def batched_nms(*args, **kwargs):
    """Public entry point delegating to :func:`_batched_nms`
    (TensorRT ``BatchedNMSDynamic_TRT`` plugin)."""
    return _batched_nms(*args, **kwargs)
| 8,045 | 34.444934 | 78 | py |
mmyolo | mmyolo-main/.dev_scripts/gather_models.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import glob
import os
import os.path as osp
import shutil
import subprocess
import time
from collections import OrderedDict
import torch
import yaml
from mmengine.config import Config
from mmengine.fileio import dump
from mmengine.utils import mkdir_or_exist, scandir
def ordered_yaml_dump(data, stream=None, Dumper=yaml.SafeDumper, **kwds):
    """Dump *data* as YAML while preserving ``OrderedDict`` key order."""
    def _represent_ordered_dict(dumper, value):
        # Emit an OrderedDict as a plain YAML mapping, keys in insertion order.
        return dumper.represent_mapping(
            yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, value.items())
    # Subclass so the representer does not leak onto the caller's Dumper.
    ordered_dumper = type('OrderedDumper', (Dumper, ), {})
    ordered_dumper.add_representer(OrderedDict, _represent_ordered_dict)
    return yaml.dump(data, stream, ordered_dumper, **kwds)
def process_checkpoint(in_file, out_file):
    """Strip training-only state from a checkpoint and publish it.

    Removes the optimizer, message hub, EMA weights and cached priors/grid
    buffers, saves the slimmed checkpoint to ``out_file`` and renames it to
    ``<stem>-<first 8 sha256 hex chars>.pth``.

    Args:
        in_file (str): Path of the trained checkpoint to read.
        out_file (str): Path the slimmed checkpoint is first written to.

    Returns:
        str: Path of the final, hash-suffixed checkpoint file.
    """
    checkpoint = torch.load(in_file, map_location='cpu')
    # remove optimizer for smaller file size
    if 'optimizer' in checkpoint:
        del checkpoint['optimizer']
    if 'message_hub' in checkpoint:
        del checkpoint['message_hub']
    if 'ema_state_dict' in checkpoint:
        del checkpoint['ema_state_dict']
    # Drop buffers that are regenerated at load time.
    for key in list(checkpoint['state_dict']):
        if key.startswith('data_preprocessor'):
            checkpoint['state_dict'].pop(key)
        elif 'priors_base_sizes' in key:
            checkpoint['state_dict'].pop(key)
        elif 'grid_offset' in key:
            checkpoint['state_dict'].pop(key)
        elif 'prior_inds' in key:
            checkpoint['state_dict'].pop(key)
    # if it is necessary to remove some sensitive data in checkpoint['meta'],
    # add the code here.
    if torch.__version__ >= '1.6':
        torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)
    else:
        torch.save(checkpoint, out_file)
    sha = subprocess.check_output(['sha256sum', out_file]).decode()
    # BUGFIX: the previous `out_file.rstrip('.pth')` strips *characters* from
    # the set {., p, t, h}, mangling names like 'best.pth' -> 'bes'.
    # Strip the extension as a suffix instead.
    stem = out_file[:-len('.pth')] if out_file.endswith('.pth') else out_file
    final_file = stem + f'-{sha[:8]}.pth'
    subprocess.Popen(['mv', out_file, final_file])
    return final_file
def is_by_epoch(config):
    """Whether *config* trains with an ``EpochBasedTrainLoop``."""
    loop_type = Config.fromfile('./configs/' + config).train_cfg.type
    return loop_type == 'EpochBasedTrainLoop'
def get_final_epoch_or_iter(config):
    """Return the configured training length (epochs or iterations)."""
    cfg = Config.fromfile('./configs/' + config)
    epoch_based = cfg.train_cfg.type == 'EpochBasedTrainLoop'
    return cfg.train_cfg.max_epochs if epoch_based else cfg.train_cfg.max_iters
def get_best_epoch_or_iter(exp_dir):
    """Pick the lexicographically last ``best_*.pth`` checkpoint in *exp_dir*.

    Returns:
        tuple[str, int]: (checkpoint file name, number parsed from the
        trailing ``_<num>.pth`` component of that name).
    """
    candidates = sorted(glob.glob(osp.join(exp_dir, 'best_*.pth')))
    file_name = candidates[-1].split('/')[-1]
    # e.g. 'best_coco_bbox_mAP_epoch_300.pth' -> '300'
    number = file_name.split('_')[-1].split('.')[0]
    return file_name, int(number)
def get_real_epoch_or_iter(config):
    """Return the total training epochs or iterations declared in *config*.

    CONSISTENCY FIX: the iteration branch previously read the legacy
    ``cfg.runner.max_iters`` field, while the sibling
    ``get_final_epoch_or_iter`` reads ``cfg.train_cfg.max_iters`` for the
    same configs — use ``train_cfg.max_iters`` in both.
    """
    cfg = Config.fromfile('./configs/' + config)
    if cfg.train_cfg.type == 'EpochBasedTrainLoop':
        epoch = cfg.train_cfg.max_epochs
        return epoch
    else:
        return cfg.train_cfg.max_iters
def get_final_results(log_json_path,
                      epoch_or_iter,
                      results_lut='coco/bbox_mAP',
                      by_epoch=True):
    """Parse the final metric value from the last line of a log file.

    ``epoch_or_iter`` and ``by_epoch`` are kept for interface compatibility
    but are not used: only the last logged line is inspected.
    """
    with open(log_json_path) as f:
        last_line = f.readlines()[-1]
    # e.g. 'coco/bbox_mAP: 0.45, loss: 0.1' -> '0.45'
    metric_value = last_line.split(',')[0].split(': ')[-1].strip()
    return {results_lut: metric_value}
def get_dataset_name(config):
    """Map a config's ``dataset_type`` to a human-readable dataset name."""
    # Extend this table when new dataset types are supported.
    name_map = {
        'CityscapesDataset': 'Cityscapes',
        'CocoDataset': 'COCO',
        'YOLOv5CocoDataset': 'COCO',
        'CocoPanopticDataset': 'COCO',
        'YOLOv5DOTADataset': 'DOTA 1.0',
        'DeepFashionDataset': 'Deep Fashion',
        'LVISV05Dataset': 'LVIS v0.5',
        'LVISV1Dataset': 'LVIS v1',
        'VOCDataset': 'Pascal VOC',
        'YOLOv5VOCDataset': 'Pascal VOC',
        'WIDERFaceDataset': 'WIDER Face',
        'OpenImagesDataset': 'OpenImagesDataset',
        'OpenImagesChallengeDataset': 'OpenImagesChallengeDataset',
    }
    return name_map[Config.fromfile('./configs/' + config).dataset_type]
def find_last_dir(model_dir):
    """Return the name of the newest subdirectory of *model_dir* whose name
    is a ``%Y%m%d_%H%M%S`` timestamp."""
    stamped = [(time.mktime(time.strptime(entry.name, '%Y%m%d_%H%M%S')),
                entry.name) for entry in os.scandir(model_dir)
               if osp.isdir(entry)]
    return max(stamped, key=lambda item: item[0])[1]
def convert_model_info_to_pwc(model_infos):
    """Convert gathered model infos into papers-with-code metafile dicts,
    keyed by each config's parent folder name."""
    pwc_files = {}
    for model in model_infos:
        cfg_folder_name = osp.split(model['config'])[-2]
        pwc_model_info = OrderedDict()
        pwc_model_info['Name'] = osp.split(model['config'])[-1].split('.')[0]
        pwc_model_info['In Collection'] = 'Please fill in Collection name'
        pwc_model_info['Config'] = osp.join('configs', model['config'])
        # get metadata
        meta_data = OrderedDict()
        if 'epochs' in model:
            meta_data['Epochs'] = get_real_epoch_or_iter(model['config'])
        else:
            meta_data['Iterations'] = get_real_epoch_or_iter(model['config'])
        pwc_model_info['Metadata'] = meta_data
        # get dataset name
        dataset_name = get_dataset_name(model['config'])
        # get results
        results = []
        # if there are more metrics, add here.
        if 'bbox_mAP' in model['results']:
            metric = round(model['results']['bbox_mAP'] * 100, 1)
            results.append(
                OrderedDict(
                    Task='Object Detection',
                    Dataset=dataset_name,
                    Metrics={'box AP': metric}))
        if 'segm_mAP' in model['results']:
            metric = round(model['results']['segm_mAP'] * 100, 1)
            results.append(
                OrderedDict(
                    Task='Instance Segmentation',
                    Dataset=dataset_name,
                    Metrics={'mask AP': metric}))
        if 'PQ' in model['results']:
            metric = round(model['results']['PQ'], 1)
            results.append(
                OrderedDict(
                    Task='Panoptic Segmentation',
                    Dataset=dataset_name,
                    Metrics={'PQ': metric}))
        pwc_model_info['Results'] = results
        link_string = 'https://download.openmmlab.com/mmyolo/v0/'
        # NOTE(review): rstrip('.py') strips characters, not the suffix —
        # config stems ending in 'p'/'y'/'.' would be mangled; verify.
        link_string += '{}/{}'.format(model['config'].rstrip('.py'),
                                      osp.split(model['model_path'])[-1])
        pwc_model_info['Weights'] = link_string
        if cfg_folder_name in pwc_files:
            pwc_files[cfg_folder_name].append(pwc_model_info)
        else:
            pwc_files[cfg_folder_name] = [pwc_model_info]
    return pwc_files
def parse_args():
    """Parse command-line arguments for the model-gathering script."""
    parser = argparse.ArgumentParser(description='Gather benchmarked models')
    parser.add_argument(
        'root',
        type=str,
        help='root path of benchmarked models to be gathered')
    parser.add_argument(
        'out', type=str, help='output path of gathered models to be stored')
    parser.add_argument(
        '--best',
        action='store_true',
        help='whether to gather the best model.')
    return parser.parse_args()
# TODO: Refine
def main():
    """Gather finished experiments under ``args.root``, slim and hash their
    checkpoints into ``args.out``, and emit model_info.json plus per-folder
    papers-with-code metafiles."""
    args = parse_args()
    models_root = args.root
    models_out = args.out
    mkdir_or_exist(models_out)
    # find all models in the root directory to be gathered
    raw_configs = list(scandir('./configs', '.py', recursive=True))
    # filter configs that is not trained in the experiments dir
    used_configs = []
    for raw_config in raw_configs:
        if osp.exists(osp.join(models_root, raw_config)):
            used_configs.append(raw_config)
    print(f'Find {len(used_configs)} models to be gathered')
    # find final_ckpt and log file for trained each config
    # and parse the best performance
    model_infos = []
    for used_config in used_configs:
        exp_dir = osp.join(models_root, used_config)
        by_epoch = is_by_epoch(used_config)
        # check whether the exps is finished
        if args.best is True:
            final_model, final_epoch_or_iter = get_best_epoch_or_iter(exp_dir)
        else:
            final_epoch_or_iter = get_final_epoch_or_iter(used_config)
            final_model = '{}_{}.pth'.format('epoch' if by_epoch else 'iter',
                                             final_epoch_or_iter)
        model_path = osp.join(exp_dir, final_model)
        # skip if the model is still training
        if not osp.exists(model_path):
            continue
        # get the latest logs
        latest_exp_name = find_last_dir(exp_dir)
        latest_exp_json = osp.join(exp_dir, latest_exp_name, 'vis_data',
                                   latest_exp_name + '.json')
        model_performance = get_final_results(
            latest_exp_json, final_epoch_or_iter, by_epoch=by_epoch)
        if model_performance is None:
            continue
        model_info = dict(
            config=used_config,
            results=model_performance,
            final_model=final_model,
            latest_exp_json=latest_exp_json,
            latest_exp_name=latest_exp_name)
        model_info['epochs' if by_epoch else 'iterations'] = \
            final_epoch_or_iter
        model_infos.append(model_info)
    # publish model for each checkpoint
    publish_model_infos = []
    for model in model_infos:
        # NOTE(review): rstrip('.py') strips characters, not the '.py'
        # suffix — config names whose stem ends in 'p'/'y'/'.' get mangled.
        model_publish_dir = osp.join(models_out, model['config'].rstrip('.py'))
        mkdir_or_exist(model_publish_dir)
        model_name = osp.split(model['config'])[-1].split('.')[0]
        model_name += '_' + model['latest_exp_name']
        publish_model_path = osp.join(model_publish_dir, model_name)
        trained_model_path = osp.join(models_root, model['config'],
                                      model['final_model'])
        # convert model
        final_model_path = process_checkpoint(trained_model_path,
                                              publish_model_path)
        # copy log
        shutil.copy(model['latest_exp_json'],
                    osp.join(model_publish_dir, f'{model_name}.log.json'))
        # copy config to guarantee reproducibility
        config_path = model['config']
        config_path = osp.join(
            'configs',
            config_path) if 'configs' not in config_path else config_path
        target_config_path = osp.split(config_path)[-1]
        shutil.copy(config_path, osp.join(model_publish_dir,
                                          target_config_path))
        model['model_path'] = final_model_path
        publish_model_infos.append(model)
    models = dict(models=publish_model_infos)
    print(f'Totally gathered {len(publish_model_infos)} models')
    dump(models, osp.join(models_out, 'model_info.json'))
    pwc_files = convert_model_info_to_pwc(publish_model_infos)
    for name in pwc_files:
        with open(osp.join(models_out, name + '_metafile.yml'), 'w') as f:
            ordered_yaml_dump(pwc_files[name], f, encoding='utf-8')
if __name__ == '__main__':
main()
| 11,017 | 34.314103 | 79 | py |
mmyolo | mmyolo-main/tests/test_engine/test_hooks/test_yolox_mode_switch_hook.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import Mock
import torch
from mmengine.config import Config
from mmengine.runner import Runner
from torch.utils.data import Dataset
from mmyolo.engine.hooks import YOLOXModeSwitchHook
from mmyolo.utils import register_all_modules
class DummyDataset(Dataset):
    """Minimal in-memory dataset so the tests can build a real dataloader."""
    METAINFO = dict() # type: ignore
    # 12 random 2-d feature vectors, all labelled 1.
    data = torch.randn(12, 2)
    label = torch.ones(12)
    @property
    def metainfo(self):
        return self.METAINFO
    def __len__(self):
        return self.data.size(0)
    def __getitem__(self, index):
        return dict(inputs=self.data[index], data_sample=self.label[index])
# Two distinct dummy pipelines so the switch-hook test below can verify
# that the train pipeline is actually replaced.
pipeline1 = [
    dict(type='mmdet.Resize'),
]
pipeline2 = [
    dict(type='mmdet.RandomFlip'),
]
# Make mmyolo modules visible to the registries used by Runner/hooks.
register_all_modules()
class TestYOLOXModeSwitchHook(TestCase):
    """Checks the hook flips the head's aux-bbox flag and swaps pipelines."""
    def test(self):
        train_dataloader = dict(
            dataset=DummyDataset(),
            sampler=dict(type='DefaultSampler', shuffle=True),
            batch_size=3,
            num_workers=0)
        runner = Mock()
        runner.model = Mock()
        runner.model.module = Mock()
        runner.model.bbox_head.use_bbox_aux = False
        runner.cfg.train_dataloader = Config(train_dataloader)
        runner.train_dataloader = Runner.build_dataloader(train_dataloader)
        runner.train_dataloader.dataset.pipeline = pipeline1
        hook = YOLOXModeSwitchHook(
            num_last_epochs=15, new_train_pipeline=pipeline2)
        # test after change mode
        # Epoch 284 of 300 lies within the final 15 epochs, so the hook
        # must switch to the new pipeline and enable the aux bbox branch.
        runner.epoch = 284
        runner.max_epochs = 300
        hook.before_train_epoch(runner)
        self.assertTrue(runner.model.bbox_head.use_bbox_aux)
        self.assertEqual(runner.train_loop.dataloader.dataset.pipeline,
                         pipeline2)
| 1,810 | 25.632353 | 75 | py |
mmyolo | mmyolo-main/tests/test_engine/test_hooks/test_yolov5_param_scheduler_hook.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import Mock
import torch
from mmengine.config import Config
from mmengine.optim import build_optim_wrapper
from mmengine.runner import Runner
from torch import nn
from torch.utils.data import Dataset
from mmyolo.engine.hooks import YOLOv5ParamSchedulerHook
from mmyolo.utils import register_all_modules
class ToyModel(nn.Module):
    """Tiny linear model exposing the mmengine ``forward(mode=...)`` API."""

    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(2, 1)

    def forward(self, inputs, data_samples, mode='tensor'):
        """Return raw outputs; ``mode='loss'`` wraps them as dict(loss=...)."""
        batch = torch.stack(inputs)
        targets = torch.stack(data_samples)
        preds = self.linear(batch)
        if mode != 'loss':
            # Every non-loss mode ('tensor' or otherwise) returns raw outputs.
            return preds
        return dict(loss=(targets - preds).sum())
class DummyDataset(Dataset):
    """Minimal in-memory dataset so the tests can build a real dataloader."""
    METAINFO = dict() # type: ignore
    # 12 random 2-d feature vectors, all labelled 1.
    data = torch.randn(12, 2)
    label = torch.ones(12)
    @property
    def metainfo(self):
        return self.METAINFO
    def __len__(self):
        return self.data.size(0)
    def __getitem__(self, index):
        return dict(inputs=self.data[index], data_sample=self.label[index])
# SGD optim-wrapper config built through YOLOv5OptimizerConstructor;
# `batch_size_per_gpu` feeds its weight-decay scaling.
optim_wrapper = dict(
    type='OptimWrapper',
    optimizer=dict(
        type='SGD',
        lr=0.01,
        momentum=0.937,
        weight_decay=0.0005,
        nesterov=True,
        batch_size_per_gpu=1),
    constructor='YOLOv5OptimizerConstructor')
# Make mmyolo modules visible to the registries used below.
register_all_modules()
class TestYOLOv5ParamSchelerHook(TestCase):
    """Exercises YOLOv5ParamSchedulerHook through warmup and past its end."""
    # NOTE(review): class name misspells 'Scheduler'; kept for compatibility.
    def test(self):
        model = ToyModel()
        train_dataloader = dict(
            dataset=DummyDataset(),
            sampler=dict(type='DefaultSampler', shuffle=True),
            batch_size=3,
            num_workers=0)
        runner = Mock()
        runner.model = model
        runner.optim_wrapper = build_optim_wrapper(model, optim_wrapper)
        runner.cfg.train_dataloader = Config(train_dataloader)
        runner.train_dataloader = Runner.build_dataloader(train_dataloader)
        hook = YOLOv5ParamSchedulerHook(
            scheduler_type='linear', lr_factor=0.01, max_epochs=300)
        # test before train
        runner.epoch = 0
        runner.iter = 0
        hook.before_train(runner)
        for group in runner.optim_wrapper.param_groups:
            self.assertEqual(group['lr'], 0.01)
            self.assertEqual(group['momentum'], 0.937)
        self.assertFalse(hook._warmup_end)
        # test after training 10 steps
        for i in range(10):
            runner.iter += 1
            hook.before_train_iter(runner, 0)
        for group_idx, group in enumerate(runner.optim_wrapper.param_groups):
            # Group 2 (bias group) ramps lr down from its warmup start.
            if group_idx == 2:
                self.assertEqual(round(group['lr'], 5), 0.0991)
            self.assertEqual(group['momentum'], 0.80137)
        self.assertFalse(hook._warmup_end)
        # test after warm up
        # Iteration 1000 is the last warmup step; lr/momentum settle at
        # their configured values and the flag flips on the next step.
        runner.iter = 1000
        hook.before_train_iter(runner, 0)
        self.assertFalse(hook._warmup_end)
        for group in runner.optim_wrapper.param_groups:
            self.assertEqual(group['lr'], 0.01)
            self.assertEqual(group['momentum'], 0.937)
        runner.iter = 1001
        hook.before_train_iter(runner, 0)
        self.assertTrue(hook._warmup_end)
        # test after train_epoch
        hook.after_train_epoch(runner)
        for group in runner.optim_wrapper.param_groups:
            self.assertEqual(group['lr'], 0.01)
            self.assertEqual(group['momentum'], 0.937)
| 3,619 | 27.96 | 77 | py |
mmyolo | mmyolo-main/tests/test_engine/test_optimizers/test_yolov5_optim_constructor.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
from unittest import TestCase
import torch
import torch.nn as nn
from mmengine.optim import build_optim_wrapper
from mmyolo.engine import YOLOv5OptimizerConstructor
from mmyolo.utils import register_all_modules
register_all_modules()
class ExampleModel(nn.Module):
    """Module mixing a bare parameter, biased/bias-free convs and a BN layer,
    giving the optimizer-constructor tests one member per param group."""

    def __init__(self):
        super().__init__()
        self.param1 = nn.Parameter(torch.ones(1))
        self.conv1 = nn.Conv2d(
            in_channels=3, out_channels=4, kernel_size=1, bias=False)
        self.conv2 = nn.Conv2d(in_channels=4, out_channels=2, kernel_size=1)
        self.bn = nn.BatchNorm2d(num_features=2)
class TestYOLOv5OptimizerConstructor(TestCase):
    """Unit tests for ``YOLOv5OptimizerConstructor`` param-group handling."""
    def setUp(self):
        self.model = ExampleModel()
        self.base_lr = 0.01
        self.weight_decay = 0.0001
        self.optim_wrapper_cfg = dict(
            type='OptimWrapper',
            optimizer=dict(
                type='SGD',
                lr=self.base_lr,
                momentum=0.9,
                weight_decay=self.weight_decay,
                batch_size_per_gpu=16))
    def test_init(self):
        YOLOv5OptimizerConstructor(copy.deepcopy(self.optim_wrapper_cfg))
        YOLOv5OptimizerConstructor(
            copy.deepcopy(self.optim_wrapper_cfg),
            paramwise_cfg={'base_total_batch_size': 64})
        # `paramwise_cfg` must include `base_total_batch_size` if not None.
        with self.assertRaises(AssertionError):
            YOLOv5OptimizerConstructor(
                copy.deepcopy(self.optim_wrapper_cfg), paramwise_cfg={'a': 64})
    def test_build(self):
        optim_wrapper = YOLOv5OptimizerConstructor(
            copy.deepcopy(self.optim_wrapper_cfg))(
                self.model)
        # test param_groups
        # Three groups: only group 0 carries weight decay.
        assert len(optim_wrapper.optimizer.param_groups) == 3
        for i in range(3):
            param_groups_i = optim_wrapper.optimizer.param_groups[i]
            assert param_groups_i['lr'] == self.base_lr
            if i == 0:
                assert param_groups_i['weight_decay'] == self.weight_decay
            else:
                assert param_groups_i['weight_decay'] == 0
        # test weight_decay linear scaling
        # 128 per GPU vs the 64 base batch => decay doubles.
        optim_wrapper_cfg = copy.deepcopy(self.optim_wrapper_cfg)
        optim_wrapper_cfg['optimizer']['batch_size_per_gpu'] = 128
        optim_wrapper = YOLOv5OptimizerConstructor(optim_wrapper_cfg)(
            self.model)
        assert optim_wrapper.optimizer.param_groups[0][
            'weight_decay'] == self.weight_decay * 2
        # test without batch_size_per_gpu
        optim_wrapper_cfg = copy.deepcopy(self.optim_wrapper_cfg)
        optim_wrapper_cfg['optimizer'].pop('batch_size_per_gpu')
        optim_wrapper = dict(
            optim_wrapper_cfg, constructor='YOLOv5OptimizerConstructor')
        optim_wrapper = build_optim_wrapper(self.model, optim_wrapper)
        assert optim_wrapper.optimizer.param_groups[0][
            'weight_decay'] == self.weight_decay
| 2,934 | 34.792683 | 79 | py |
mmyolo | mmyolo-main/tests/test_engine/test_optimizers/test_yolov7_optim_wrapper_constructor.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
from unittest import TestCase
import torch
import torch.nn as nn
from mmengine.optim import build_optim_wrapper
from mmyolo.engine import YOLOv7OptimWrapperConstructor
from mmyolo.utils import register_all_modules
register_all_modules()
class ExampleModel(nn.Module):
    """Module mixing a bare parameter, biased/bias-free convs and a BN layer,
    giving the optimizer-constructor tests one member per param group."""

    def __init__(self):
        super().__init__()
        self.param1 = nn.Parameter(torch.ones(1))
        self.conv1 = nn.Conv2d(
            in_channels=3, out_channels=4, kernel_size=1, bias=False)
        self.conv2 = nn.Conv2d(in_channels=4, out_channels=2, kernel_size=1)
        self.bn = nn.BatchNorm2d(num_features=2)
class TestYOLOv7OptimWrapperConstructor(TestCase):
    """Unit tests for ``YOLOv7OptimWrapperConstructor`` param-group handling."""
    def setUp(self):
        self.model = ExampleModel()
        self.base_lr = 0.01
        self.weight_decay = 0.0001
        self.optim_wrapper_cfg = dict(
            type='OptimWrapper',
            optimizer=dict(
                type='SGD',
                lr=self.base_lr,
                momentum=0.9,
                weight_decay=self.weight_decay,
                batch_size_per_gpu=16))
    def test_init(self):
        YOLOv7OptimWrapperConstructor(copy.deepcopy(self.optim_wrapper_cfg))
        YOLOv7OptimWrapperConstructor(
            copy.deepcopy(self.optim_wrapper_cfg),
            paramwise_cfg={'base_total_batch_size': 64})
        # `paramwise_cfg` must include `base_total_batch_size` if not None.
        with self.assertRaises(AssertionError):
            YOLOv7OptimWrapperConstructor(
                copy.deepcopy(self.optim_wrapper_cfg), paramwise_cfg={'a': 64})
    def test_build(self):
        optim_wrapper = YOLOv7OptimWrapperConstructor(
            copy.deepcopy(self.optim_wrapper_cfg))(
                self.model)
        # test param_groups
        # Three groups: only group 0 carries weight decay.
        assert len(optim_wrapper.optimizer.param_groups) == 3
        for i in range(3):
            param_groups_i = optim_wrapper.optimizer.param_groups[i]
            assert param_groups_i['lr'] == self.base_lr
            if i == 0:
                assert param_groups_i['weight_decay'] == self.weight_decay
            else:
                assert param_groups_i['weight_decay'] == 0
        # test weight_decay linear scaling
        # 128 per GPU vs the 64 base batch => decay doubles.
        optim_wrapper_cfg = copy.deepcopy(self.optim_wrapper_cfg)
        optim_wrapper_cfg['optimizer']['batch_size_per_gpu'] = 128
        optim_wrapper = YOLOv7OptimWrapperConstructor(optim_wrapper_cfg)(
            self.model)
        assert optim_wrapper.optimizer.param_groups[0][
            'weight_decay'] == self.weight_decay * 2
        # test without batch_size_per_gpu
        optim_wrapper_cfg = copy.deepcopy(self.optim_wrapper_cfg)
        optim_wrapper_cfg['optimizer'].pop('batch_size_per_gpu')
        optim_wrapper = dict(
            optim_wrapper_cfg, constructor='YOLOv7OptimWrapperConstructor')
        optim_wrapper = build_optim_wrapper(self.model, optim_wrapper)
        assert optim_wrapper.optimizer.param_groups[0][
            'weight_decay'] == self.weight_decay
| 2,958 | 35.085366 | 79 | py |
mmyolo | mmyolo-main/tests/test_deploy/test_object_detection.py | # Copyright (c) OpenMMLab. All rights reserved.
import os
from tempfile import NamedTemporaryFile, TemporaryDirectory
import numpy as np
import pytest
import torch
from mmengine import Config
try:
import importlib
importlib.import_module('mmdeploy')
except ImportError:
pytest.skip('mmdeploy is not installed.', allow_module_level=True)
import mmdeploy.backend.onnxruntime as ort_apis
from mmdeploy.apis import build_task_processor
from mmdeploy.codebase import import_codebase
from mmdeploy.utils import load_config
from mmdeploy.utils.config_utils import register_codebase
from mmdeploy.utils.test import SwitchBackendWrapper
try:
codebase = register_codebase('mmyolo')
import_codebase(codebase, ['mmyolo.deploy'])
except ImportError:
pytest.skip('mmyolo is not installed.', allow_module_level=True)
# Sample model config shipped with the tests; point its dataset and
# evaluator at the bundled tiny COCO annotation file.
model_cfg_path = 'tests/test_deploy/data/model.py'
model_cfg = load_config(model_cfg_path)[0]
model_cfg.test_dataloader.dataset.data_root = \
    'tests/data'
model_cfg.test_dataloader.dataset.ann_file = 'coco_sample.json'
# NOTE(review): the evaluator path 'tests/coco_sample.json' does not match
# the dataloader's data_root + ann_file ('tests/data/coco_sample.json') —
# confirm which location the fixture actually reads.
model_cfg.test_evaluator.ann_file = \
    'tests/coco_sample.json'
# Minimal onnxruntime deployment config for the mmyolo ObjectDetection task.
deploy_cfg = Config(
    dict(
        backend_config=dict(type='onnxruntime'),
        codebase_config=dict(
            type='mmyolo',
            task='ObjectDetection',
            post_processing=dict(
                score_threshold=0.05,
                confidence_threshold=0.005, # for YOLOv3
                iou_threshold=0.5,
                max_output_boxes_per_class=200,
                pre_top_k=5000,
                keep_top_k=100,
                background_label_id=-1,
            ),
            module=['mmyolo.deploy']),
        onnx_config=dict(
            type='onnx',
            export_params=True,
            keep_initializers_as_inputs=False,
            opset_version=11,
            input_shape=None,
            input_names=['input'],
            output_names=['dets', 'labels'])))
# Shared fixtures: scratch onnx path, task processor (filled per-test by the
# autouse fixture below) and one random 32x32 RGB test image.
onnx_file = NamedTemporaryFile(suffix='.onnx').name
task_processor = None
img_shape = (32, 32)
img = np.random.rand(*img_shape, 3)
@pytest.fixture(autouse=True)
def init_task_processor():
    """Rebuild the module-level task processor (CPU) before every test."""
    global task_processor
    task_processor = build_task_processor(model_cfg, deploy_cfg, 'cpu')
@pytest.fixture
def backend_model():
    """Yield a backend model whose ONNXRuntime wrapper is patched to return
    fixed random dets/labels; the real wrapper is restored on teardown."""
    from mmdeploy.backend.onnxruntime import ORTWrapper
    ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
    wrapper = SwitchBackendWrapper(ORTWrapper)
    wrapper.set(
        outputs={
            'dets': torch.rand(1, 10, 5).sort(2).values,
            'labels': torch.randint(0, 10, (1, 10))
        })
    yield task_processor.build_backend_model([''])
    wrapper.recover()
def test_visualize(backend_model):
    """Smoke test: run the mocked backend model on one image and check that
    ``visualize`` writes an output file inside the temporary directory.

    BUGFIX: the previous ``dir + 'tmp.jpg'`` concatenated without a path
    separator, so the file landed *next to* the TemporaryDirectory (e.g.
    '/tmp/abc123tmp.jpg') and was never cleaned up on exit.
    """
    img_path = 'tests/data/color.jpg'
    input_dict, _ = task_processor.create_input(
        img_path, input_shape=img_shape)
    results = backend_model.test_step(input_dict)[0]
    with TemporaryDirectory() as dir:
        filename = os.path.join(dir, 'tmp.jpg')
        task_processor.visualize(img, results, filename, 'window')
        assert os.path.exists(filename)
| 3,052 | 30.474227 | 71 | py |
mmyolo | mmyolo-main/tests/test_deploy/test_mmyolo_models.py | # Copyright (c) OpenMMLab. All rights reserved.
import os
import random
import numpy as np
import pytest
import torch
from mmengine import Config
try:
import importlib
importlib.import_module('mmdeploy')
except ImportError:
pytest.skip('mmdeploy is not installed.', allow_module_level=True)
from mmdeploy.codebase import import_codebase
from mmdeploy.utils import Backend
from mmdeploy.utils.config_utils import register_codebase
from mmdeploy.utils.test import (WrapModel, check_backend, get_model_outputs,
get_rewrite_outputs)
try:
codebase = register_codebase('mmyolo')
import_codebase(codebase, ['mmyolo.deploy'])
except ImportError:
pytest.skip('mmyolo is not installed.', allow_module_level=True)
def seed_everything(seed=1029):
    """Seed every RNG source used in the tests for reproducibility.

    Covers ``random``, hash seeding, NumPy, and PyTorch (CPU and, when
    available, all CUDA devices), and forces cuDNN into a fully
    deterministic, non-benchmarking configuration.
    """
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        # Also seed every GPU when running multi-GPU.
        torch.cuda.manual_seed_all(seed)
    # Disable cuDNN autotuning / non-deterministic kernels entirely.
    for flag in ('benchmark', 'enabled'):
        setattr(torch.backends.cudnn, flag, False)
    torch.backends.cudnn.deterministic = True
def get_yolov5_head_model():
    """Build a frozen ``YOLOv5Head`` (4 classes, 3 strides) for rewrite tests."""
    from mmyolo.models.dense_heads import YOLOv5Head
    head_module_cfg = dict(
        type='YOLOv5HeadModule',
        num_classes=4,
        in_channels=[2, 4, 8],
        featmap_strides=[8, 16, 32],
        num_base_priors=1)
    nms_test_cfg = Config(
        dict(
            multi_label=True,
            nms_pre=30000,
            score_thr=0.001,
            nms=dict(type='nms', iou_threshold=0.65),
            max_per_img=300))
    head = YOLOv5Head(head_module_cfg, test_cfg=nms_test_cfg)
    # Freeze parameters; the head is only used for inference comparisons.
    head.requires_grad_(False)
    return head
@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME])
def test_yolov5_head_predict_by_feat(backend_type: Backend):
    """Test predict_by_feat rewrite of YOLOv5Head.

    Runs the original PyTorch ``predict_by_feat`` and the deploy-rewritten
    version on identical, seeded dummy feature maps and checks that the
    surviving detections agree within tolerance.
    """
    check_backend(backend_type)
    yolov5_head = get_yolov5_head_model()
    yolov5_head.cpu().eval()
    # Square input image of side ``s``; also the clamp bound for boxes below.
    s = 256
    batch_img_metas = [{
        'scale_factor': (1.0, 1.0),
        'pad_shape': (s, s, 3),
        'img_shape': (s, s, 3),
        'ori_shape': (s, s, 3)
    }]
    output_names = ['dets', 'labels']
    deploy_cfg = Config(
        dict(
            backend_config=dict(type=backend_type.value),
            onnx_config=dict(output_names=output_names, input_shape=None),
            codebase_config=dict(
                type='mmyolo',
                task='ObjectDetection',
                post_processing=dict(
                    score_threshold=0.05,
                    iou_threshold=0.5,
                    max_output_boxes_per_class=20,
                    pre_top_k=-1,
                    keep_top_k=10,
                    background_label_id=-1,
                ),
                module=['mmyolo.deploy'])))
    # Seeded dummy multi-scale feature maps: 32x32, 16x16, 8x8 for the
    # three strides (i runs 3, 2, 1 so side = 4 * 2**i).
    seed_everything(1234)
    cls_scores = [
        torch.rand(1, yolov5_head.num_classes * yolov5_head.num_base_priors,
                   4 * pow(2, i), 4 * pow(2, i)) for i in range(3, 0, -1)
    ]
    seed_everything(5678)
    bbox_preds = [
        torch.rand(1, 4 * yolov5_head.num_base_priors, 4 * pow(2, i),
                   4 * pow(2, i)) for i in range(3, 0, -1)
    ]
    seed_everything(9101)
    objectnesses = [
        torch.rand(1, 1 * yolov5_head.num_base_priors, 4 * pow(2, i),
                   4 * pow(2, i)) for i in range(3, 0, -1)
    ]
    # to get outputs of pytorch model
    model_inputs = {
        'cls_scores': cls_scores,
        'bbox_preds': bbox_preds,
        'objectnesses': objectnesses,
        'batch_img_metas': batch_img_metas,
        'with_nms': True
    }
    model_outputs = get_model_outputs(yolov5_head, 'predict_by_feat',
                                      model_inputs)
    # to get outputs of onnx model after rewrite
    wrapped_model = WrapModel(
        yolov5_head,
        'predict_by_feat',
        batch_img_metas=batch_img_metas,
        with_nms=True)
    rewrite_inputs = {
        'cls_scores': cls_scores,
        'bbox_preds': bbox_preds,
        'objectnesses': objectnesses,
    }
    rewrite_outputs, is_backend_output = get_rewrite_outputs(
        wrapped_model=wrapped_model,
        model_inputs=rewrite_inputs,
        deploy_cfg=deploy_cfg)
    if is_backend_output:
        # hard code to make two tensors with the same shape
        # rewrite and original codes applied different nms strategy
        min_shape = min(model_outputs[0].bboxes.shape[0],
                        rewrite_outputs[0].shape[1], 5)
        for i in range(len(model_outputs)):
            # Clamp x (0::2) and y (1::2) coordinates into the image before
            # comparing, mirroring what the exported model does.
            rewrite_outputs[0][i, :min_shape, 0::2] = \
                rewrite_outputs[0][i, :min_shape, 0::2].clamp_(0, s)
            rewrite_outputs[0][i, :min_shape, 1::2] = \
                rewrite_outputs[0][i, :min_shape, 1::2].clamp_(0, s)
            assert np.allclose(
                model_outputs[i].bboxes[:min_shape],
                rewrite_outputs[0][i, :min_shape, :4],
                rtol=1e-03,
                atol=1e-05)
            assert np.allclose(
                model_outputs[i].scores[:min_shape],
                rewrite_outputs[0][i, :min_shape, 4],
                rtol=1e-03,
                atol=1e-05)
            assert np.allclose(
                model_outputs[i].labels[:min_shape],
                rewrite_outputs[1][i, :min_shape],
                rtol=1e-03,
                atol=1e-05)
    else:
        # Backend could not run; at least the rewrite produced something.
        assert rewrite_outputs is not None
| 5,546 | 32.415663 | 77 | py |
mmyolo | mmyolo-main/tests/test_models/test_layers/test_ema.py | # Copyright (c) OpenMMLab. All rights reserved.
import itertools
import math
from unittest import TestCase
import torch
import torch.nn as nn
from mmengine.testing import assert_allclose
from mmyolo.models.layers import ExpMomentumEMA
class TestEMA(TestCase):
    """Unit tests for ``ExpMomentumEMA`` (exponentially annealed EMA)."""

    def test_exp_momentum_ema(self):
        """Parameter EMA matches a hand-computed reference over 10 updates."""
        model = nn.Sequential(nn.Conv2d(1, 5, kernel_size=3), nn.Linear(5, 10))
        # Test invalid gamma
        with self.assertRaisesRegex(AssertionError,
                                    'gamma must be greater than 0'):
            ExpMomentumEMA(model, gamma=-1)
        # Test EMA
        model = torch.nn.Sequential(
            torch.nn.Conv2d(1, 5, kernel_size=3), torch.nn.Linear(5, 10))
        momentum = 0.1
        gamma = 4
        ema_model = ExpMomentumEMA(model, momentum=momentum, gamma=gamma)
        averaged_params = [
            torch.zeros_like(param) for param in model.parameters()
        ]
        n_updates = 10
        for i in range(n_updates):
            updated_averaged_params = []
            for p, p_avg in zip(model.parameters(), averaged_params):
                # Perturb the source model, then compute the expected EMA.
                p.detach().add_(torch.randn_like(p))
                if i == 0:
                    # First update copies the source parameters verbatim.
                    updated_averaged_params.append(p.clone())
                else:
                    # Annealed momentum: m = (1 - m0) * exp(-(1+i)/gamma) + m0
                    m = (1 - momentum) * math.exp(-(1 + i) / gamma) + momentum
                    updated_averaged_params.append(
                        (p_avg * (1 - m) + p * m).clone())
            ema_model.update_parameters(model)
            averaged_params = updated_averaged_params
        for p_target, p_ema in zip(averaged_params, ema_model.parameters()):
            assert_allclose(p_target, p_ema)

    def test_exp_momentum_ema_update_buffer(self):
        """With ``update_buffers=True`` buffers (e.g. BN stats) are averaged too."""
        model = nn.Sequential(
            nn.Conv2d(1, 5, kernel_size=3), nn.BatchNorm2d(5, momentum=0.3),
            nn.Linear(5, 10))
        # Test invalid gamma
        with self.assertRaisesRegex(AssertionError,
                                    'gamma must be greater than 0'):
            ExpMomentumEMA(model, gamma=-1)
        # Test EMA with momentum annealing.
        momentum = 0.1
        gamma = 4
        ema_model = ExpMomentumEMA(
            model, gamma=gamma, momentum=momentum, update_buffers=True)
        # Scalar buffers (e.g. num_batches_tracked) are excluded by the
        # size filter below, matching the implementation's behaviour.
        averaged_params = [
            torch.zeros_like(param)
            for param in itertools.chain(model.parameters(), model.buffers())
            if param.size() != torch.Size([])
        ]
        n_updates = 10
        for i in range(n_updates):
            updated_averaged_params = []
            params = [
                param for param in itertools.chain(model.parameters(),
                                                   model.buffers())
                if param.size() != torch.Size([])
            ]
            for p, p_avg in zip(params, averaged_params):
                p.detach().add_(torch.randn_like(p))
                if i == 0:
                    updated_averaged_params.append(p.clone())
                else:
                    m = (1 - momentum) * math.exp(-(1 + i) / gamma) + momentum
                    updated_averaged_params.append(
                        (p_avg * (1 - m) + p * m).clone())
            ema_model.update_parameters(model)
            averaged_params = updated_averaged_params
        ema_params = [
            param for param in itertools.chain(ema_model.module.parameters(),
                                               ema_model.module.buffers())
            if param.size() != torch.Size([])
        ]
        for p_target, p_ema in zip(averaged_params, ema_params):
            assert_allclose(p_target, p_ema)
| 3,634 | 37.263158 | 79 | py |
mmyolo | mmyolo-main/tests/test_models/test_layers/test_yolo_bricks.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmyolo.models.layers import SPPFBottleneck
from mmyolo.utils import register_all_modules
# Register mmyolo modules so string-typed configs resolve.
register_all_modules()
class TestSPPFBottleneck(TestCase):
    """Forward-shape tests for ``SPPFBottleneck``."""

    def test_forward(self):
        """The block preserves spatial size and maps 3 -> 16 channels."""
        inputs = torch.randn((1, 3, 20, 20))
        expected_shape = (1, 16, 20, 20)

        # Default kernel size.
        self.assertEqual(SPPFBottleneck(3, 16)(inputs).shape, expected_shape)
        # Explicit list of kernel sizes.
        self.assertEqual(
            SPPFBottleneck(3, 16, kernel_sizes=[3, 5, 7])(inputs).shape,
            expected_shape)
        # Four kernel sizes.
        self.assertEqual(
            SPPFBottleneck(3, 16, kernel_sizes=[3, 5, 7, 9])(inputs).shape,
            expected_shape)
        # Skip the leading conv.
        self.assertEqual(
            SPPFBottleneck(
                3, 16, use_conv_first=False,
                kernel_sizes=[3, 5, 7, 9])(inputs).shape, expected_shape)
| 1,111 | 30.771429 | 69 | py |
mmyolo | mmyolo-main/tests/test_models/test_backbone/test_csp_resnet.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import pytest
import torch
from torch.nn.modules.batchnorm import _BatchNorm
from mmyolo.models import PPYOLOECSPResNet
from mmyolo.utils import register_all_modules
from .utils import check_norm_state, is_norm
# Register mmyolo modules so string-typed configs resolve.
register_all_modules()
class TestPPYOLOECSPResNet(TestCase):
    """Init-validation and forward-shape tests for ``PPYOLOECSPResNet``."""

    def test_init(self):
        """Invalid ``out_indices`` / ``frozen_stages`` must raise."""
        # out_indices in range(len(arch_setting) + 1)
        with pytest.raises(AssertionError):
            PPYOLOECSPResNet(out_indices=(6, ))
        with pytest.raises(ValueError):
            # frozen_stages must in range(-1, len(arch_setting) + 1)
            PPYOLOECSPResNet(frozen_stages=6)

    def test_forward(self):
        """Freezing, norm_eval, width scaling, act_cfg and plugins."""
        # Test PPYOLOECSPResNet with first stage frozen
        frozen_stages = 1
        model = PPYOLOECSPResNet(frozen_stages=frozen_stages)
        model.init_weights()
        model.train()
        for mod in model.stem.modules():
            for param in mod.parameters():
                assert param.requires_grad is False
        for i in range(1, frozen_stages + 1):
            layer = getattr(model, f'stage{i}')
            for mod in layer.modules():
                if isinstance(mod, _BatchNorm):
                    assert mod.training is False
            for param in layer.parameters():
                assert param.requires_grad is False
        # Test PPYOLOECSPResNet with norm_eval=True
        model = PPYOLOECSPResNet(norm_eval=True)
        model.train()
        assert check_norm_state(model.modules(), False)
        # Test PPYOLOECSPResNet-P5 forward with widen_factor=0.25
        model = PPYOLOECSPResNet(
            arch='P5', widen_factor=0.25, out_indices=range(0, 5))
        model.train()
        imgs = torch.randn(1, 3, 64, 64)
        feat = model(imgs)
        assert len(feat) == 5
        assert feat[0].shape == torch.Size((1, 16, 32, 32))
        assert feat[1].shape == torch.Size((1, 32, 16, 16))
        assert feat[2].shape == torch.Size((1, 64, 8, 8))
        assert feat[3].shape == torch.Size((1, 128, 4, 4))
        assert feat[4].shape == torch.Size((1, 256, 2, 2))
        # Test PPYOLOECSPResNet forward with dict(type='ReLU')
        model = PPYOLOECSPResNet(
            widen_factor=0.125,
            act_cfg=dict(type='ReLU'),
            out_indices=range(0, 5))
        model.train()
        imgs = torch.randn(1, 3, 64, 64)
        feat = model(imgs)
        assert len(feat) == 5
        assert feat[0].shape == torch.Size((1, 8, 32, 32))
        assert feat[1].shape == torch.Size((1, 16, 16, 16))
        assert feat[2].shape == torch.Size((1, 32, 8, 8))
        assert feat[3].shape == torch.Size((1, 64, 4, 4))
        assert feat[4].shape == torch.Size((1, 128, 2, 2))
        # Test PPYOLOECSPResNet with BatchNorm forward
        model = PPYOLOECSPResNet(widen_factor=0.125, out_indices=range(0, 5))
        for m in model.modules():
            if is_norm(m):
                assert isinstance(m, _BatchNorm)
        model.train()
        imgs = torch.randn(1, 3, 64, 64)
        feat = model(imgs)
        assert len(feat) == 5
        assert feat[0].shape == torch.Size((1, 8, 32, 32))
        assert feat[1].shape == torch.Size((1, 16, 16, 16))
        assert feat[2].shape == torch.Size((1, 32, 8, 8))
        assert feat[3].shape == torch.Size((1, 64, 4, 4))
        assert feat[4].shape == torch.Size((1, 128, 2, 2))
        # Test PPYOLOECSPResNet with DropBlock plugins on stages 3 and 4
        model = PPYOLOECSPResNet(plugins=[
            dict(
                cfg=dict(type='mmdet.DropBlock', drop_prob=0.1, block_size=3),
                stages=(False, False, True, True)),
        ])
        assert len(model.stage1) == 1
        assert len(model.stage2) == 1
        assert len(model.stage3) == 2  # +DropBlock
        assert len(model.stage4) == 2  # +DropBlock
        model.train()
        imgs = torch.randn(1, 3, 256, 256)
        feat = model(imgs)
        assert len(feat) == 3
        assert feat[0].shape == torch.Size((1, 256, 32, 32))
        assert feat[1].shape == torch.Size((1, 512, 16, 16))
        assert feat[2].shape == torch.Size((1, 1024, 8, 8))
| 4,162 | 35.517544 | 78 | py |
mmyolo | mmyolo-main/tests/test_models/test_backbone/test_efficient_rep.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import pytest
import torch
from torch.nn.modules.batchnorm import _BatchNorm
from mmyolo.models.backbones import YOLOv6CSPBep, YOLOv6EfficientRep
from mmyolo.utils import register_all_modules
from .utils import check_norm_state, is_norm
# Register mmyolo modules so string-typed configs resolve.
register_all_modules()
class TestYOLOv6EfficientRep(TestCase):
    """Tests for the YOLOv6 backbones ``YOLOv6EfficientRep`` and ``YOLOv6CSPBep``."""

    def test_init(self):
        """Invalid ``out_indices`` / ``frozen_stages`` must raise."""
        # out_indices in range(len(arch_setting) + 1)
        with pytest.raises(AssertionError):
            YOLOv6EfficientRep(out_indices=(6, ))
        with pytest.raises(ValueError):
            # frozen_stages must in range(-1, len(arch_setting) + 1)
            YOLOv6EfficientRep(frozen_stages=6)

    def test_YOLOv6EfficientRep_forward(self):
        """Freezing, norm_eval, width scaling, act_cfg and plugins."""
        # Test YOLOv6EfficientRep with first stage frozen
        frozen_stages = 1
        model = YOLOv6EfficientRep(frozen_stages=frozen_stages)
        model.init_weights()
        model.train()
        for mod in model.stem.modules():
            for param in mod.parameters():
                assert param.requires_grad is False
        for i in range(1, frozen_stages + 1):
            layer = getattr(model, f'stage{i}')
            for mod in layer.modules():
                if isinstance(mod, _BatchNorm):
                    assert mod.training is False
            for param in layer.parameters():
                assert param.requires_grad is False
        # Test YOLOv6EfficientRep with norm_eval=True
        model = YOLOv6EfficientRep(norm_eval=True)
        model.train()
        assert check_norm_state(model.modules(), False)
        # Test YOLOv6EfficientRep-P5 forward with widen_factor=0.25
        model = YOLOv6EfficientRep(
            arch='P5', widen_factor=0.25, out_indices=range(0, 5))
        model.train()
        imgs = torch.randn(1, 3, 64, 64)
        feat = model(imgs)
        assert len(feat) == 5
        assert feat[0].shape == torch.Size((1, 16, 32, 32))
        assert feat[1].shape == torch.Size((1, 32, 16, 16))
        assert feat[2].shape == torch.Size((1, 64, 8, 8))
        assert feat[3].shape == torch.Size((1, 128, 4, 4))
        assert feat[4].shape == torch.Size((1, 256, 2, 2))
        # Test YOLOv6EfficientRep forward with dict(type='ReLU')
        model = YOLOv6EfficientRep(
            widen_factor=0.125,
            act_cfg=dict(type='ReLU'),
            out_indices=range(0, 5))
        model.train()
        imgs = torch.randn(1, 3, 64, 64)
        feat = model(imgs)
        assert len(feat) == 5
        assert feat[0].shape == torch.Size((1, 8, 32, 32))
        assert feat[1].shape == torch.Size((1, 16, 16, 16))
        assert feat[2].shape == torch.Size((1, 32, 8, 8))
        assert feat[3].shape == torch.Size((1, 64, 4, 4))
        assert feat[4].shape == torch.Size((1, 128, 2, 2))
        # Test YOLOv6EfficientRep with BatchNorm forward
        model = YOLOv6EfficientRep(widen_factor=0.125, out_indices=range(0, 5))
        for m in model.modules():
            if is_norm(m):
                assert isinstance(m, _BatchNorm)
        model.train()
        imgs = torch.randn(1, 3, 64, 64)
        feat = model(imgs)
        assert len(feat) == 5
        assert feat[0].shape == torch.Size((1, 8, 32, 32))
        assert feat[1].shape == torch.Size((1, 16, 16, 16))
        assert feat[2].shape == torch.Size((1, 32, 8, 8))
        assert feat[3].shape == torch.Size((1, 64, 4, 4))
        assert feat[4].shape == torch.Size((1, 128, 2, 2))
        # Test YOLOv6EfficientRep with DropBlock plugins on stages 3 and 4
        model = YOLOv6EfficientRep(plugins=[
            dict(
                cfg=dict(type='mmdet.DropBlock', drop_prob=0.1, block_size=3),
                stages=(False, False, True, True)),
        ])
        assert len(model.stage1) == 1
        assert len(model.stage2) == 1
        assert len(model.stage3) == 2  # +DropBlock
        assert len(model.stage4) == 3  # +SPPF+DropBlock
        model.train()
        imgs = torch.randn(1, 3, 256, 256)
        feat = model(imgs)
        assert len(feat) == 3
        assert feat[0].shape == torch.Size((1, 256, 32, 32))
        assert feat[1].shape == torch.Size((1, 512, 16, 16))
        assert feat[2].shape == torch.Size((1, 1024, 8, 8))

    def test_YOLOv6CSPBep_forward(self):
        """Same checks as above for the CSPBep variant."""
        # Test YOLOv6CSPBep with first stage frozen
        frozen_stages = 1
        model = YOLOv6CSPBep(frozen_stages=frozen_stages)
        model.init_weights()
        model.train()
        for mod in model.stem.modules():
            for param in mod.parameters():
                assert param.requires_grad is False
        for i in range(1, frozen_stages + 1):
            layer = getattr(model, f'stage{i}')
            for mod in layer.modules():
                if isinstance(mod, _BatchNorm):
                    assert mod.training is False
            for param in layer.parameters():
                assert param.requires_grad is False
        # Test YOLOv6CSPBep with norm_eval=True
        model = YOLOv6CSPBep(norm_eval=True)
        model.train()
        assert check_norm_state(model.modules(), False)
        # Test YOLOv6CSPBep forward with widen_factor=0.25
        model = YOLOv6CSPBep(
            arch='P5', widen_factor=0.25, out_indices=range(0, 5))
        model.train()
        imgs = torch.randn(1, 3, 64, 64)
        feat = model(imgs)
        assert len(feat) == 5
        assert feat[0].shape == torch.Size((1, 16, 32, 32))
        assert feat[1].shape == torch.Size((1, 32, 16, 16))
        assert feat[2].shape == torch.Size((1, 64, 8, 8))
        assert feat[3].shape == torch.Size((1, 128, 4, 4))
        assert feat[4].shape == torch.Size((1, 256, 2, 2))
        # Test YOLOv6CSPBep forward with dict(type='ReLU')
        model = YOLOv6CSPBep(
            widen_factor=0.125,
            act_cfg=dict(type='ReLU'),
            out_indices=range(0, 5))
        model.train()
        imgs = torch.randn(1, 3, 64, 64)
        feat = model(imgs)
        assert len(feat) == 5
        assert feat[0].shape == torch.Size((1, 8, 32, 32))
        assert feat[1].shape == torch.Size((1, 16, 16, 16))
        assert feat[2].shape == torch.Size((1, 32, 8, 8))
        assert feat[3].shape == torch.Size((1, 64, 4, 4))
        assert feat[4].shape == torch.Size((1, 128, 2, 2))
        # Test YOLOv6CSPBep with BatchNorm forward
        model = YOLOv6CSPBep(widen_factor=0.125, out_indices=range(0, 5))
        for m in model.modules():
            if is_norm(m):
                assert isinstance(m, _BatchNorm)
        model.train()
        imgs = torch.randn(1, 3, 64, 64)
        feat = model(imgs)
        assert len(feat) == 5
        assert feat[0].shape == torch.Size((1, 8, 32, 32))
        assert feat[1].shape == torch.Size((1, 16, 16, 16))
        assert feat[2].shape == torch.Size((1, 32, 8, 8))
        assert feat[3].shape == torch.Size((1, 64, 4, 4))
        assert feat[4].shape == torch.Size((1, 128, 2, 2))
        # Test YOLOv6CSPBep with DropBlock plugins on stages 3 and 4
        model = YOLOv6CSPBep(plugins=[
            dict(
                cfg=dict(type='mmdet.DropBlock', drop_prob=0.1, block_size=3),
                stages=(False, False, True, True)),
        ])
        assert len(model.stage1) == 1
        assert len(model.stage2) == 1
        assert len(model.stage3) == 2  # +DropBlock
        assert len(model.stage4) == 3  # +SPPF+DropBlock
        model.train()
        imgs = torch.randn(1, 3, 256, 256)
        feat = model(imgs)
        assert len(feat) == 3
        assert feat[0].shape == torch.Size((1, 256, 32, 32))
        assert feat[1].shape == torch.Size((1, 512, 16, 16))
        assert feat[2].shape == torch.Size((1, 1024, 8, 8))
| 7,688 | 36.876847 | 79 | py |
mmyolo | mmyolo-main/tests/test_models/test_backbone/utils.py | # Copyright (c) OpenMMLab. All rights reserved.
from mmdet.models.backbones.res2net import Bottle2neck
from mmdet.models.backbones.resnet import BasicBlock, Bottleneck
from mmdet.models.backbones.resnext import Bottleneck as BottleneckX
from mmdet.models.layers import SimplifiedBasicBlock
from torch.nn.modules import GroupNorm
from torch.nn.modules.batchnorm import _BatchNorm
def is_block(modules):
    """Check whether ``modules`` is a ResNet-family building block.

    Returns ``True`` for BasicBlock, Bottleneck, BottleneckX, Bottle2neck
    and SimplifiedBasicBlock instances.  Replaces the redundant
    ``if isinstance(...): return True / return False`` with a direct
    boolean return.
    """
    return isinstance(modules, (BasicBlock, Bottleneck, BottleneckX,
                                Bottle2neck, SimplifiedBasicBlock))
def is_norm(modules):
    """Check whether ``modules`` is a normalization layer.

    Returns ``True`` for ``GroupNorm`` and any ``_BatchNorm`` subclass.
    Replaces the redundant ``if isinstance(...): return True / return
    False`` with a direct boolean return.
    """
    return isinstance(modules, (GroupNorm, _BatchNorm))
def check_norm_state(modules, train_state):
    """Return True if every BatchNorm layer matches ``train_state``.

    Non-BatchNorm modules are ignored; an iterable without any BatchNorm
    layers trivially passes.
    """
    return all(mod.training == train_state for mod in modules
               if isinstance(mod, _BatchNorm))
| 1,026 | 31.09375 | 77 | py |
mmyolo | mmyolo-main/tests/test_models/test_backbone/test_yolov7_backbone.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import pytest
import torch
from torch.nn.modules.batchnorm import _BatchNorm
from mmyolo.models.backbones import YOLOv7Backbone
from mmyolo.utils import register_all_modules
from .utils import check_norm_state
# Register mmyolo modules so string-typed configs resolve.
register_all_modules()
class TestYOLOv7Backbone(TestCase):
    """Init-validation and forward-shape tests for ``YOLOv7Backbone``."""

    def test_init(self):
        """Invalid ``out_indices`` / ``frozen_stages`` must raise."""
        # out_indices in range(len(arch_setting) + 1)
        with pytest.raises(AssertionError):
            YOLOv7Backbone(out_indices=(6, ))
        with pytest.raises(ValueError):
            # frozen_stages must in range(-1, len(arch_setting) + 1)
            YOLOv7Backbone(frozen_stages=6)

    def test_forward(self):
        """Freezing, norm_eval, plugins, and every arch variant's shapes."""
        # Test YOLOv7Backbone-L with first stage frozen
        frozen_stages = 1
        model = YOLOv7Backbone(frozen_stages=frozen_stages)
        model.init_weights()
        model.train()
        for mod in model.stem.modules():
            for param in mod.parameters():
                assert param.requires_grad is False
        for i in range(1, frozen_stages + 1):
            layer = getattr(model, f'stage{i}')
            for mod in layer.modules():
                if isinstance(mod, _BatchNorm):
                    assert mod.training is False
            for param in layer.parameters():
                assert param.requires_grad is False
        # Test YOLOv7Backbone-L with norm_eval=True
        model = YOLOv7Backbone(norm_eval=True)
        model.train()
        assert check_norm_state(model.modules(), False)
        # Test YOLOv7Backbone-L forward with widen_factor=0.25
        model = YOLOv7Backbone(
            widen_factor=0.25, out_indices=tuple(range(0, 5)))
        model.train()
        imgs = torch.randn(1, 3, 64, 64)
        feat = model(imgs)
        assert len(feat) == 5
        assert feat[0].shape == torch.Size((1, 16, 32, 32))
        assert feat[1].shape == torch.Size((1, 64, 16, 16))
        assert feat[2].shape == torch.Size((1, 128, 8, 8))
        assert feat[3].shape == torch.Size((1, 256, 4, 4))
        assert feat[4].shape == torch.Size((1, 256, 2, 2))
        # Test YOLOv7Backbone-L with plugins
        model = YOLOv7Backbone(
            widen_factor=0.25,
            plugins=[
                dict(
                    cfg=dict(
                        type='mmdet.DropBlock', drop_prob=0.1, block_size=3),
                    stages=(False, False, True, True)),
            ])
        assert len(model.stage1) == 2
        assert len(model.stage2) == 2
        assert len(model.stage3) == 3  # +DropBlock
        assert len(model.stage4) == 3  # +DropBlock
        model.train()
        imgs = torch.randn(1, 3, 128, 128)
        feat = model(imgs)
        assert len(feat) == 3
        assert feat[0].shape == torch.Size((1, 128, 16, 16))
        assert feat[1].shape == torch.Size((1, 256, 8, 8))
        assert feat[2].shape == torch.Size((1, 256, 4, 4))
        # Test YOLOv7Backbone-X forward with widen_factor=0.25
        model = YOLOv7Backbone(arch='X', widen_factor=0.25)
        model.train()
        imgs = torch.randn(1, 3, 64, 64)
        feat = model(imgs)
        assert len(feat) == 3
        assert feat[0].shape == torch.Size((1, 160, 8, 8))
        assert feat[1].shape == torch.Size((1, 320, 4, 4))
        assert feat[2].shape == torch.Size((1, 320, 2, 2))
        # Test YOLOv7Backbone-tiny forward with widen_factor=0.25
        model = YOLOv7Backbone(arch='Tiny', widen_factor=0.25)
        model.train()
        feat = model(imgs)
        assert len(feat) == 3
        assert feat[0].shape == torch.Size((1, 32, 8, 8))
        assert feat[1].shape == torch.Size((1, 64, 4, 4))
        assert feat[2].shape == torch.Size((1, 128, 2, 2))
        # Test YOLOv7Backbone-W forward with widen_factor=0.25
        model = YOLOv7Backbone(
            arch='W', widen_factor=0.25, out_indices=(2, 3, 4, 5))
        model.train()
        imgs = torch.randn(1, 3, 128, 128)
        feat = model(imgs)
        assert len(feat) == 4
        assert feat[0].shape == torch.Size((1, 64, 16, 16))
        assert feat[1].shape == torch.Size((1, 128, 8, 8))
        assert feat[2].shape == torch.Size((1, 192, 4, 4))
        assert feat[3].shape == torch.Size((1, 256, 2, 2))
        # Test YOLOv7Backbone-D forward with widen_factor=0.25
        model = YOLOv7Backbone(
            arch='D', widen_factor=0.25, out_indices=(2, 3, 4, 5))
        model.train()
        feat = model(imgs)
        assert len(feat) == 4
        assert feat[0].shape == torch.Size((1, 96, 16, 16))
        assert feat[1].shape == torch.Size((1, 192, 8, 8))
        assert feat[2].shape == torch.Size((1, 288, 4, 4))
        assert feat[3].shape == torch.Size((1, 384, 2, 2))
        # Test YOLOv7Backbone-E forward with widen_factor=0.25
        model = YOLOv7Backbone(
            arch='E', widen_factor=0.25, out_indices=(2, 3, 4, 5))
        model.train()
        feat = model(imgs)
        assert len(feat) == 4
        assert feat[0].shape == torch.Size((1, 80, 16, 16))
        assert feat[1].shape == torch.Size((1, 160, 8, 8))
        assert feat[2].shape == torch.Size((1, 240, 4, 4))
        assert feat[3].shape == torch.Size((1, 320, 2, 2))
        # Test YOLOv7Backbone-E2E forward with widen_factor=0.25
        model = YOLOv7Backbone(
            arch='E2E', widen_factor=0.25, out_indices=(2, 3, 4, 5))
        model.train()
        feat = model(imgs)
        assert len(feat) == 4
        assert feat[0].shape == torch.Size((1, 80, 16, 16))
        assert feat[1].shape == torch.Size((1, 160, 8, 8))
        assert feat[2].shape == torch.Size((1, 240, 4, 4))
        assert feat[3].shape == torch.Size((1, 320, 2, 2))
| 5,705 | 35.812903 | 77 | py |
mmyolo | mmyolo-main/tests/test_models/test_backbone/test_csp_darknet.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import pytest
import torch
from parameterized import parameterized
from torch.nn.modules.batchnorm import _BatchNorm
from mmyolo.models.backbones import (YOLOv5CSPDarknet, YOLOv8CSPDarknet,
YOLOXCSPDarknet)
from mmyolo.utils import register_all_modules
from .utils import check_norm_state, is_norm
# Register mmyolo modules so string-typed configs resolve.
register_all_modules()
class TestCSPDarknet(TestCase):
    """Shared tests for the YOLOv5/YOLOX/YOLOv8 CSPDarknet backbones."""

    @parameterized.expand([(YOLOv5CSPDarknet, ), (YOLOXCSPDarknet, ),
                           (YOLOv8CSPDarknet, )])
    def test_init(self, module_class):
        """Invalid ``out_indices`` / ``frozen_stages`` must raise."""
        # out_indices in range(len(arch_setting) + 1)
        with pytest.raises(AssertionError):
            module_class(out_indices=(6, ))
        with pytest.raises(ValueError):
            # frozen_stages must in range(-1, len(arch_setting) + 1)
            module_class(frozen_stages=6)

    @parameterized.expand([(YOLOv5CSPDarknet, ), (YOLOXCSPDarknet, ),
                           (YOLOv8CSPDarknet, )])
    def test_forward(self, module_class):
        """Freezing, norm_eval, width scaling, act_cfg and plugins."""
        # Test CSPDarknet with first stage frozen
        frozen_stages = 1
        model = module_class(frozen_stages=frozen_stages)
        model.init_weights()
        model.train()
        for mod in model.stem.modules():
            for param in mod.parameters():
                assert param.requires_grad is False
        for i in range(1, frozen_stages + 1):
            layer = getattr(model, f'stage{i}')
            for mod in layer.modules():
                if isinstance(mod, _BatchNorm):
                    assert mod.training is False
            for param in layer.parameters():
                assert param.requires_grad is False
        # Test CSPDarknet with norm_eval=True
        model = module_class(norm_eval=True)
        model.train()
        assert check_norm_state(model.modules(), False)
        # Test CSPDarknet-P5 forward with widen_factor=0.25
        model = module_class(
            arch='P5', widen_factor=0.25, out_indices=range(0, 5))
        model.train()
        imgs = torch.randn(1, 3, 64, 64)
        feat = model(imgs)
        assert len(feat) == 5
        assert feat[0].shape == torch.Size((1, 16, 32, 32))
        assert feat[1].shape == torch.Size((1, 32, 16, 16))
        assert feat[2].shape == torch.Size((1, 64, 8, 8))
        assert feat[3].shape == torch.Size((1, 128, 4, 4))
        assert feat[4].shape == torch.Size((1, 256, 2, 2))
        # Test CSPDarknet forward with dict(type='ReLU')
        model = module_class(
            widen_factor=0.125,
            act_cfg=dict(type='ReLU'),
            out_indices=range(0, 5))
        model.train()
        imgs = torch.randn(1, 3, 64, 64)
        feat = model(imgs)
        assert len(feat) == 5
        assert feat[0].shape == torch.Size((1, 8, 32, 32))
        assert feat[1].shape == torch.Size((1, 16, 16, 16))
        assert feat[2].shape == torch.Size((1, 32, 8, 8))
        assert feat[3].shape == torch.Size((1, 64, 4, 4))
        assert feat[4].shape == torch.Size((1, 128, 2, 2))
        # Test CSPDarknet with BatchNorm forward
        model = module_class(widen_factor=0.125, out_indices=range(0, 5))
        for m in model.modules():
            if is_norm(m):
                assert isinstance(m, _BatchNorm)
        model.train()
        imgs = torch.randn(1, 3, 64, 64)
        feat = model(imgs)
        assert len(feat) == 5
        assert feat[0].shape == torch.Size((1, 8, 32, 32))
        assert feat[1].shape == torch.Size((1, 16, 16, 16))
        assert feat[2].shape == torch.Size((1, 32, 8, 8))
        assert feat[3].shape == torch.Size((1, 64, 4, 4))
        assert feat[4].shape == torch.Size((1, 128, 2, 2))
        # Test CSPDarknet with Dropout Block
        model = module_class(plugins=[
            dict(
                cfg=dict(type='mmdet.DropBlock', drop_prob=0.1, block_size=3),
                stages=(False, False, True, True)),
        ])
        assert len(model.stage1) == 2
        assert len(model.stage2) == 2
        assert len(model.stage3) == 3  # +DropBlock
        assert len(model.stage4) == 4  # +SPPF+DropBlock
        model.train()
        imgs = torch.randn(1, 3, 256, 256)
        feat = model(imgs)
        assert len(feat) == 3
        assert feat[0].shape == torch.Size((1, 256, 32, 32))
        assert feat[1].shape == torch.Size((1, 512, 16, 16))
        assert feat[2].shape == torch.Size((1, 1024, 8, 8))
| 4,481 | 36.35 | 78 | py |
mmyolo | mmyolo-main/tests/test_models/test_data_preprocessor/test_data_preprocessor.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmdet.structures import DetDataSample
from mmengine import MessageHub
from mmyolo.models import PPYOLOEBatchRandomResize, PPYOLOEDetDataPreprocessor
from mmyolo.models.data_preprocessors import (YOLOv5DetDataPreprocessor,
YOLOXBatchSyncRandomResize)
from mmyolo.utils import register_all_modules
# Register mmyolo modules so string-typed configs resolve.
register_all_modules()
class TestYOLOv5DetDataPreprocessor(TestCase):
    """Tests for ``YOLOv5DetDataPreprocessor``."""

    def test_forward(self):
        """Check normalization, channel flip, padding and the training contract."""
        processor = YOLOv5DetDataPreprocessor(mean=[0, 0, 0], std=[1, 1, 1])
        data = {
            'inputs': [torch.randint(0, 256, (3, 11, 10))],
            'data_samples': [DetDataSample()]
        }
        out_data = processor(data, training=False)
        batch_inputs, batch_data_samples = out_data['inputs'], out_data[
            'data_samples']
        self.assertEqual(batch_inputs.shape, (1, 3, 11, 10))
        self.assertEqual(len(batch_data_samples), 1)
        # test channel_conversion
        processor = YOLOv5DetDataPreprocessor(
            mean=[0., 0., 0.], std=[1., 1., 1.], bgr_to_rgb=True)
        out_data = processor(data, training=False)
        batch_inputs, batch_data_samples = out_data['inputs'], out_data[
            'data_samples']
        self.assertEqual(batch_inputs.shape, (1, 3, 11, 10))
        self.assertEqual(len(batch_data_samples), 1)
        # test padding, training=False
        data = {
            'inputs': [
                torch.randint(0, 256, (3, 10, 11)),
                torch.randint(0, 256, (3, 9, 14))
            ]
        }
        processor = YOLOv5DetDataPreprocessor(
            mean=[0., 0., 0.], std=[1., 1., 1.], bgr_to_rgb=True)
        out_data = processor(data, training=False)
        batch_inputs, batch_data_samples = out_data['inputs'], out_data[
            'data_samples']
        # Mixed sizes are padded up to the common max (10, 14).
        self.assertEqual(batch_inputs.shape, (2, 3, 10, 14))
        self.assertIsNone(batch_data_samples)
        # test training
        data = {
            'inputs': torch.randint(0, 256, (2, 3, 10, 11)),
            'data_samples': {
                'bboxes_labels': torch.randint(0, 11, (18, 6))
            },
        }
        out_data = processor(data, training=True)
        batch_inputs, batch_data_samples = out_data['inputs'], out_data[
            'data_samples']
        self.assertIn('img_metas', batch_data_samples)
        self.assertIn('bboxes_labels', batch_data_samples)
        self.assertEqual(batch_inputs.shape, (2, 3, 10, 11))
        self.assertIsInstance(batch_data_samples['bboxes_labels'],
                              torch.Tensor)
        self.assertIsInstance(batch_data_samples['img_metas'], list)
        data = {
            'inputs': [torch.randint(0, 256, (3, 11, 10))],
            'data_samples': [DetDataSample()]
        }
        # data_samples must be dict
        with self.assertRaises(AssertionError):
            processor(data, training=True)
class TestPPYOLOEDetDataPreprocessor(TestCase):
    """Tests for ``PPYOLOEDetDataPreprocessor`` with batch random resize."""

    def test_batch_random_resize(self):
        """Batch augment is wired up and training forward keeps the contract."""
        processor = PPYOLOEDetDataPreprocessor(
            pad_size_divisor=32,
            batch_augments=[
                dict(
                    type='PPYOLOEBatchRandomResize',
                    random_size_range=(320, 480),
                    interval=1,
                    size_divisor=32,
                    random_interp=True,
                    keep_ratio=False)
            ],
            mean=[0., 0., 0.],
            std=[255., 255., 255.],
            bgr_to_rgb=True)
        self.assertTrue(
            isinstance(processor.batch_augments[0], PPYOLOEBatchRandomResize))
        # The augment reads the current iteration from the message hub.
        message_hub = MessageHub.get_instance('test_batch_random_resize')
        message_hub.update_info('iter', 0)
        # test training
        data = {
            'inputs': [
                torch.randint(0, 256, (3, 10, 11)),
                torch.randint(0, 256, (3, 10, 11))
            ],
            'data_samples': {
                'bboxes_labels': torch.randint(0, 11, (18, 6)).float()
            },
        }
        out_data = processor(data, training=True)
        batch_data_samples = out_data['data_samples']
        self.assertIn('img_metas', batch_data_samples)
        self.assertIn('bboxes_labels', batch_data_samples)
        self.assertIsInstance(batch_data_samples['bboxes_labels'],
                              torch.Tensor)
        self.assertIsInstance(batch_data_samples['img_metas'], list)
        data = {
            'inputs': [torch.randint(0, 256, (3, 11, 10))],
            'data_samples': DetDataSample()
        }
        # data_samples must be list
        with self.assertRaises(AssertionError):
            processor(data, training=True)
class TestYOLOXDetDataPreprocessor(TestCase):
    """Tests for ``YOLOXBatchSyncRandomResize``."""

    def test_batch_sync_random_size(self):
        """Resize transform runs in training and validates its inputs."""
        processor = YOLOXBatchSyncRandomResize(
            random_size_range=(480, 800), size_divisor=32, interval=1)
        self.assertTrue(isinstance(processor, YOLOXBatchSyncRandomResize))
        # The transform reads the current iteration from the message hub.
        message_hub = MessageHub.get_instance(
            'test_yolox_batch_sync_random_resize')
        message_hub.update_info('iter', 0)
        # test training
        inputs = torch.randint(0, 256, (4, 3, 10, 11))
        data_samples = {'bboxes_labels': torch.randint(0, 11, (18, 6)).float()}
        inputs, data_samples = processor(inputs, data_samples)
        self.assertIn('bboxes_labels', data_samples)
        self.assertIsInstance(data_samples['bboxes_labels'], torch.Tensor)
        self.assertIsInstance(inputs, torch.Tensor)
        inputs = torch.randint(0, 256, (4, 3, 10, 11))
        data_samples = DetDataSample()
        # data_samples must be dict
        with self.assertRaises(AssertionError):
            processor(inputs, data_samples)
| 5,829 | 36.133758 | 79 | py |
mmyolo | mmyolo-main/tests/test_models/test_utils/test_misc.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch
from mmengine.structures import InstanceData
from torch import Tensor
from mmyolo.models.utils import gt_instances_preprocess
from mmyolo.utils import register_all_modules
# Register mmyolo modules so string-typed configs resolve.
register_all_modules()
class TestGtInstancesPreprocess:
    """Unit tests for ``gt_instances_preprocess``."""

    @pytest.mark.parametrize('box_dim', [4, 5])
    def test(self, box_dim):
        """An empty ``InstanceData`` list becomes a 3-D padded batch tensor."""
        empty_instances = InstanceData(
            bboxes=torch.empty((0, box_dim)), labels=torch.LongTensor([]))
        result = gt_instances_preprocess([empty_instances], 1)
        assert isinstance(result, Tensor)
        assert len(result.shape) == 3, 'the len of result must be 3.'
        # Last dim holds label + box coordinates.
        assert result.size(-1) == box_dim + 1

    @pytest.mark.parametrize('box_dim', [4, 5])
    def test_fast_version(self, box_dim: int):
        """The tensor fast path yields a (batch, num_gt, box_dim + 1) result."""
        packed_gt = torch.from_numpy(
            np.array([[0., 1., *(0., ) * box_dim]], dtype=np.float32))
        result = gt_instances_preprocess(packed_gt, 1)
        assert isinstance(result, Tensor)
        assert len(result.shape) == 3, 'the len of result must be 3.'
        assert result.shape[1] == 1
        assert result.shape[2] == box_dim + 1
| 1,330 | 35.972222 | 77 | py |
mmyolo | mmyolo-main/tests/test_models/test_necks/test_ppyoloe_csppan.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmyolo.models import PPYOLOECSPPAFPN
from mmyolo.utils import register_all_modules
register_all_modules()
class TestPPYOLOECSPPAFPN(TestCase):
    """Smoke tests for the ``PPYOLOECSPPAFPN`` neck."""

    @staticmethod
    def _random_pyramid(channels, base_size):
        # One random NCHW map per level; spatial size halves each level.
        return [
            torch.rand(1, ch, base_size // 2**lvl, base_size // 2**lvl)
            for lvl, ch in enumerate(channels)
        ]

    def _check_outputs(self, outs, feats, out_channels, base_size):
        # Same number of levels, expected channels, halved square sizes.
        self.assertEqual(len(outs), len(feats))
        for lvl, out in enumerate(outs):
            self.assertEqual(out.shape[1], out_channels[lvl])
            self.assertEqual(out.shape[2], out.shape[3])
            self.assertEqual(out.shape[2], base_size // 2**lvl)

    def test_forward(self):
        base = 64
        channels = [8, 16, 32]
        feats = self._random_pyramid(channels, base)
        neck = PPYOLOECSPPAFPN(in_channels=channels, out_channels=channels)
        outs = neck(feats)
        self._check_outputs(outs, feats, channels, base)

    def test_drop_block(self):
        base = 64
        channels = [8, 16, 32]
        feats = self._random_pyramid(channels, base)
        neck = PPYOLOECSPPAFPN(
            in_channels=channels,
            out_channels=channels,
            drop_block_cfg=dict(
                type='mmdet.DropBlock',
                drop_prob=0.1,
                block_size=3,
                warm_iters=0))
        # train() so the drop-block branch is active in the forward pass.
        neck.train()
        outs = neck(feats)
        self._check_outputs(outs, feats, channels, base)
| 1,741 | 31.259259 | 71 | py |
mmyolo | mmyolo-main/tests/test_models/test_necks/test_cspnext_pafpn.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmyolo.models.necks import CSPNeXtPAFPN
from mmyolo.utils import register_all_modules
register_all_modules()
class TestCSPNeXtPAFPN(TestCase):
    """Tests for the ``CSPNeXtPAFPN`` neck."""

    def test_forward(self):
        s = 64
        in_channels = [8, 16, 32]
        feat_sizes = [s // 2**i for i in range(4)]  # [64, 32, 16, 8]
        out_channels = 24
        feats = [
            torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i])
            for i in range(len(in_channels))
        ]
        neck = CSPNeXtPAFPN(in_channels=in_channels, out_channels=out_channels)
        outs = neck(feats)
        assert len(outs) == len(feats)
        for i in range(len(feats)):
            # Every level is projected to the same output channel count.
            assert outs[i].shape[1] == out_channels
            assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i)

        # test depth-wise
        neck = CSPNeXtPAFPN(
            in_channels=in_channels,
            out_channels=out_channels,
            use_depthwise=True)
        from mmcv.cnn.bricks import DepthwiseSeparableConvModule

        # BUG FIX: the original used two-argument
        # ``assertTrue(neck.conv, DepthwiseSeparableConvModule)``, where
        # the second argument is just the failure *message*, so the
        # assertion always passed. ``assertIs`` performs the intended
        # identity check.
        # NOTE(review): assumes ``neck.conv`` stores the selected conv
        # *class* (not an instance) — confirm against CSPNeXtPAFPN.
        self.assertIs(neck.conv, DepthwiseSeparableConvModule)
| 1,159 | 29.526316 | 79 | py |
mmyolo | mmyolo-main/tests/test_models/test_necks/test_yolov8_pafpn.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmyolo.models import YOLOv8PAFPN
from mmyolo.utils import register_all_modules
register_all_modules()
class TestYOLOv8PAFPN(TestCase):
    """Forward smoke test for the ``YOLOv8PAFPN`` neck."""

    def test_YOLOv8PAFPN_forward(self):
        base = 64
        channels = [8, 16, 32]
        inputs = []
        for lvl, ch in enumerate(channels):
            side = base // 2**lvl
            inputs.append(torch.rand(1, ch, side, side))
        neck = YOLOv8PAFPN(in_channels=channels, out_channels=channels)
        outputs = neck(inputs)
        self.assertEqual(len(outputs), len(inputs))
        for lvl, out in enumerate(outputs):
            self.assertEqual(out.shape[1], channels[lvl])
            self.assertEqual(out.shape[2], base // 2**lvl)
            self.assertEqual(out.shape[3], base // 2**lvl)
| 879 | 29.344828 | 78 | py |
mmyolo | mmyolo-main/tests/test_models/test_necks/test_yolox_pafpn.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmyolo.models.necks import YOLOXPAFPN
from mmyolo.utils import register_all_modules
register_all_modules()
class TestYOLOXPAFPN(TestCase):
    """Forward smoke test for the ``YOLOXPAFPN`` neck."""

    def test_forward(self):
        base = 64
        channels = [8, 16, 32]
        out_ch = 24
        inputs = []
        for lvl, ch in enumerate(channels):
            side = base // 2**lvl
            inputs.append(torch.rand(1, ch, side, side))
        neck = YOLOXPAFPN(in_channels=channels, out_channels=out_ch)
        outputs = neck(inputs)
        self.assertEqual(len(outputs), len(inputs))
        for lvl, out in enumerate(outputs):
            # Every level is projected to the same channel count.
            self.assertEqual(out.shape[1], out_ch)
            self.assertEqual(out.shape[2], base // 2**lvl)
            self.assertEqual(out.shape[3], base // 2**lvl)
| 858 | 28.62069 | 77 | py |
mmyolo | mmyolo-main/tests/test_models/test_necks/test_yolov7_pafpn.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmcv.cnn import ConvModule
from mmyolo.models.necks import YOLOv7PAFPN
from mmyolo.utils import register_all_modules
register_all_modules()
class TestYOLOv7PAFPN(TestCase):
    """Smoke tests for ``YOLOv7PAFPN`` covering the P5 and P6 layouts
    and the main constructor switches.

    Fixes: removed leftover debug ``print(f.shape)`` loops and corrected
    the wrong inline ``feat_sizes`` comment.
    """

    def test_forward(self):
        # --- P5 layout ---
        s = 64
        in_channels = [8, 16, 32]
        feat_sizes = [s // 2**i for i in range(4)]  # [64, 32, 16, 8]
        out_channels = [8, 16, 32]
        feats = [
            torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i])
            for i in range(len(in_channels))
        ]
        neck = YOLOv7PAFPN(in_channels=in_channels, out_channels=out_channels)
        outs = neck(feats)
        assert len(outs) == len(feats)
        for i in range(len(feats)):
            # The P5 variant doubles the requested output channels.
            assert outs[i].shape[1] == out_channels[i] * 2
            assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i)

        # test is_tiny_version
        neck = YOLOv7PAFPN(
            in_channels=in_channels,
            out_channels=out_channels,
            is_tiny_version=True)
        outs = neck(feats)
        assert len(outs) == len(feats)
        for i in range(len(feats)):
            assert outs[i].shape[1] == out_channels[i] * 2
            assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i)

        # test use_in_channels_in_downsample
        neck = YOLOv7PAFPN(
            in_channels=in_channels,
            out_channels=out_channels,
            use_in_channels_in_downsample=True)
        outs = neck(feats)
        assert len(outs) == len(feats)
        for i in range(len(feats)):
            assert outs[i].shape[1] == out_channels[i] * 2
            assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i)

        # test use_repconv_outs is False: output layers fall back to
        # plain ConvModule.
        neck = YOLOv7PAFPN(
            in_channels=in_channels,
            out_channels=out_channels,
            use_repconv_outs=False)
        self.assertIsInstance(neck.out_layers[0], ConvModule)

        # --- P6 layout (four levels; channels are NOT doubled) ---
        s = 64
        in_channels = [8, 16, 32, 64]
        feat_sizes = [s // 2**i for i in range(4)]
        out_channels = [8, 16, 32, 64]
        feats = [
            torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i])
            for i in range(len(in_channels))
        ]
        neck = YOLOv7PAFPN(in_channels=in_channels, out_channels=out_channels)
        outs = neck(feats)
        assert len(outs) == len(feats)
        for i in range(len(feats)):
            assert outs[i].shape[1] == out_channels[i]
            assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i)
| 2,718 | 32.9875 | 78 | py |
mmyolo | mmyolo-main/tests/test_models/test_necks/test_yolov6_pafpn.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmyolo.models.necks import YOLOv6CSPRepPAFPN, YOLOv6RepPAFPN
from mmyolo.utils import register_all_modules
register_all_modules()
class TestYOLOv6PAFPN(TestCase):
    """Forward smoke tests for the YOLOv6 neck variants."""

    def _run_neck(self, neck_cls):
        # Shared driver: build a three-level random pyramid, run the
        # neck, and verify per-level channel counts and spatial sizes.
        base = 64
        channels = [8, 16, 32]
        inputs = []
        for lvl, ch in enumerate(channels):
            side = base // 2**lvl
            inputs.append(torch.rand(1, ch, side, side))
        neck = neck_cls(in_channels=channels, out_channels=channels)
        outputs = neck(inputs)
        self.assertEqual(len(outputs), len(inputs))
        for lvl, out in enumerate(outputs):
            self.assertEqual(out.shape[1], channels[lvl])
            self.assertEqual(out.shape[2], base // 2**lvl)
            self.assertEqual(out.shape[3], base // 2**lvl)

    def test_YOLOv6RepPAFP_forward(self):
        self._run_neck(YOLOv6RepPAFPN)

    def test_YOLOv6CSPRepPAFPN_forward(self):
        self._run_neck(YOLOv6CSPRepPAFPN)
| 1,593 | 32.914894 | 71 | py |
mmyolo | mmyolo-main/tests/test_models/test_necks/test_yolov5_pafpn.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmyolo.models.necks import YOLOv5PAFPN
from mmyolo.utils import register_all_modules
register_all_modules()
class TestYOLOv5PAFPN(TestCase):
    """Forward smoke test for the ``YOLOv5PAFPN`` neck."""

    def test_forward(self):
        base = 64
        channels = [8, 16, 32]
        inputs = [
            torch.rand(1, ch, base // 2**lvl, base // 2**lvl)
            for lvl, ch in enumerate(channels)
        ]
        neck = YOLOv5PAFPN(in_channels=channels, out_channels=channels)
        outputs = neck(inputs)
        self.assertEqual(len(outputs), len(inputs))
        for lvl, out in enumerate(outputs):
            self.assertEqual(out.shape[1], channels[lvl])
            self.assertEqual(out.shape[2], base // 2**lvl)
            self.assertEqual(out.shape[3], base // 2**lvl)
| 873 | 29.137931 | 78 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.