| import torch |
| import torch.nn as nn |
|
|
| from ..builder import DETECTORS |
| from .detr import DETR |
| from ..bricks import AffineDropPath |
| from ..utils.bbox_tools import proposal_cw_to_se |
| from ..utils.post_processing import batched_nms, convert_to_seconds |
|
|
|
|
@DETECTORS.register_module()
class DeformableDETR(DETR):
    """Deformable-DETR style detector.

    Thin specialization of :class:`DETR`: it shares the same component
    layout (optional backbone -> optional projection -> optional neck ->
    transformer) and delegates construction to the parent class. The
    train/test entry points differ from the base mainly in how masks are
    inverted and how predictions are post-processed.
    """

    def __init__(
        self,
        projection,
        transformer,
        neck=None,
        backbone=None,
    ):
        """Initialize the detector.

        Args:
            projection: config/module for the input projection layer.
            transformer: config/module for the deformable transformer.
            neck: optional config/module for the neck.
            backbone: optional config/module for the feature backbone.
        """
        super(DeformableDETR, self).__init__(
            projection=projection,
            transformer=transformer,
            neck=neck,
            backbone=backbone,
        )

    def _extract_features(self, inputs, masks):
        """Run the shared feature pipeline and derive padding masks.

        Applies backbone, projection, and neck (each only if configured),
        then inverts the validity masks into padding masks for the
        transformer (which expects True at *padded* positions).

        Returns:
            tuple: ``(x, masks, padding_masks)`` where ``masks`` is the
            (possibly neck-updated) validity mask and ``padding_masks``
            is its element-wise inversion.

        Raises:
            TypeError: if ``masks`` is neither a list nor a torch.Tensor.
        """
        x = self.backbone(inputs) if self.with_backbone else inputs

        if self.with_projection:
            x, masks = self.projection(x, masks)

        if self.with_neck:
            x, masks = self.neck(x, masks)

        # masks mark valid positions; the transformer consumes the inverse.
        if isinstance(masks, list):
            padding_masks = [~mask for mask in masks]
        elif isinstance(masks, torch.Tensor):
            padding_masks = ~masks
        else:
            raise TypeError("masks should be either list or torch.Tensor")

        return x, masks, padding_masks

    def forward_train(self, inputs, masks, metas, gt_segments, gt_labels, **kwargs):
        """Forward pass for training.

        Args:
            inputs: raw input features (or frames, if a backbone is set).
            masks: boolean validity masks for the temporal dimension.
            metas: per-video meta info (unused here; kept for API parity).
            gt_segments: ground-truth temporal segments.
            gt_labels: ground-truth class labels.

        Returns:
            dict: individual loss terms plus their sum under key ``"cost"``.
        """
        x, _, padding_masks = self._extract_features(inputs, masks)

        losses = dict()
        transformer_loss = self.transformer.forward_train(
            x,
            padding_masks,
            gt_segments=gt_segments,
            gt_labels=gt_labels,
            **kwargs,
        )
        losses.update(transformer_loss)

        # Total optimization objective: sum of every individual loss term.
        losses["cost"] = sum(losses.values())
        return losses

    def forward_test(self, inputs, masks, metas=None, infer_cfg=None, **kwargs):
        """Forward pass for inference.

        Returns:
            tuple: ``(output, masks[0])`` — raw transformer predictions and
            the first entry of the (post-neck) validity masks, consumed
            later by :meth:`post_processing`.
        """
        x, masks, padding_masks = self._extract_features(inputs, masks)

        output = self.transformer.forward_test(x, padding_masks, **kwargs)

        predictions = output, masks[0]
        return predictions

    @torch.no_grad()
    def post_processing(self, predictions, metas, post_cfg, ext_cls, **kwargs):
        """Convert raw predictions into per-video detection results.

        Selects the top-k (class, query) pairs by sigmoid score, scales
        proposals to absolute (un-normalized) coordinates, optionally
        applies NMS, converts segments to seconds, and groups results by
        video id.

        Args:
            predictions: ``(output, masks)`` as returned by ``forward_test``.
            metas: per-video meta dicts; must contain ``"video_name"``.
            post_cfg: post-processing config (``pre_nms_topk``,
                ``sliding_window``, ``nms``).
            ext_cls: either a list mapping label index -> class name, or a
                callable external classifier applied per video.

        Returns:
            dict: ``video_name -> list of {segment, label, score}`` dicts.
        """
        output, masks = predictions
        pred_logits = output["pred_logits"]
        pred_boxes = output["pred_boxes"]

        pre_nms_topk = getattr(post_cfg, "pre_nms_topk", 200)
        bs, _, num_classes = pred_logits.shape

        # Top-k over the flattened (query, class) score matrix per sample.
        prob = pred_logits.sigmoid()
        topk_values, topk_indexes = torch.topk(prob.view(bs, -1), pre_nms_topk, dim=1)
        batch_scores = topk_values
        topk_boxes = torch.div(topk_indexes, num_classes, rounding_mode="floor")
        batch_labels = torch.fmod(topk_indexes, num_classes)

        # (center, width) -> (start, end), rescaled by the valid length
        # (sum over the boolean mask) of each sample.
        batch_proposals = proposal_cw_to_se(pred_boxes) * torch.sum(masks, dim=1)[:, None, None]
        batch_proposals = torch.gather(batch_proposals, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 2))

        results = {}
        for i in range(len(metas)):
            segments = batch_proposals[i].detach().cpu()
            scores = batch_scores[i].detach().cpu()
            labels = batch_labels[i].detach().cpu()

            # NMS only when not in sliding-window mode and configured.
            if not post_cfg.sliding_window and post_cfg.nms is not None:
                segments, scores, labels = batched_nms(segments, scores, labels, **post_cfg.nms)

            video_id = metas[i]["video_name"]

            # Convert feature-grid coordinates to seconds.
            segments = convert_to_seconds(segments, metas[i])

            # Map label indices to names, or delegate to an external
            # classifier that re-scores and re-labels the segments.
            if isinstance(ext_cls, list):
                labels = [ext_cls[label.item()] for label in labels]
            else:
                segments, labels, scores = ext_cls(video_id, segments, scores)

            results_per_video = [
                dict(
                    segment=[round(seg.item(), 2) for seg in segment],
                    label=label,
                    score=round(score.item(), 4),
                )
                for segment, label, score in zip(segments, labels, scores)
            ]

            # Sliding-window inference can yield the same video several
            # times; accumulate instead of overwriting.
            if video_id in results:
                results[video_id].extend(results_per_video)
            else:
                results[video_id] = results_per_video

        return results

    def get_optim_groups(self, cfg):
        """Split parameters into weight-decay / no-decay optimizer groups.

        Follows the common minGPT-style scheme: Linear/Conv1d weights (and
        MultiheadAttention in-projections) decay; biases, norm weights,
        AffineDropPath scales, level embeddings, and target-embedding
        weights do not. Backbone parameters are excluded entirely —
        presumably they get their own optimizer settings elsewhere
        (TODO confirm against the training loop).

        Args:
            cfg: mapping with key ``"weight_decay"``.

        Returns:
            list[dict]: two param groups suitable for a torch optimizer.
        """
        decay = set()
        no_decay = set()
        whitelist_weight_modules = (nn.Linear, nn.Conv1d)
        blacklist_weight_modules = (nn.LayerNorm, nn.GroupNorm)

        # named_modules() visits every parameter once per enclosing module;
        # the set semantics make the repeated visits harmless.
        for mn, m in self.named_modules():
            for pn, p in m.named_parameters():
                fpn = "%s.%s" % (mn, pn) if mn else pn  # full param name

                # Backbone params are handled outside these groups.
                if fpn.startswith("backbone"):
                    continue

                if pn.endswith("bias"):
                    # All biases: no decay.
                    no_decay.add(fpn)
                elif pn.endswith("weight") and isinstance(m, whitelist_weight_modules):
                    # Linear / Conv1d weights: decay.
                    decay.add(fpn)
                elif pn.endswith("weight") and isinstance(m, blacklist_weight_modules):
                    # Norm-layer weights: no decay.
                    no_decay.add(fpn)
                elif pn.endswith("scale") and isinstance(m, AffineDropPath):
                    # Learnable drop-path scales: no decay.
                    no_decay.add(fpn)
                elif pn.endswith("in_proj_weight") and isinstance(m, nn.MultiheadAttention):
                    decay.add(fpn)
                elif pn.endswith("level_embeds"):
                    # Deformable-attention level embeddings: no decay.
                    no_decay.add(fpn)
                elif pn.endswith("weight") and ("tgt_embed" in pn):
                    no_decay.add(fpn)

        # Validate that the two sets partition all non-backbone params.
        param_dict = {pn: p for pn, p in self.named_parameters() if not pn.startswith("backbone")}
        inter_params = decay & no_decay
        union_params = decay | no_decay
        assert len(inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (str(inter_params),)
        assert (
            len(param_dict.keys() - union_params) == 0
        ), "parameters %s were not separated into either decay/no_decay set!" % (str(param_dict.keys() - union_params),)

        optim_groups = [
            {"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": cfg["weight_decay"]},
            {"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0},
        ]
        return optim_groups
|
|