id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
155,775 | import copy
import logging
import numpy as np
import torch
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from pycocotools import mask as coco_mask
def convert_coco_poly_to_mask(segmentations, height, width):
    """Rasterize COCO polygon segmentations into a stacked binary mask tensor.

    Args:
        segmentations: list of per-instance polygon lists (COCO format).
        height: output mask height in pixels.
        width: output mask width in pixels.

    Returns:
        Tensor of shape (num_instances, height, width); an empty
        (0, height, width) uint8 tensor when there are no instances.
    """
    per_instance = []
    for polys in segmentations:
        rles = coco_mask.frPyObjects(polys, height, width)
        decoded = coco_mask.decode(rles)
        # decode() drops the channel axis for a single polygon; restore it.
        if len(decoded.shape) < 3:
            decoded = decoded[..., None]
        decoded = torch.as_tensor(decoded, dtype=torch.uint8)
        # Union the per-polygon channels into a single instance mask.
        per_instance.append(decoded.any(dim=2))
    if not per_instance:
        return torch.zeros((0, height, width), dtype=torch.uint8)
    return torch.stack(per_instance, dim=0)
155,776 | import copy
import logging
import numpy as np
import torch
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from pycocotools import mask as coco_mask
The provided code snippet includes necessary dependencies for implementing the `build_transform_gen` function. Write a Python function `def build_transform_gen( image_size, min_scale, max_scale, random_flip: str = "horizontal", is_train: bool = True, )` to solve the following problem:
Create a list of default :class:`Augmentation`. Now it includes resizing and flipping. Returns: list[Augmentation]
Here is the function:
def build_transform_gen(
    image_size,
    min_scale,
    max_scale,
    random_flip: str = "horizontal",
    is_train: bool = True,
):
    """
    Create a list of default :class:`Augmentation`.
    Now it includes resizing (large-scale jitter + fixed-size crop) and flipping.
    Returns:
        list[Augmentation]
    """
    assert is_train, "Only support training augmentation."
    assert random_flip in ["none", "horizontal",
                           "vertical"], f"Only support none/horizontal/vertical flip, but got {random_flip}"

    tfm_gens = []
    # Optional flip first, matching the requested orientation.
    if random_flip != "none":
        tfm_gens.append(
            T.RandomFlip(
                horizontal=(random_flip == "horizontal"),
                vertical=(random_flip == "vertical"),
            )
        )
    # Scale jitter into [min_scale, max_scale] of the target size, then pad/crop
    # to a square image of side `image_size`.
    tfm_gens.append(
        T.ResizeScale(
            min_scale=min_scale, max_scale=max_scale, target_height=image_size, target_width=image_size,
        )
    )
    tfm_gens.append(T.FixedSizeCrop(crop_size=(image_size, image_size)))
    return tfm_gens
155,777 | import logging
import torch
import torch.nn.functional as F
from torch import nn
from detectron2.utils.comm import get_world_size
from detectron2.projects.point_rend.point_features import (
get_uncertain_point_coords_with_randomness,
point_sample,
)
from ..utils.misc import is_dist_avail_and_initialized, nested_tensor_from_tensor_list
from projects.maskdino.utils import box_ops
The provided code snippet includes necessary dependencies for implementing the `sigmoid_focal_loss` function. Write a Python function `def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2)` to solve the following problem:
Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. Args: inputs: A float tensor of arbitrary shape. The predictions for each example. targets: A float tensor with the same shape as inputs. Stores the binary classification label for each element in inputs (0 for the negative class and 1 for the positive class). alpha: (optional) Weighting factor in range (0,1) to balance positive vs negative examples. Default = -1 (no weighting). gamma: Exponent of the modulating factor (1 - p_t) to balance easy vs hard examples. Returns: Loss tensor
Here is the function:
def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2):
    """
    Focal loss from RetinaNet (https://arxiv.org/abs/1708.02002) for dense detection.
    Args:
        inputs: A float tensor of arbitrary shape.
                The prediction logits for each example.
        targets: A float tensor with the same shape as inputs. Stores the binary
                 classification label for each element in inputs
                 (0 for the negative class and 1 for the positive class).
        num_boxes: normalizer for the summed per-example losses.
        alpha: (optional) Weighting factor in range (0,1) to balance
               positive vs negative examples; a negative value disables weighting.
        gamma: Exponent of the modulating factor (1 - p_t) used to
               down-weight easy examples.
    Returns:
        Scalar loss tensor.
    """
    probs = inputs.sigmoid()
    raw_ce = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
    # Probability assigned to the ground-truth class of each element.
    pt = probs * targets + (1 - probs) * (1 - targets)
    focal = raw_ce * ((1 - pt) ** gamma)
    if alpha >= 0:
        # Class-balancing factor: alpha for positives, (1 - alpha) for negatives.
        focal = (alpha * targets + (1 - alpha) * (1 - targets)) * focal
    return focal.mean(1).sum() / num_boxes
155,778 | import logging
import torch
import torch.nn.functional as F
from torch import nn
from detectron2.utils.comm import get_world_size
from detectron2.projects.point_rend.point_features import (
get_uncertain_point_coords_with_randomness,
point_sample,
)
from ..utils.misc import is_dist_avail_and_initialized, nested_tensor_from_tensor_list
from projects.maskdino.utils import box_ops
The provided code snippet includes necessary dependencies for implementing the `dice_loss` function. Write a Python function `def dice_loss( inputs: torch.Tensor, targets: torch.Tensor, num_masks: float, )` to solve the following problem:
Compute the DICE loss, similar to generalized IOU for masks Args: inputs: A float tensor of arbitrary shape. The predictions for each example. targets: A float tensor with the same shape as inputs. Stores the binary classification label for each element in inputs (0 for the negative class and 1 for the positive class).
Here is the function:
def dice_loss(
    inputs: torch.Tensor,
    targets: torch.Tensor,
    num_masks: float,
):
    """
    Compute the DICE loss, similar to generalized IOU for masks.
    Args:
        inputs: A float tensor of arbitrary shape.
                The prediction logits for each example.
        targets: A float tensor with the same shape as inputs. Stores the binary
                 classification label for each element in inputs
                 (0 for the negative class and 1 for the positive class).
        num_masks: normalizer for the summed per-mask losses.
    """
    probs = inputs.sigmoid().flatten(1)
    intersection = (probs * targets).sum(-1)
    totals = probs.sum(-1) + targets.sum(-1)
    # +1 smoothing keeps the ratio finite for empty masks.
    dice = (2 * intersection + 1) / (totals + 1)
    return (1 - dice).sum() / num_masks
155,779 | import logging
import torch
import torch.nn.functional as F
from torch import nn
from detectron2.utils.comm import get_world_size
from detectron2.projects.point_rend.point_features import (
get_uncertain_point_coords_with_randomness,
point_sample,
)
from ..utils.misc import is_dist_avail_and_initialized, nested_tensor_from_tensor_list
from projects.maskdino.utils import box_ops
The provided code snippet includes necessary dependencies for implementing the `sigmoid_ce_loss` function. Write a Python function `def sigmoid_ce_loss( inputs: torch.Tensor, targets: torch.Tensor, num_masks: float, )` to solve the following problem:
Args: inputs: A float tensor of arbitrary shape. The predictions for each example. targets: A float tensor with the same shape as inputs. Stores the binary classification label for each element in inputs (0 for the negative class and 1 for the positive class). Returns: Loss tensor
Here is the function:
def sigmoid_ce_loss(
    inputs: torch.Tensor,
    targets: torch.Tensor,
    num_masks: float,
):
    """
    Sigmoid binary cross-entropy over mask points.
    Args:
        inputs: A float tensor of arbitrary shape.
                The prediction logits for each example.
        targets: A float tensor with the same shape as inputs. Stores the binary
                 classification label for each element in inputs
                 (0 for the negative class and 1 for the positive class).
        num_masks: normalizer for the summed per-mask losses.
    Returns:
        Scalar loss tensor.
    """
    per_point = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
    # Average over the points of each mask, then normalize across masks.
    return per_point.mean(1).sum() / num_masks
155,780 | import logging
import torch
import torch.nn.functional as F
from torch import nn
from detectron2.utils.comm import get_world_size
from detectron2.projects.point_rend.point_features import (
get_uncertain_point_coords_with_randomness,
point_sample,
)
from ..utils.misc import is_dist_avail_and_initialized, nested_tensor_from_tensor_list
from projects.maskdino.utils import box_ops
The provided code snippet includes necessary dependencies for implementing the `calculate_uncertainty` function. Write a Python function `def calculate_uncertainty(logits)` to solve the following problem:
We estimate uncerainty as L1 distance between 0.0 and the logit prediction in 'logits' for the foreground class in `classes`. Args: logits (Tensor): A tensor of shape (R, 1, ...) for class-specific or class-agnostic, where R is the total number of predicted masks in all images and C is the number of foreground classes. The values are logits. Returns: scores (Tensor): A tensor of shape (R, 1, ...) that contains uncertainty scores with the most uncertain locations having the highest uncertainty score.
Here is the function:
def calculate_uncertainty(logits):
    """
    Estimate uncertainty as the (negated) L1 distance between 0.0 and the logit
    prediction in `logits` for the foreground class.
    Args:
        logits (Tensor): A tensor of shape (R, 1, ...) of class-agnostic mask
            logits, where R is the total number of predicted masks in all images.
    Returns:
        scores (Tensor): A tensor of shape (R, 1, ...); the most uncertain
        locations (logits closest to 0) receive the highest scores.
    """
    assert logits.shape[1] == 1
    # Negative absolute logit: maximal (0) exactly at the decision boundary.
    return logits.clone().abs().neg()
155,781 | import logging
import fvcore.nn.weight_init as weight_init
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.layers import Conv2d
from detectron2.utils.registry import Registry
from detectron2.structures import BitMasks
from .dino_decoder import TransformerDecoder, DeformableTransformerDecoderLayer
from ...utils.utils import MLP, gen_encoder_output_proposals, inverse_sigmoid
from ...utils import box_ops
TRANSFORMER_DECODER_REGISTRY = Registry("TRANSFORMER_MODULE")
TRANSFORMER_DECODER_REGISTRY.__doc__ = """
Registry for transformer module in MaskDINO.
"""
The provided code snippet includes necessary dependencies for implementing the `build_transformer_decoder` function. Write a Python function `def build_transformer_decoder(cfg, in_channels, mask_classification=True)` to solve the following problem:
Build an instance embedding branch from `cfg.MODEL.INS_EMBED_HEAD.NAME`.
Here is the function:
def build_transformer_decoder(cfg, in_channels, mask_classification=True):
    """
    Build an instance embedding branch (transformer decoder) from
    `cfg.MODEL.MaskDINO.TRANSFORMER_DECODER_NAME`.

    Fix: the docstring previously pointed at `cfg.MODEL.INS_EMBED_HEAD.NAME`,
    which is not the key the code reads.

    Args:
        cfg: detectron2-style config node.
        in_channels: number of input feature channels for the decoder.
        mask_classification: whether the decoder also classifies masks.

    Returns:
        The decoder module instantiated from TRANSFORMER_DECODER_REGISTRY.
    """
    name = cfg.MODEL.MaskDINO.TRANSFORMER_DECODER_NAME
    return TRANSFORMER_DECODER_REGISTRY.get(name)(cfg, in_channels, mask_classification)
155,782 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from detectron2.modeling import BACKBONE_REGISTRY, Backbone, ShapeSpec
The provided code snippet includes necessary dependencies for implementing the `window_partition` function. Write a Python function `def window_partition(x, window_size)` to solve the following problem:
Args: x: (B, H, W, C) window_size (int): window size Returns: windows: (num_windows*B, window_size, window_size, C)
Here is the function:
def window_partition(x, window_size):
    """
    Split a feature map into non-overlapping square windows.
    Args:
        x: (B, H, W, C); H and W must be divisible by window_size.
        window_size (int): window size
    Returns:
        windows: (num_windows*B, window_size, window_size, C)
    """
    B, H, W, C = x.shape
    n_h, n_w = H // window_size, W // window_size
    tiled = x.view(B, n_h, window_size, n_w, window_size, C)
    # Bring the two window-grid axes together before flattening them out.
    tiled = tiled.permute(0, 1, 3, 2, 4, 5).contiguous()
    return tiled.view(-1, window_size, window_size, C)
155,783 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from detectron2.modeling import BACKBONE_REGISTRY, Backbone, ShapeSpec
The provided code snippet includes necessary dependencies for implementing the `window_reverse` function. Write a Python function `def window_reverse(windows, window_size, H, W)` to solve the following problem:
Args: windows: (num_windows*B, window_size, window_size, C) window_size (int): Window size H (int): Height of image W (int): Width of image Returns: x: (B, H, W, C)
Here is the function:
def window_reverse(windows, window_size, H, W):
    """
    Merge windows produced by `window_partition` back into a feature map.
    Args:
        windows: (num_windows*B, window_size, window_size, C)
        window_size (int): Window size
        H (int): Height of image
        W (int): Width of image
    Returns:
        x: (B, H, W, C)
    """
    # Recover the batch size from the total window count.
    B = int(windows.shape[0] / (H * W / window_size / window_size))
    grid = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
    # Interleave the window-grid axes back into spatial order.
    grid = grid.permute(0, 1, 3, 2, 4, 5).contiguous()
    return grid.view(B, H, W, -1)
155,784 | import math
import time
import numpy as np
import json
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from detectron2.modeling import BACKBONE_REGISTRY, Backbone, ShapeSpec
class FocalNet(nn.Module):
    """ FocalNet backbone.

    Hierarchical multi-stage backbone; `forward` emits feature maps for the
    stages listed in `out_indices` as a dict keyed "res2".."res5".

    Args:
        pretrain_img_size (int): Input image size for training the pretrained model,
            used in absolute position embedding. Default 1600.
        patch_size (int | tuple(int)): Patch size. Default: 4.
        in_chans (int): Number of input image channels. Default: 3.
        embed_dim (int): Number of linear projection output channels. Default: 96.
        depths (tuple[int]): Depths of each stage.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
        drop_rate (float): Dropout rate.
        drop_path_rate (float): Stochastic depth rate. Default: 0.2.
        norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
        patch_norm (bool): If True, add normalization after patch embedding. Default: True.
        out_indices (Sequence[int]): Output from which stages.
        frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
            -1 means not freezing any parameters.
        focal_levels (Sequence[int]): Number of focal levels at four stages
        focal_windows (Sequence[int]): Focal window sizes at first focal level at four stages
        use_conv_embed (bool): Whether use overlapped convolution for patch embedding
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
    """

    def __init__(self,
                 pretrain_img_size=1600,
                 patch_size=4,
                 in_chans=3,
                 embed_dim=96,
                 depths=[2, 2, 6, 2],  # NOTE(review): mutable defaults; safe only while never mutated
                 mlp_ratio=4.,
                 drop_rate=0.,
                 drop_path_rate=0.2,
                 norm_layer=nn.LayerNorm,
                 patch_norm=True,
                 out_indices=(0, 1, 2, 3),
                 frozen_stages=-1,
                 focal_levels=[2,2,2,2],
                 focal_windows=[9,9,9,9],
                 use_conv_embed=False,
                 use_postln=False,
                 use_postln_in_modulation=False,
                 use_layerscale=False,
                 normalize_modulator=False,
                 use_checkpoint=False,
                 ):
        super().__init__()

        self.pretrain_img_size = pretrain_img_size
        self.num_layers = len(depths)
        self.embed_dim = embed_dim
        self.patch_norm = patch_norm
        self.out_indices = out_indices
        self.frozen_stages = frozen_stages

        # split image into non-overlapping patches
        self.patch_embed = PatchEmbed(
            patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
            norm_layer=norm_layer if self.patch_norm else None,
            use_conv_embed=use_conv_embed, is_stem=True)

        self.pos_drop = nn.Dropout(p=drop_rate)

        # stochastic depth: linearly ramp the drop-path rate over all blocks
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]  # stochastic depth decay rule

        # build layers; stage i operates at embed_dim * 2**i channels
        self.layers = nn.ModuleList()
        for i_layer in range(self.num_layers):
            layer = BasicLayer(
                dim=int(embed_dim * 2 ** i_layer),
                depth=depths[i_layer],
                mlp_ratio=mlp_ratio,
                drop=drop_rate,
                # per-block drop-path rates for this stage, sliced from the global ramp
                drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
                norm_layer=norm_layer,
                # every stage except the last downsamples via a patch-embed layer
                downsample=PatchEmbed if (i_layer < self.num_layers - 1) else None,
                focal_window=focal_windows[i_layer],
                focal_level=focal_levels[i_layer],
                use_conv_embed=use_conv_embed,
                use_postln=use_postln,
                use_postln_in_modulation=use_postln_in_modulation,
                normalize_modulator=normalize_modulator,
                use_layerscale=use_layerscale,
                use_checkpoint=use_checkpoint)
            self.layers.append(layer)

        num_features = [int(embed_dim * 2 ** i) for i in range(self.num_layers)]
        self.num_features = num_features

        # add a norm layer for each output stage
        for i_layer in out_indices:
            layer = norm_layer(num_features[i_layer])
            layer_name = f'norm{i_layer}'
            self.add_module(layer_name, layer)

        self._freeze_stages()

    def _freeze_stages(self):
        # Freeze the patch embedding, and — for frozen_stages >= 2 — the
        # positional dropout plus the first (frozen_stages - 1) stages.
        if self.frozen_stages >= 0:
            self.patch_embed.eval()
            for param in self.patch_embed.parameters():
                param.requires_grad = False

        if self.frozen_stages >= 2:
            self.pos_drop.eval()
            for i in range(0, self.frozen_stages - 1):
                m = self.layers[i]
                m.eval()
                for param in m.parameters():
                    param.requires_grad = False

    def init_weights(self, pretrained=None):
        """Initialize the weights in backbone.
        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.
        """

        def _init_weights(m):
            # Truncated-normal linear weights, zero biases, unit LayerNorm scale.
            if isinstance(m, nn.Linear):
                trunc_normal_(m.weight, std=.02)
                if isinstance(m, nn.Linear) and m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.LayerNorm):
                nn.init.constant_(m.bias, 0)
                nn.init.constant_(m.weight, 1.0)

        # NOTE(review): checkpoint loading is disabled; _init_weights is
        # currently never applied.
        # if isinstance(pretrained, str):
        #     self.apply(_init_weights)
        #     logger = get_root_logger()
        #     load_checkpoint(self, pretrained, strict=False, logger=logger)
        # elif pretrained is None:
        #     self.apply(_init_weights)
        # else:
        #     raise TypeError('pretrained must be a str or None')

    def forward(self, x):
        """Forward function.

        Args:
            x: input image batch (presumably (B, C, H, W) as consumed by
                PatchEmbed — TODO confirm).

        Returns:
            dict mapping "res{i+2}" to the (B, C_i, H_i, W_i) feature map of
            each stage i in `out_indices`.
        """
        # x = tensor_list.tensors
        tic = time.time()  # NOTE(review): timed but the duration is never reported
        x = self.patch_embed(x)
        Wh, Ww = x.size(2), x.size(3)
        # Flatten the spatial grid into a token sequence: (B, C, H, W) -> (B, H*W, C).
        x = x.flatten(2).transpose(1, 2)
        x = self.pos_drop(x)

        # outs = []
        outs = {}
        for i in range(self.num_layers):
            layer = self.layers[i]
            x_out, H, W, x, Wh, Ww = layer(x, Wh, Ww)
            if i in self.out_indices:
                norm_layer = getattr(self, f'norm{i}')
                x_out = norm_layer(x_out)

                # Restore the (B, C, H, W) layout expected by downstream heads.
                out = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous()
                # outs.append(out)
                outs["res{}".format(i + 2)] = out
        toc = time.time()

        # # collect for nesttensors
        # outs_dict = {}
        # for idx, out_i in enumerate(outs):
        #     m = tensor_list.mask
        #     assert m is not None
        #     mask = F.interpolate(m[None].float(), size=out_i.shape[-2:]).to(torch.bool)[0]
        #     outs_dict[idx] = NestedTensor(out_i, mask)

        return outs

    def train(self, mode=True):
        """Convert the model into training mode while keep layers freezed."""
        super(FocalNet, self).train(mode)
        self._freeze_stages()
def build_focalnet(modelname, **kw):
    """
    Construct a FocalNet backbone from a named preset.

    Args:
        modelname: one of 'focalnet_L_384_22k', 'focalnet_L_384_22k_fl4',
            'focalnet_XL_384_22k', 'focalnet_huge_224_22k'.
        **kw: preset overrides; scalar 'focal_levels' / 'focal_windows'
            values are broadcast to all four stages.

    Returns:
        A FocalNet instance configured from the chosen preset.
    """
    # Fix: 'focalnet_huge_224_22k' had a preset below but was rejected by this
    # assert, making that configuration unreachable.
    assert modelname in ['focalnet_L_384_22k', 'focalnet_L_384_22k_fl4',
                         'focalnet_XL_384_22k', 'focalnet_huge_224_22k']
    # Broadcast scalar per-stage settings to all four stages.
    if 'focal_levels' in kw:
        kw['focal_levels'] = [kw['focal_levels']] * 4
    if 'focal_windows' in kw:
        kw['focal_windows'] = [kw['focal_windows']] * 4

    model_para_dict = {
        'focalnet_L_384_22k': dict(
            embed_dim=192,
            depths=[ 2, 2, 18, 2 ],
            focal_levels=kw.get('focal_levels', [3, 3, 3, 3]),
            focal_windows=kw.get('focal_windows', [5, 5, 5, 5]),
            use_conv_embed=True,
            use_postln=True,
            use_postln_in_modulation=False,
            use_layerscale=True,
            normalize_modulator=False,
        ),
        'focalnet_L_384_22k_fl4': dict(
            embed_dim=192,
            depths=[ 2, 2, 18, 2 ],
            focal_levels=kw.get('focal_levels', [4, 4, 4, 4]),
            focal_windows=kw.get('focal_windows', [3, 3, 3, 3]),
            use_conv_embed=True,
            use_postln=True,
            use_postln_in_modulation=False,
            use_layerscale=True,
            normalize_modulator=True,
        ),
        'focalnet_XL_384_22k': dict(
            embed_dim=256,
            depths=[ 2, 2, 18, 2 ],
            focal_levels=kw.get('focal_levels', [3, 3, 3, 3]),
            focal_windows=kw.get('focal_windows', [5, 5, 5, 5]),
            use_conv_embed=True,
            use_postln=True,
            use_postln_in_modulation=False,
            use_layerscale=True,
            normalize_modulator=False,
        ),
        'focalnet_huge_224_22k': dict(
            embed_dim=352,
            depths=[ 2, 2, 18, 2 ],
            focal_levels=kw.get('focal_levels', [3, 3, 3, 3]),
            focal_windows=kw.get('focal_windows', [5, 5, 5, 5]),
            use_conv_embed=True,
            use_postln=True,
            use_postln_in_modulation=False,
            use_layerscale=True,
            normalize_modulator=False,
        ),
    }
    kw_cfg = model_para_dict[modelname]
    # Caller-supplied overrides win over the preset defaults.
    kw_cfg.update(kw)
    model = FocalNet(**kw_cfg)
    return model
155,785 | import torch
import torch.nn.functional as F
from scipy.optimize import linear_sum_assignment
from torch import nn
from torch.cuda.amp import autocast
from detectron2.projects.point_rend.point_features import point_sample
from projects.maskdino.utils.box_ops import generalized_box_iou,box_cxcywh_to_xyxy
import random
The provided code snippet includes necessary dependencies for implementing the `batch_dice_loss` function. Write a Python function `def batch_dice_loss(inputs: torch.Tensor, targets: torch.Tensor)` to solve the following problem:
Compute the DICE loss, similar to generalized IOU for masks Args: inputs: A float tensor of arbitrary shape. The predictions for each example. targets: A float tensor with the same shape as inputs. Stores the binary classification label for each element in inputs (0 for the negative class and 1 for the positive class).
Here is the function:
def batch_dice_loss(inputs: torch.Tensor, targets: torch.Tensor):
    """
    Compute pairwise DICE losses between every prediction and every target mask.
    Args:
        inputs: A float tensor of shape (N, HW): one row of logits per
                predicted mask.
        targets: A float tensor of shape (M, HW). Stores the binary
                 classification label for each element
                 (0 for the negative class and 1 for the positive class).
    Returns:
        (N, M) loss matrix.
    """
    probs = inputs.sigmoid().flatten(1)
    # Pairwise soft intersections between all predictions and all targets.
    overlap = 2 * torch.einsum("nc,mc->nm", probs, targets)
    sizes = probs.sum(-1)[:, None] + targets.sum(-1)[None, :]
    # +1 smoothing keeps the ratio finite for empty masks.
    return 1 - (overlap + 1) / (sizes + 1)
155,786 | import torch
import torch.nn.functional as F
from scipy.optimize import linear_sum_assignment
from torch import nn
from torch.cuda.amp import autocast
from detectron2.projects.point_rend.point_features import point_sample
from projects.maskdino.utils.box_ops import generalized_box_iou,box_cxcywh_to_xyxy
import random
The provided code snippet includes necessary dependencies for implementing the `batch_sigmoid_ce_loss` function. Write a Python function `def batch_sigmoid_ce_loss(inputs: torch.Tensor, targets: torch.Tensor)` to solve the following problem:
Args: inputs: A float tensor of arbitrary shape. The predictions for each example. targets: A float tensor with the same shape as inputs. Stores the binary classification label for each element in inputs (0 for the negative class and 1 for the positive class). Returns: Loss tensor
Here is the function:
def batch_sigmoid_ce_loss(inputs: torch.Tensor, targets: torch.Tensor):
    """
    Pairwise sigmoid BCE between every predicted mask and every target mask.
    Args:
        inputs: A float tensor of shape (N, HW): one row of logits per
                predicted mask.
        targets: A float tensor of shape (M, HW). Stores the binary
                 classification label for each element
                 (0 for the negative class and 1 for the positive class).
    Returns:
        (N, M) loss matrix, averaged over the HW points.
    """
    num_points = inputs.shape[1]
    # BCE of every logit against an all-ones / all-zeros label.
    loss_pos = F.binary_cross_entropy_with_logits(
        inputs, torch.ones_like(inputs), reduction="none"
    )
    loss_neg = F.binary_cross_entropy_with_logits(
        inputs, torch.zeros_like(inputs), reduction="none"
    )
    # Combine according to the actual labels of each target mask.
    pairwise = torch.einsum("nc,mc->nm", loss_pos, targets)
    pairwise = pairwise + torch.einsum("nc,mc->nm", loss_neg, (1 - targets))
    return pairwise / num_points
155,787 | import logging
import numpy as np
from typing import Callable, Dict, List, Optional, Tuple, Union
import fvcore.nn.weight_init as weight_init
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn.init import xavier_uniform_, constant_, uniform_, normal_
from torch.cuda.amp import autocast
from detectron2.config import configurable
from detectron2.layers import Conv2d, ShapeSpec, get_norm
from detectron2.modeling import SEM_SEG_HEADS_REGISTRY
from .position_encoding import PositionEmbeddingSine
from ...utils.utils import _get_clones, _get_activation_fn
from detrex.layers import MultiScaleDeformableAttention
The provided code snippet includes necessary dependencies for implementing the `build_pixel_decoder` function. Write a Python function `def build_pixel_decoder(cfg, input_shape)` to solve the following problem:
Build a pixel decoder from `cfg.MODEL.MaskDINO.PIXEL_DECODER_NAME`.
Here is the function:
def build_pixel_decoder(cfg, input_shape):
    """
    Build a pixel decoder from `cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME`.

    Fix: the docstring previously pointed at `cfg.MODEL.MaskDINO.PIXEL_DECODER_NAME`,
    which is not the key the code reads.

    Args:
        cfg: detectron2-style config node.
        input_shape: mapping of feature name -> ShapeSpec describing the
            decoder's input features.

    Returns:
        The instantiated pixel decoder module.

    Raises:
        ValueError: if the selected head does not implement `forward_features`.
    """
    name = cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME
    model = SEM_SEG_HEADS_REGISTRY.get(name)(cfg, input_shape)
    forward_features = getattr(model, "forward_features", None)
    if not callable(forward_features):
        raise ValueError(
            "Only SEM_SEG_HEADS with forward_features method can be used as pixel decoder. "
            f"Please implement forward_features for {name} to only return mask features."
        )
    return model
155,788 | import torch
import copy
from torch import nn, Tensor
import os
import math
import torch.nn.functional as F
from torch import nn
def inverse_sigmoid(x, eps=1e-5):
    """Numerically stable logit function: the inverse of sigmoid on [0, 1].

    Values are clamped into [0, 1] first, and both the value and its
    complement are floored at `eps` to avoid log(0) / division blow-ups.
    """
    clamped = x.clamp(min=0, max=1)
    numerator = clamped.clamp(min=eps)
    denominator = (1 - clamped).clamp(min=eps)
    return torch.log(numerator / denominator)
155,789 | import torch
import copy
from torch import nn, Tensor
import os
import math
import torch.nn.functional as F
from torch import nn
The provided code snippet includes necessary dependencies for implementing the `gen_encoder_output_proposals` function. Write a Python function `def gen_encoder_output_proposals(memory:Tensor, memory_padding_mask:Tensor, spatial_shapes:Tensor)` to solve the following problem:
Input: - memory: bs, \sum{hw}, d_model - memory_padding_mask: bs, \sum{hw} - spatial_shapes: nlevel, 2 Output: - output_memory: bs, \sum{hw}, d_model - output_proposals: bs, \sum{hw}, 4
Here is the function:
def gen_encoder_output_proposals(memory:Tensor, memory_padding_mask:Tensor, spatial_shapes:Tensor):
    """
    Generate per-position box proposals (in inverse-sigmoid space) and a masked
    copy of the encoder memory, for two-stage query selection.

    Input:
        - memory: bs, \sum{hw}, d_model
        - memory_padding_mask: bs, \sum{hw} (True at padded positions)
        - spatial_shapes: nlevel, 2
    Output:
        - output_memory: bs, \sum{hw}, d_model (zeroed at padded/invalid positions)
        - output_proposals: bs, \sum{hw}, 4 (logit-space boxes; +inf at invalid positions)
    """
    N_, S_, C_ = memory.shape
    base_scale = 4.0  # NOTE(review): unused
    proposals = []
    _cur = 0  # running offset of the current level inside the flattened \sum{hw} axis
    for lvl, (H_, W_) in enumerate(spatial_shapes):
        mask_flatten_ = memory_padding_mask[:, _cur:(_cur + H_ * W_)].view(N_, H_, W_, 1)
        # Count of valid (non-padded) rows/cols per image at this level.
        valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1)
        valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1)

        grid_y, grid_x = torch.meshgrid(torch.linspace(0, H_ - 1, H_, dtype=torch.float32, device=memory.device),
                                        torch.linspace(0, W_ - 1, W_, dtype=torch.float32, device=memory.device))
        grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1)

        # Normalize cell centers by the valid extent of each image.
        scale = torch.cat([valid_W.unsqueeze(-1), valid_H.unsqueeze(-1)], 1).view(N_, 1, 1, 2)
        grid = (grid.unsqueeze(0).expand(N_, -1, -1, -1) + 0.5) / scale
        # Proposal size doubles per level: coarser levels get larger boxes.
        wh = torch.ones_like(grid) * 0.05 * (2.0 ** lvl)
        proposal = torch.cat((grid, wh), -1).view(N_, -1, 4)
        proposals.append(proposal)
        _cur += (H_ * W_)
    output_proposals = torch.cat(proposals, 1)
    # Keep only proposals comfortably inside the image on every coordinate.
    output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(-1, keepdim=True)
    # Map to logit (inverse-sigmoid) space; invalid/padded positions become +inf.
    output_proposals = torch.log(output_proposals / (1 - output_proposals))
    output_proposals = output_proposals.masked_fill(memory_padding_mask.unsqueeze(-1), float('inf'))
    output_proposals = output_proposals.masked_fill(~output_proposals_valid, float('inf'))
    output_memory = memory
    # Zero out the memory wherever the proposal was padded or invalid.
    output_memory = output_memory.masked_fill(memory_padding_mask.unsqueeze(-1), float(0))
    output_memory = output_memory.masked_fill(~output_proposals_valid, float(0))
    return output_memory, output_proposals
155,790 | import torch
import copy
from torch import nn, Tensor
import os
import math
import torch.nn.functional as F
from torch import nn
def gen_sineembed_for_position(pos_tensor):
    """Build sinusoidal positional embeddings for 2- or 4-dim box coordinates.

    Args:
        pos_tensor: (n_query, bs, 2) holding (x, y), or (n_query, bs, 4)
            holding (x, y, w, h), with values normalized to [0, 1].

    Returns:
        (n_query, bs, 256) embedding for 2-dim input, or (n_query, bs, 512)
        for 4-dim input; coordinates are ordered (y, x[, w, h]) with 128
        dims per coordinate.

    Raises:
        ValueError: for any other trailing dimension.
    """
    scale = 2 * math.pi
    dim_t = torch.arange(128, dtype=torch.float32, device=pos_tensor.device)
    dim_t = 10000 ** (2 * torch.div(dim_t, 2,rounding_mode='trunc') / 128)

    def embed(coord):
        # Interleave sin/cos over the 128 frequency bands.
        phase = (coord * scale)[:, :, None] / dim_t
        return torch.stack((phase[:, :, 0::2].sin(), phase[:, :, 1::2].cos()), dim=3).flatten(2)

    pos_y = embed(pos_tensor[:, :, 1])
    pos_x = embed(pos_tensor[:, :, 0])
    if pos_tensor.size(-1) == 2:
        return torch.cat((pos_y, pos_x), dim=2)
    if pos_tensor.size(-1) == 4:
        pos_w = embed(pos_tensor[:, :, 2])
        pos_h = embed(pos_tensor[:, :, 3])
        return torch.cat((pos_y, pos_x, pos_w, pos_h), dim=2)
    raise ValueError("Unknown pos_tensor shape(-1):{}".format(pos_tensor.size(-1)))
155,791 | import torch
import copy
from torch import nn, Tensor
import os
import math
import torch.nn.functional as F
from torch import nn
The provided code snippet includes necessary dependencies for implementing the `_get_activation_fn` function. Write a Python function `def _get_activation_fn(activation)` to solve the following problem:
Return an activation function given a string
Here is the function:
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
if activation == "prelu":
return nn.PReLU()
if activation == "selu":
return F.selu
raise RuntimeError(F"activation should be relu/gelu, not {activation}.") | Return an activation function given a string |
155,792 | import torch
import copy
from torch import nn, Tensor
import os
import math
import torch.nn.functional as F
from torch import nn
def _get_clones(module, N, layer_share=False):
# import ipdb; ipdb.set_trace()
if layer_share:
return nn.ModuleList([module for i in range(N)])
else:
return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) | null |
155,795 | import torch
from torchvision.ops.boxes import box_area
def box_iou(boxes1, boxes2):
    """Pairwise IoU between two sets of boxes in (x0, y0, x1, y1) format.

    Args:
        boxes1: Tensor of shape [N, 4].
        boxes2: Tensor of shape [M, 4].

    Returns:
        (iou, union): two tensors of shape [N, M].
    """
    # Inline of torchvision's box_area: (x1 - x0) * (y1 - y0).
    area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
    area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])

    # Intersection rectangle: max of top-left corners, min of bottom-rights.
    top_left = torch.max(boxes1[:, None, :2], boxes2[:, :2])      # [N, M, 2]
    bottom_right = torch.min(boxes1[:, None, 2:], boxes2[:, 2:])  # [N, M, 2]
    extent = (bottom_right - top_left).clamp(min=0)               # [N, M, 2]
    inter = extent[..., 0] * extent[..., 1]                       # [N, M]

    union = area1[:, None] + area2 - inter
    # Epsilon guards against division by zero for degenerate boxes.
    iou = inter / (union + 1e-6)
    return iou, union
The provided code snippet includes necessary dependencies for implementing the `generalized_box_iou` function. Write a Python function `def generalized_box_iou(boxes1, boxes2)` to solve the following problem:
Generalized IoU from https://giou.stanford.edu/ The boxes should be in [x0, y0, x1, y1] format Returns a [N, M] pairwise matrix, where N = len(boxes1) and M = len(boxes2)
Here is the function:
def generalized_box_iou(boxes1, boxes2):
    """
    Generalized IoU (https://giou.stanford.edu/) between two box sets.

    Boxes are in [x0, y0, x1, y1] format. Returns an [N, M] pairwise matrix
    for N = len(boxes1) and M = len(boxes2).
    """
    # Degenerate (inverted) boxes would propagate inf/nan, so fail fast.
    assert (boxes1[:, 2:] >= boxes1[:, :2]).all()
    assert (boxes2[:, 2:] >= boxes2[:, :2]).all()
    iou, union = box_iou(boxes1, boxes2)

    # Smallest axis-aligned box enclosing each pair.
    top_left = torch.min(boxes1[:, None, :2], boxes2[:, :2])
    bottom_right = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
    extent = (bottom_right - top_left).clamp(min=0)  # [N, M, 2]
    enclosing = extent[..., 0] * extent[..., 1]

    # GIoU = IoU - |C \ (A ∪ B)| / |C|; epsilon guards empty enclosures.
    return iou - (enclosing - union) / (enclosing + 1e-6)
155,798 | from typing import List, Optional
import torch
import torch.distributed as dist
import torchvision
from torch import Tensor
def _max_by_axis(the_list):
# type: (List[List[int]]) -> List[int]
maxes = the_list[0]
for sublist in the_list[1:]:
for index, item in enumerate(sublist):
maxes[index] = max(maxes[index], item)
return maxes
class NestedTensor(object):
    """A padded batch tensor paired with an optional padding mask.

    ``tensors`` holds the batched data; ``mask``, when present, is a boolean
    tensor marking which positions are padding.
    """

    def __init__(self, tensors, mask: Optional[Tensor]):
        self.tensors = tensors
        self.mask = mask

    def to(self, device):
        # type: (Device) -> NestedTensor # noqa
        """Return a new NestedTensor with both parts moved to ``device``."""
        moved_tensors = self.tensors.to(device)
        moved_mask = None if self.mask is None else self.mask.to(device)
        return NestedTensor(moved_tensors, moved_mask)

    def decompose(self):
        """Return the underlying ``(tensors, mask)`` pair."""
        return self.tensors, self.mask

    def __repr__(self):
        return str(self.tensors)
def _onnx_nested_tensor_from_tensor_list(tensor_list: List[Tensor]) -> NestedTensor:
    """ONNX-traceable variant of ``nested_tensor_from_tensor_list``.

    Pads every tensor in ``tensor_list`` up to the per-dimension maximum and
    stacks them into one batch, returning a NestedTensor whose mask is True
    at padded positions. Size arithmetic is done with torch ops (not plain
    Python ints) so the computation is captured by tracing for ONNX export.
    """
    max_size = []
    for i in range(tensor_list[0].dim()):
        # Max extent of dimension i across the batch, computed as tensor ops
        # so tracing records it instead of baking in a constant.
        max_size_i = torch.max(
            torch.stack([img.shape[i] for img in tensor_list]).to(torch.float32)
        ).to(torch.int64)
        max_size.append(max_size_i)
    max_size = tuple(max_size)
    # work around for
    # pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
    # m[: img.shape[1], :img.shape[2]] = False
    # which is not yet supported in onnx
    padded_imgs = []
    padded_masks = []
    for img in tensor_list:
        # Per-dimension amount of padding needed to reach max_size.
        padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))]
        # F.pad order is (last-dim left/right, 2nd-last top/bottom, ...);
        # pad only on the right/bottom/back side of each dimension.
        padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0]))
        padded_imgs.append(padded_img)
        # Mask starts all zero (valid) and the padded border is filled with 1.
        m = torch.zeros_like(img[0], dtype=torch.int, device=img.device)
        padded_mask = torch.nn.functional.pad(m, (0, padding[2], 0, padding[1]), "constant", 1)
        padded_masks.append(padded_mask.to(torch.bool))
    tensor = torch.stack(padded_imgs)
    mask = torch.stack(padded_masks)
    return NestedTensor(tensor, mask=mask)
def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):
    """Pad a list of C×H×W tensors to a common size and batch them.

    Returns a NestedTensor whose ``tensors`` is the zero-padded [B, C, H, W]
    batch and whose ``mask`` is [B, H, W], True at padded pixels.

    Raises:
        ValueError: if the tensors are not 3-dimensional.
    """
    # TODO make this more general
    if tensor_list[0].ndim == 3:
        if torchvision._is_tracing():
            # nested_tensor_from_tensor_list() does not export well to ONNX
            # call _onnx_nested_tensor_from_tensor_list() instead
            return _onnx_nested_tensor_from_tensor_list(tensor_list)

        # TODO make it support different-sized images
        # Per-dimension maxima over the batch: [C_max, H_max, W_max].
        max_size = _max_by_axis([list(img.shape) for img in tensor_list])
        # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))
        batch_shape = [len(tensor_list)] + max_size
        b, c, h, w = batch_shape
        dtype = tensor_list[0].dtype
        device = tensor_list[0].device
        # Copy each image into the top-left corner of the zeroed buffer;
        # mask starts all True and is cleared over the valid region.
        tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
        mask = torch.ones((b, h, w), dtype=torch.bool, device=device)
        for img, pad_img, m in zip(tensor_list, tensor, mask):
            pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
            m[: img.shape[1], : img.shape[2]] = False
    else:
        raise ValueError("not supported")
    return NestedTensor(tensor, mask)
155,799 | from typing import List, Optional
import torch
import torch.distributed as dist
import torchvision
from torch import Tensor
def is_dist_avail_and_initialized():
    """Return True iff torch.distributed is available AND initialized."""
    return dist.is_available() and dist.is_initialized()
155,800 | from typing import List, Optional
import torch
import torch.distributed as dist
import torchvision
from torch import Tensor
The provided code snippet includes necessary dependencies for implementing the `masks_to_boxes` function. Write a Python function `def masks_to_boxes(masks)` to solve the following problem:
Compute the bounding boxes around the provided masks The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions. Returns a [N, 4] tensors, with the boxes in xyxy format
Here is the function:
def masks_to_boxes(masks):
    """Compute the bounding boxes around the provided masks.

    The masks should be in format [N, H, W] where N is the number of masks
    and (H, W) are the spatial dimensions.

    Returns a [N, 4] tensor with the boxes in xyxy format.
    """
    if masks.numel() == 0:
        return torch.zeros((0, 4), device=masks.device)

    h, w = masks.shape[-2:]

    # Fixed: coordinate grids are now created on the masks' device (the
    # original built them on CPU, which fails for CUDA inputs), and meshgrid
    # gets an explicit indexing mode to keep the (row, col) layout while
    # silencing the PyTorch deprecation warning.
    y = torch.arange(0, h, dtype=torch.float, device=masks.device)
    x = torch.arange(0, w, dtype=torch.float, device=masks.device)
    y, x = torch.meshgrid(y, x, indexing="ij")

    x_mask = masks * x.unsqueeze(0)
    x_max = x_mask.flatten(1).max(-1)[0]
    # Off-mask pixels are pushed to a huge value so min() ignores them.
    x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]

    y_mask = masks * y.unsqueeze(0)
    y_max = y_mask.flatten(1).max(-1)[0]
    y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]

    return torch.stack([x_min, y_min, x_max, y_max], 1)
155,801 | import torch
import torch.nn.functional as F
from detrex.layers import box_cxcywh_to_xyxy, generalized_box_iou,box_iou
The provided code snippet includes necessary dependencies for implementing the `sigmoid_focal_loss` function. Write a Python function `def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2)` to solve the following problem:
Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. Args: inputs (torch.Tensor): A float tensor of arbitrary shape. The predictions for each example. targets (torch.Tensor): A float tensor with the same shape as inputs. Stores the binary classification label for each element in inputs (0 for the negative class and 1 for the positive class). num_boxes (int): The number of boxes. alpha (float, optional): Weighting factor in range (0, 1) to balance positive vs negative examples. Default: 0.25. gamma (float): Exponent of the modulating factor (1 - p_t) to balance easy vs hard examples. Default: 2. Returns: torch.Tensor: The computed sigmoid focal loss.
Here is the function:
def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2):
    """
    Focal loss used in RetinaNet for dense detection
    (https://arxiv.org/abs/1708.02002).

    Args:
        inputs: float tensor of arbitrary shape with per-example logits.
        targets: float tensor of the same shape holding binary labels
            (0 = negative class, 1 = positive class).
        num_boxes: normalization denominator (number of boxes).
        alpha: weighting factor in (0, 1) balancing positives vs negatives;
            disabled when negative. Default 0.25.
        gamma: exponent of the modulating factor (1 - p_t) balancing easy
            vs hard examples. Default 2.

    Returns:
        Scalar tensor: mean over the last dim, summed, divided by num_boxes.
    """
    p = inputs.sigmoid()
    bce = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
    # p_t is the model's probability assigned to the ground-truth class.
    p_t = p * targets + (1 - p) * (1 - targets)
    # Down-weight easy examples via the (1 - p_t)^gamma modulator.
    loss = bce * (1 - p_t) ** gamma
    if alpha >= 0:
        # alpha-balancing between positive and negative examples.
        loss = (alpha * targets + (1 - alpha) * (1 - targets)) * loss
    return loss.mean(1).sum() / num_boxes
155,802 | import torch
import torch.nn.functional as F
from detrex.layers import box_cxcywh_to_xyxy, generalized_box_iou,box_iou
def binary_cross_entropy_loss_with_logits(inputs, pos_weights, neg_weights, avg_factor):
    """Weighted binary cross entropy computed on raw logits.

    Computes ``sum(-pos_weights * log(p) - neg_weights * log(1 - p)) / avg_factor``
    where ``p = sigmoid(inputs)``.

    Fixed: the original called ``p.log()`` / ``(1 - p).log()`` directly,
    which saturates to -inf (and NaN gradients) for large-magnitude logits.
    Using logsigmoid is mathematically identical — log(p) = logsigmoid(x)
    and log(1 - p) = logsigmoid(-x) — but numerically stable.
    """
    log_p = F.logsigmoid(inputs)
    log_one_minus_p = F.logsigmoid(-inputs)
    loss = -pos_weights * log_p - neg_weights * log_one_minus_p
    return loss.sum() / avg_factor
155,803 | import torch
import torch.nn.functional as F
from detrex.layers import box_cxcywh_to_xyxy, generalized_box_iou,box_iou
def get_local_rank( quality, indices):
    """Rank each matched prediction inside its GT's candidate bag by quality.

    Args:
        quality: 1-D tensor of per-match quality scores, concatenated over
            the batch in the same order as ``indices``.
        indices: per-image matching results, a list of ``(src_ind, tgt_ind)``
            pairs (matcher output); ``len(tgt_ind)`` matches per image.

    Returns:
        1-D long tensor aligned with ``quality``: 0 for the highest-quality
        match inside a candidate bag, 1 for the next, etc.

    NOTE(review): assumes every GT in an image has the same candidate-bag
    size k = num_matches // num_targets (see the reshape below) — confirm
    the upstream matcher guarantees this.
    """
    #quality: one-dimension tensor
    #indices: matching result
    bs = len(indices)
    device = quality.device
    # Number of matched pairs contributed by each image.
    tgt_size = [len(tgt_ind) for _,tgt_ind in indices]
    ind_start = 0
    rank_list = []
    for i in range(bs):
        if tgt_size[i] == 0:
            # Image with no matches contributes an empty rank tensor.
            rank_list.append(torch.zeros(0,dtype=torch.long,device=device))
            continue
        num_tgt = max(indices[i][1]) + 1
        # split quality of one item
        quality_per_img = quality[ind_start:ind_start+tgt_size[i]]
        ind_start += tgt_size[i]
        #suppose candidate bag sizes are equal
        k = torch.div(tgt_size[i], num_tgt,rounding_mode='floor')
        #sort quality in each candidate bag
        quality_per_img = quality_per_img.reshape(num_tgt, k)
        ind = quality_per_img.sort(dim=-1,descending=True)[1]
        #scatter ranks, eg:[0.3,0.6,0.5] -> [2,0,1]
        rank_per_img = torch.zeros_like(quality_per_img, dtype=torch.long, device = device)
        rank_per_img.scatter_(-1, ind, torch.arange(k,device=device, dtype=torch.long).repeat(num_tgt,1))
        rank_list.append(rank_per_img.flatten())
    return torch.cat(rank_list, 0)
def IA_BCE_loss(src_logits,pos_idx_c, src_boxes, target_boxes, indices, avg_factor, alpha,gamma, w_prime=1, ):
    """IoU-aware soft binary cross entropy for matched queries.

    Positive (matched) logits are pulled towards a soft label
    t = prob**alpha * IoU**(1 - alpha), optionally re-weighted per in-bag
    rank via ``w_prime``; all logits are simultaneously pushed towards 0
    with a focal-style ``prob**gamma`` negative weight.

    Args:
        src_logits: classification logits for all queries.
        pos_idx_c: index tuple selecting the matched (positive) logits.
        src_boxes: predicted boxes (cxcywh) for the matched queries.
        target_boxes: ground-truth boxes (cxcywh) aligned with ``src_boxes``.
        indices: per-image matching result, used to rank candidate bags.
        avg_factor: normalization denominator for the summed loss.
        alpha: prob-vs-IoU balance exponent in the quality metric t.
        gamma: focusing exponent applied to negative probabilities.
        w_prime: scalar 1 (no re-weighting) or a tensor of per-rank weights.

    Returns:
        (loss, rank_weight): the scalar loss and the applied rank weights.
    """
    prob = src_logits.sigmoid()
    #init positive weights and negative weights
    pos_weights = torch.zeros_like(src_logits)
    neg_weights = prob ** gamma
    #ious_scores between matched boxes and GT boxes
    iou_scores = torch.diag(box_iou(box_cxcywh_to_xyxy(src_boxes), box_cxcywh_to_xyxy( target_boxes))[0])
    #t is the quality metric
    t = prob[pos_idx_c]**alpha * iou_scores ** (1-alpha)
    # Floor at 0.01 so no positive is entirely ignored; detach so the soft
    # label acts as a constant target rather than feeding gradients back.
    t = torch.clamp(t, 0.01).detach()

    rank = get_local_rank(t, indices)
    #define positive weights for SoftBceLoss
    if type(w_prime) != int:
        rank_weight = w_prime[rank]
    else:
        rank_weight = w_prime
    t = t * rank_weight
    pos_weights[pos_idx_c] = t
    neg_weights[pos_idx_c] = (1 -t)
    # NOTE(review): prob.log() / (1-prob).log() are unguarded; saturated
    # logits would yield -inf here — confirm inputs stay bounded upstream.
    loss = -pos_weights * prob.log() - neg_weights * (1-prob).log()
    return loss.sum()/avg_factor, rank_weight
155,804 | import torch
import torch.nn as nn
import torch.nn.functional as F
from detrex.layers import box_cxcywh_to_xyxy, generalized_box_iou,box_iou
from detrex.utils import get_world_size, is_dist_avail_and_initialized
from ..losses import binary_cross_entropy_loss_with_logits
The provided code snippet includes necessary dependencies for implementing the `sigmoid_focal_loss` function. Write a Python function `def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2)` to solve the following problem:
Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. Args: inputs (torch.Tensor): A float tensor of arbitrary shape. The predictions for each example. targets (torch.Tensor): A float tensor with the same shape as inputs. Stores the binary classification label for each element in inputs (0 for the negative class and 1 for the positive class). num_boxes (int): The number of boxes. alpha (float, optional): Weighting factor in range (0, 1) to balance positive vs negative examples. Default: 0.25. gamma (float): Exponent of the modulating factor (1 - p_t) to balance easy vs hard examples. Default: 2. Returns: torch.Tensor: The computed sigmoid focal loss.
Here is the function:
def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2):
    """
    RetinaNet focal loss (https://arxiv.org/abs/1708.02002).

    Args:
        inputs: per-example logits, float tensor of arbitrary shape.
        targets: binary labels of the same shape (0 negative, 1 positive).
        num_boxes: normalization denominator.
        alpha: positive/negative balancing factor (disabled when < 0).
        gamma: focusing exponent for hard-example mining.

    Returns:
        Scalar loss: per-row mean, summed over rows, divided by num_boxes.
    """
    probas = inputs.sigmoid()
    raw_ce = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
    # Probability the model assigns to the true class of each element.
    true_class_prob = probas * targets + (1.0 - probas) * (1.0 - targets)
    modulating = (1.0 - true_class_prob) ** gamma
    weighted = raw_ce * modulating

    if alpha >= 0:
        balance = alpha * targets + (1.0 - alpha) * (1.0 - targets)
        weighted = weighted * balance

    per_query = weighted.mean(1)
    return per_query.sum() / num_boxes
155,805 | import itertools
import logging
import numpy as np
import operator
import pickle
from typing import Any, Callable, Dict, List, Optional, Union
import torch
import random
import torch.utils.data as torchdata
from tabulate import tabulate
from termcolor import colored
from detectron2.config import configurable
from detectron2.structures import BoxMode
from detectron2.utils.comm import get_world_size
from detectron2.utils.env import seed_all_rng
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import _log_api_usage, log_first_n
from detectron2.data.catalog import DatasetCatalog, MetadataCatalog
from detectron2.data.common import AspectRatioGroupedDataset, DatasetFromList, ToIterableDataset
from detectron2.data.dataset_mapper import DatasetMapper
from detectron2.data.detection_utils import check_metadata_consistency
from detectron2.data.samplers import (
InferenceSampler,
RandomSubsetTrainingSampler,
RepeatFactorTrainingSampler,
TrainingSampler,
)
from detectron2.utils.serialize import PicklableWrapper
def mot_collate_fn(batch: List[dict]) -> dict:
    """Collate a batch of per-sample dicts into one dict of per-key lists.

    For each key, values from all samples are gathered into a list; when
    those values are themselves lists, the nesting is transposed so the
    outer index runs over the inner-list position instead of the sample.
    Tensor values are rejected — tensor batching is not this collator's job.
    """
    collated = {}
    for key in list(batch[0].keys()):
        assert not isinstance(batch[0][key], torch.Tensor)
        values = [sample[key] for sample in batch]
        if isinstance(values[0], list):
            # Transpose list-of-samples-of-lists -> list-of-lists-of-samples.
            values = [list(group) for group in zip(*values)]
        collated[key] = values
    return collated
155,806 | import itertools
import logging
import numpy as np
import operator
import pickle
from typing import Any, Callable, Dict, List, Optional, Union
import torch
import random
import torch.utils.data as torchdata
from tabulate import tabulate
from termcolor import colored
from detectron2.config import configurable
from detectron2.structures import BoxMode
from detectron2.utils.comm import get_world_size
from detectron2.utils.env import seed_all_rng
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import _log_api_usage, log_first_n
from detectron2.data.catalog import DatasetCatalog, MetadataCatalog
from detectron2.data.common import AspectRatioGroupedDataset, DatasetFromList, ToIterableDataset
from detectron2.data.dataset_mapper import DatasetMapper
from detectron2.data.detection_utils import check_metadata_consistency
from detectron2.data.samplers import (
InferenceSampler,
RandomSubsetTrainingSampler,
RepeatFactorTrainingSampler,
TrainingSampler,
)
from detectron2.utils.serialize import PicklableWrapper
# NOTE(review): this definition appears to be a bare signature with no body
# (likely lost in a dump/truncation) — as written it is a SyntaxError. A
# complete implementation with the same signature exists later in this file;
# confirm which copy is intended and restore/remove accordingly.
def get_detection_dataset_dicts(
    names,
    filter_empty=True,
    min_keypoints=0,
    proposal_files=None,
    check_consistency=True,
):
def _train_loader_from_config(cfg, mapper=None, *, dataset=None, sampler=None):
    """Translate a detectron2 CfgNode into keyword args for the train loader.

    Any of ``mapper``/``dataset``/``sampler`` passed explicitly overrides
    the cfg-derived default. Returns a dict matching the parameters of the
    train-loader builder (dataset, sampler, mapper, total_batch_size,
    aspect_ratio_grouping, num_workers).
    """
    if dataset is None:
        # Build the dataset dicts from the configured training splits.
        dataset = get_detection_dataset_dicts(
            cfg.DATASETS.TRAIN,
            filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
            min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
            if cfg.MODEL.KEYPOINT_ON
            else 0,
            proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None,
        )
        _log_api_usage("dataset." + cfg.DATASETS.TRAIN[0])

    if mapper is None:
        # Default detectron2 mapper in training mode.
        mapper = DatasetMapper(cfg, True)

    if sampler is None:
        sampler_name = cfg.DATALOADER.SAMPLER_TRAIN
        logger = logging.getLogger(__name__)
        if isinstance(dataset, torchdata.IterableDataset):
            # Iterable datasets produce their own stream; no sampler applies.
            logger.info("Not using any sampler since the dataset is IterableDataset.")
            sampler = None
        else:
            logger.info("Using training sampler {}".format(sampler_name))
            if sampler_name == "TrainingSampler":
                sampler = TrainingSampler(len(dataset))
            elif sampler_name == "RepeatFactorTrainingSampler":
                # Oversample images containing rare categories.
                repeat_factors = RepeatFactorTrainingSampler.repeat_factors_from_category_frequency(
                    dataset, cfg.DATALOADER.REPEAT_THRESHOLD
                )
                sampler = RepeatFactorTrainingSampler(repeat_factors)
            elif sampler_name == "RandomSubsetTrainingSampler":
                sampler = RandomSubsetTrainingSampler(
                    len(dataset), cfg.DATALOADER.RANDOM_SUBSET_RATIO
                )
            else:
                raise ValueError("Unknown training sampler: {}".format(sampler_name))

    return {
        "dataset": dataset,
        "sampler": sampler,
        "mapper": mapper,
        "total_batch_size": cfg.SOLVER.IMS_PER_BATCH,
        "aspect_ratio_grouping": cfg.DATALOADER.ASPECT_RATIO_GROUPING,
        "num_workers": cfg.DATALOADER.NUM_WORKERS,
    }
155,807 | import itertools
import logging
import numpy as np
import operator
import pickle
from typing import Any, Callable, Dict, List, Optional, Union
import torch
import random
import torch.utils.data as torchdata
from tabulate import tabulate
from termcolor import colored
from detectron2.config import configurable
from detectron2.structures import BoxMode
from detectron2.utils.comm import get_world_size
from detectron2.utils.env import seed_all_rng
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import _log_api_usage, log_first_n
from detectron2.data.catalog import DatasetCatalog, MetadataCatalog
from detectron2.data.common import AspectRatioGroupedDataset, DatasetFromList, ToIterableDataset
from detectron2.data.dataset_mapper import DatasetMapper
from detectron2.data.detection_utils import check_metadata_consistency
from detectron2.data.samplers import (
InferenceSampler,
RandomSubsetTrainingSampler,
RepeatFactorTrainingSampler,
TrainingSampler,
)
from detectron2.utils.serialize import PicklableWrapper
class MotMapDataset(torchdata.Dataset):
    """
    Map a function over the elements in a dataset.

    Unlike a plain map, ``map_func`` is called as ``map_func(dataset, idx)``
    so it can access neighbouring elements (useful for multi-frame / MOT
    samples). Indices for which it returns None are retried with random
    fallback indices.
    """

    def __init__(self, dataset, map_func):
        """
        Args:
            dataset: a dataset where map function is applied. Can be either
                map-style or iterable dataset. When given an iterable dataset,
                the returned object will also be an iterable dataset.
            map_func: a callable which maps the element in dataset. map_func can
                return None to skip the data (e.g. in case of errors).
                How None is handled depends on the style of `dataset`.
                If `dataset` is map-style, it randomly tries other elements.
                If `dataset` is iterable, it skips the data and tries the next.
        """
        self._dataset = dataset
        self._map_func = PicklableWrapper(map_func)  # wrap so that a lambda will work

        self._rng = random.Random(42)  # fixed seed for reproducible fallbacks
        self._fallback_candidates = set(range(len(dataset)))

    def __new__(cls, dataset, map_func):
        # Dispatch to the iterable wrapper when the dataset is iterable-style.
        is_iterable = isinstance(dataset, torchdata.IterableDataset)
        if is_iterable:
            return _MapIterableDataset(dataset, map_func)
        else:
            return super().__new__(cls)

    def __getnewargs__(self):
        # Needed so pickling re-runs __new__ with the right dispatch args.
        return self._dataset, self._map_func

    def __len__(self):
        return len(self._dataset)

    def __getitem__(self, idx):
        retry_count = 0
        cur_idx = int(idx)

        while True:
            data = self._map_func(self._dataset, cur_idx)
            if data is not None:
                self._fallback_candidates.add(cur_idx)
                return data

            # _map_func fails for this idx, use a random new index from the pool.
            retry_count += 1
            self._fallback_candidates.discard(cur_idx)
            # Fixed: random.sample() no longer accepts a set as population
            # (TypeError on Python >= 3.11); sample from a sorted list, which
            # also keeps the draw deterministic for a given candidate pool.
            cur_idx = self._rng.sample(sorted(self._fallback_candidates), k=1)[0]

            if retry_count >= 3:
                logger = logging.getLogger(__name__)
                logger.warning(
                    "Failed to apply `_map_func` for idx: {}, retry count: {}".format(
                        idx, retry_count
                    )
                )
def build_batch_data_loader(
    dataset,
    sampler,
    total_batch_size,
    *,
    aspect_ratio_grouping=False,
    num_workers=0,
    collate_fn=None,
):
    """
    Build a batched dataloader. The main differences from `torch.utils.data.DataLoader` are:
    1. support aspect ratio grouping options
    2. use no "batch collation", because this is common for detection training
    Args:
        dataset (torch.utils.data.Dataset): a pytorch map-style or iterable dataset.
        sampler (torch.utils.data.sampler.Sampler or None): a sampler that produces indices.
            Must be provided iff. ``dataset`` is a map-style dataset.
        total_batch_size, aspect_ratio_grouping, num_workers, collate_fn: see
            :func:`build_detection_train_loader`.
    Returns:
        iterable[list]. Length of each list is the batch size of the current
        GPU. Each element in the list comes from the dataset.
    """
    world_size = get_world_size()
    assert (
        total_batch_size > 0 and total_batch_size % world_size == 0
    ), "Total batch size ({}) must be divisible by the number of gpus ({}).".format(
        total_batch_size, world_size
    )
    # Each process loads its per-GPU share of the global batch.
    batch_size = total_batch_size // world_size

    if isinstance(dataset, torchdata.IterableDataset):
        assert sampler is None, "sampler must be None if dataset is IterableDataset"
    else:
        # Fuse sampler and dataset into one iterable so workers pull indices lazily.
        dataset = ToIterableDataset(dataset, sampler)

    if aspect_ratio_grouping:
        # Yield elements one at a time; grouping into batches happens below.
        data_loader = torchdata.DataLoader(
            dataset,
            num_workers=num_workers,
            collate_fn=operator.itemgetter(0),  # don't batch, but yield individual elements
            worker_init_fn=worker_init_reset_seed,
        )  # yield individual mapped dict
        data_loader = AspectRatioGroupedDataset(data_loader, batch_size)
        if collate_fn is None:
            return data_loader
        # NOTE(review): wrapping the grouped loader in MotMapDataset applies
        # collate_fn through its (dataset, idx) retry machinery — confirm this
        # is intended rather than a plain element-wise MapDataset.
        return MotMapDataset(data_loader, collate_fn)
    else:
        return torchdata.DataLoader(
            dataset,
            batch_size=batch_size,
            drop_last=True,
            num_workers=num_workers,
            collate_fn=trivial_batch_collator if collate_fn is None else collate_fn,
            worker_init_fn=worker_init_reset_seed,
        )
The provided code snippet includes necessary dependencies for implementing the `build_mot_train_loader` function. Write a Python function `def build_mot_train_loader( dataset, *, mapper, sampler=None, total_batch_size, aspect_ratio_grouping=True, num_workers=0, collate_fn=None, )` to solve the following problem:
Build a dataloader for object detection with some default features. Args: dataset (list or torch.utils.data.Dataset): a list of dataset dicts, or a pytorch dataset (either map-style or iterable). It can be obtained by using :func:`DatasetCatalog.get` or :func:`get_detection_dataset_dicts`. mapper (callable): a callable which takes a sample (dict) from dataset and returns the format to be consumed by the model. When using cfg, the default choice is ``DatasetMapper(cfg, is_train=True)``. sampler (torch.utils.data.sampler.Sampler or None): a sampler that produces indices to be applied on ``dataset``. If ``dataset`` is map-style, the default sampler is a :class:`TrainingSampler`, which coordinates an infinite random shuffle sequence across all workers. Sampler must be None if ``dataset`` is iterable. total_batch_size (int): total batch size across all workers. aspect_ratio_grouping (bool): whether to group images with similar aspect ratio for efficiency. When enabled, it requires each element in dataset be a dict with keys "width" and "height". num_workers (int): number of parallel data loading workers collate_fn: a function that determines how to do batching, same as the argument of `torch.utils.data.DataLoader`. Defaults to do no collation and return a list of data. No collation is OK for small batch size and simple data structures. If your batch size is large and each sample contains too many small tensors, it's more efficient to collate them in data loader. Returns: torch.utils.data.DataLoader: a dataloader. Each output from it is a ``list[mapped_element]`` of length ``total_batch_size / num_workers``, where ``mapped_element`` is produced by the ``mapper``.
Here is the function:
def build_mot_train_loader(
    dataset,
    *,
    mapper,
    sampler=None,
    total_batch_size,
    aspect_ratio_grouping=True,
    num_workers=0,
    collate_fn=None,
):
    """
    Build a dataloader for object detection with some default features.
    Args:
        dataset (list or torch.utils.data.Dataset): a list of dataset dicts,
            or a pytorch dataset (either map-style or iterable). It can be obtained
            by using :func:`DatasetCatalog.get` or :func:`get_detection_dataset_dicts`.
        mapper (callable): a callable which takes a sample (dict) from dataset and
            returns the format to be consumed by the model.
            When using cfg, the default choice is ``DatasetMapper(cfg, is_train=True)``.
        sampler (torch.utils.data.sampler.Sampler or None): a sampler that produces
            indices to be applied on ``dataset``.
            If ``dataset`` is map-style, the default sampler is a :class:`TrainingSampler`,
            which coordinates an infinite random shuffle sequence across all workers.
            Sampler must be None if ``dataset`` is iterable.
        total_batch_size (int): total batch size across all workers.
        aspect_ratio_grouping (bool): whether to group images with similar
            aspect ratio for efficiency. When enabled, it requires each
            element in dataset be a dict with keys "width" and "height".
        num_workers (int): number of parallel data loading workers
        collate_fn: a function that determines how to do batching, same as the argument of
            `torch.utils.data.DataLoader`. Defaults to do no collation and return a list of
            data. No collation is OK for small batch size and simple data structures.
            If your batch size is large and each sample contains too many small tensors,
            it's more efficient to collate them in data loader.
    Returns:
        torch.utils.data.DataLoader:
            a dataloader. Each output from it is a ``list[mapped_element]`` of length
            ``total_batch_size / num_workers``, where ``mapped_element`` is produced
            by the ``mapper``.
    """
    if isinstance(dataset, list):
        # A list of dataset dicts: wrap lazily without copying each element.
        dataset = DatasetFromList(dataset, copy=False)
    if mapper is not None:
        # MotMapDataset calls mapper(dataset, idx) and retries on None results.
        dataset = MotMapDataset(dataset, mapper)

    if isinstance(dataset, torchdata.IterableDataset):
        assert sampler is None, "sampler must be None if dataset is IterableDataset"
    else:
        if sampler is None:
            # Default: infinite shuffled index stream coordinated across workers.
            sampler = TrainingSampler(len(dataset))
        assert isinstance(sampler, torchdata.Sampler), f"Expect a Sampler but got {type(sampler)}"
    return build_batch_data_loader(
        dataset,
        sampler,
        total_batch_size,
        aspect_ratio_grouping=aspect_ratio_grouping,
        num_workers=num_workers,
        collate_fn=collate_fn,
    )
155,808 | import itertools
import logging
import numpy as np
import operator
import pickle
from typing import Any, Callable, Dict, List, Optional, Union
import torch
import random
import torch.utils.data as torchdata
from tabulate import tabulate
from termcolor import colored
from detectron2.config import configurable
from detectron2.structures import BoxMode
from detectron2.utils.comm import get_world_size
from detectron2.utils.env import seed_all_rng
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import _log_api_usage, log_first_n
from detectron2.data.catalog import DatasetCatalog, MetadataCatalog
from detectron2.data.common import AspectRatioGroupedDataset, DatasetFromList, ToIterableDataset
from detectron2.data.dataset_mapper import DatasetMapper
from detectron2.data.detection_utils import check_metadata_consistency
from detectron2.data.samplers import (
InferenceSampler,
RandomSubsetTrainingSampler,
RepeatFactorTrainingSampler,
TrainingSampler,
)
from detectron2.utils.serialize import PicklableWrapper
def get_detection_dataset_dicts(
    names,
    filter_empty=True,
    min_keypoints=0,
    proposal_files=None,
    check_consistency=True,
):
    """
    Load and prepare dataset dicts for instance detection/segmentation and semantic segmentation.
    Args:
        names (str or list[str]): a dataset name or a list of dataset names
        filter_empty (bool): whether to filter out images without instance annotations
        min_keypoints (int): filter out images with fewer keypoints than
            `min_keypoints`. Set to 0 to do nothing.
        proposal_files (list[str]): if given, a list of object proposal files
            that match each dataset in `names`.
        check_consistency (bool): whether to check if datasets have consistent metadata.
    Returns:
        list[dict]: a list of dicts following the standard dataset dict format.
    """
    # Normalize a single name to a one-element list so the rest of the code
    # can treat both cases uniformly.
    if isinstance(names, str):
        names = [names]
    assert len(names), names
    dataset_dicts = [DatasetCatalog.get(dataset_name) for dataset_name in names]
    # A catalog entry may itself be a torch Dataset; in that case filtering
    # and proposal loading below do not apply, so return early.
    if isinstance(dataset_dicts[0], torchdata.Dataset):
        if len(dataset_dicts) > 1:
            # ConcatDataset does not work for iterable style dataset.
            # We could support concat for iterable as well, but it's often
            # not a good idea to concat iterables anyway.
            return torchdata.ConcatDataset(dataset_dicts)
        return dataset_dicts[0]
    for dataset_name, dicts in zip(names, dataset_dicts):
        assert len(dicts), "Dataset '{}' is empty!".format(dataset_name)
    if proposal_files is not None:
        assert len(names) == len(proposal_files)
        # load precomputed proposals from proposal files
        # NOTE(review): load_proposals_into_dataset (and the filter_* /
        # print_* helpers below) are not imported in this chunk — presumably
        # defined elsewhere in the module; confirm.
        dataset_dicts = [
            load_proposals_into_dataset(dataset_i_dicts, proposal_file)
            for dataset_i_dicts, proposal_file in zip(dataset_dicts, proposal_files)
        ]
    # Flatten the per-dataset lists into a single list of dataset dicts.
    dataset_dicts = list(itertools.chain.from_iterable(dataset_dicts))
    has_instances = "annotations" in dataset_dicts[0]
    if filter_empty and has_instances:
        dataset_dicts = filter_images_with_only_crowd_annotations(dataset_dicts)
    if min_keypoints > 0 and has_instances:
        dataset_dicts = filter_images_with_few_keypoints(dataset_dicts, min_keypoints)
    if check_consistency and has_instances:
        try:
            class_names = MetadataCatalog.get(names[0]).thing_classes
            check_metadata_consistency("thing_classes", names)
            print_instances_class_histogram(dataset_dicts, class_names)
        except AttributeError:  # class names are not available for this dataset
            pass
    assert len(dataset_dicts), "No valid data found in {}.".format(",".join(names))
    return dataset_dicts
The provided code snippet includes necessary dependencies for implementing the `_test_loader_from_config` function. Write a Python function `def _test_loader_from_config(cfg, dataset_name, mapper=None)` to solve the following problem:
Uses the given `dataset_name` argument (instead of the names in cfg), because the standard practice is to evaluate each test set individually (not combining them).
Here is the function:
def _test_loader_from_config(cfg, dataset_name, mapper=None):
    """
    Uses the given `dataset_name` argument (instead of the names in cfg), because the
    standard practice is to evaluate each test set individually (not combining them).

    Returns a kwargs dict consumed by the test-loader builder: dataset, mapper,
    num_workers, and a sampler (None for iterable datasets).
    """
    if isinstance(dataset_name, str):
        dataset_name = [dataset_name]

    # Test-time: keep images without annotations (filter_empty=False) so every
    # image of the evaluation set is scored.
    dataset = get_detection_dataset_dicts(
        dataset_name,
        filter_empty=False,
        # Look up each name's proposal file by its position in cfg.DATASETS.TEST.
        proposal_files=[
            cfg.DATASETS.PROPOSAL_FILES_TEST[list(cfg.DATASETS.TEST).index(x)] for x in dataset_name
        ]
        if cfg.MODEL.LOAD_PROPOSALS
        else None,
    )
    if mapper is None:
        # is_train=False: test-time transforms only, no annotations mapping.
        mapper = DatasetMapper(cfg, False)
    return {
        "dataset": dataset,
        "mapper": mapper,
        "num_workers": cfg.DATALOADER.NUM_WORKERS,
        # Iterable datasets manage their own iteration order; sampler must be None.
        "sampler": InferenceSampler(len(dataset))
        if not isinstance(dataset, torchdata.IterableDataset)
        else None,
    }
155,809 | import itertools
import logging
import numpy as np
import operator
import pickle
from typing import Any, Callable, Dict, List, Optional, Union
import torch
import random
import torch.utils.data as torchdata
from tabulate import tabulate
from termcolor import colored
from detectron2.config import configurable
from detectron2.structures import BoxMode
from detectron2.utils.comm import get_world_size
from detectron2.utils.env import seed_all_rng
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import _log_api_usage, log_first_n
from detectron2.data.catalog import DatasetCatalog, MetadataCatalog
from detectron2.data.common import AspectRatioGroupedDataset, DatasetFromList, ToIterableDataset
from detectron2.data.dataset_mapper import DatasetMapper
from detectron2.data.detection_utils import check_metadata_consistency
from detectron2.data.samplers import (
InferenceSampler,
RandomSubsetTrainingSampler,
RepeatFactorTrainingSampler,
TrainingSampler,
)
from detectron2.utils.serialize import PicklableWrapper
class MotMapDataset(torchdata.Dataset):
    """
    Map a function over the elements in a dataset.

    Unlike detectron2's MapDataset, the map function here receives the whole
    dataset plus an index (``map_func(dataset, idx)``) rather than a single
    element, which lets a MOT mapper sample neighbouring frames of a video.
    """

    def __init__(self, dataset, map_func):
        """
        Args:
            dataset: a dataset where map function is applied. Can be either
                map-style or iterable dataset. When given an iterable dataset,
                the returned object will also be an iterable dataset.
            map_func: a callable which maps the element in dataset. map_func can
                return None to skip the data (e.g. in case of errors).
                How None is handled depends on the style of `dataset`.
                If `dataset` is map-style, it randomly tries other elements.
                If `dataset` is iterable, it skips the data and tries the next.
        """
        self._dataset = dataset
        self._map_func = PicklableWrapper(map_func)  # wrap so that a lambda will work

        # Seeded RNG so fallback sampling is reproducible across runs.
        self._rng = random.Random(42)
        # Indices known (or assumed) to yield valid data; used when an index fails.
        self._fallback_candidates = set(range(len(dataset)))

    def __new__(cls, dataset, map_func):
        is_iterable = isinstance(dataset, torchdata.IterableDataset)
        if is_iterable:
            # NOTE(review): _MapIterableDataset is defined elsewhere in the module.
            return _MapIterableDataset(dataset, map_func)
        else:
            return super().__new__(cls)

    def __getnewargs__(self):
        # Needed so pickling re-runs __new__ with the original arguments.
        return self._dataset, self._map_func

    def __len__(self):
        return len(self._dataset)

    def __getitem__(self, idx):
        retry_count = 0
        cur_idx = int(idx)

        while True:
            data = self._map_func(self._dataset, cur_idx)
            if data is not None:
                self._fallback_candidates.add(cur_idx)
                return data

            # _map_func fails for this idx, use a random new index from the pool
            retry_count += 1
            self._fallback_candidates.discard(cur_idx)
            # BUGFIX: random.sample() rejects a set on Python >= 3.11
            # (deprecated since 3.9); sample from a sorted list instead, which
            # also makes the fallback deterministic given the seeded RNG.
            cur_idx = self._rng.sample(sorted(self._fallback_candidates), k=1)[0]

            if retry_count >= 3:
                logger = logging.getLogger(__name__)
                logger.warning(
                    "Failed to apply `_map_func` for idx: {}, retry count: {}".format(
                        idx, retry_count
                    )
                )
def trivial_batch_collator(batch):
    """Identity collate function: hand the list of samples back untouched."""
    return batch
The provided code snippet includes necessary dependencies for implementing the `build_mot_test_loader` function. Write a Python function `def build_mot_test_loader( dataset: Union[List[Any], torchdata.Dataset], *, mapper: Callable[[Dict[str, Any]], Any], sampler: Optional[torchdata.Sampler] = None, batch_size: int = 1, num_workers: int = 0, collate_fn: Optional[Callable[[List[Any]], Any]] = None, ) -> torchdata.DataLoader` to solve the following problem:
Similar to `build_detection_train_loader`, with default batch size = 1, and sampler = :class:`InferenceSampler`. This sampler coordinates all workers to produce the exact set of all samples. Args: dataset: a list of dataset dicts, or a pytorch dataset (either map-style or iterable). They can be obtained by using :func:`DatasetCatalog.get` or :func:`get_detection_dataset_dicts`. mapper: a callable which takes a sample (dict) from dataset and returns the format to be consumed by the model. When using cfg, the default choice is ``DatasetMapper(cfg, is_train=False)``. sampler: a sampler that produces indices to be applied on ``dataset``. Default to :class:`InferenceSampler`, which splits the dataset across all workers. Sampler must be None if `dataset` is iterable. batch_size: the batch size of the data loader to be created. Default to 1 image per worker since this is the standard when reporting inference time in papers. num_workers: number of parallel data loading workers collate_fn: same as the argument of `torch.utils.data.DataLoader`. Defaults to do no collation and return a list of data. Returns: DataLoader: a torch DataLoader, that loads the given detection dataset, with test-time transformation and batching. Examples: :: data_loader = build_detection_test_loader( DatasetRegistry.get("my_test"), mapper=DatasetMapper(...)) # or, instantiate with a CfgNode: data_loader = build_detection_test_loader(cfg, "my_test")
Here is the function:
def build_mot_test_loader(
    dataset: Union[List[Any], torchdata.Dataset],
    *,
    mapper: Callable[[Dict[str, Any]], Any],
    sampler: Optional[torchdata.Sampler] = None,
    batch_size: int = 1,
    num_workers: int = 0,
    collate_fn: Optional[Callable[[List[Any]], Any]] = None,
) -> torchdata.DataLoader:
    """
    Similar to `build_detection_train_loader`, with default batch size = 1,
    and sampler = :class:`InferenceSampler`. This sampler coordinates all workers
    to produce the exact set of all samples.
    Args:
        dataset: a list of dataset dicts,
            or a pytorch dataset (either map-style or iterable). They can be obtained
            by using :func:`DatasetCatalog.get` or :func:`get_detection_dataset_dicts`.
        mapper: a callable which takes a sample (dict) from dataset
           and returns the format to be consumed by the model.
           When using cfg, the default choice is ``DatasetMapper(cfg, is_train=False)``.
        sampler: a sampler that produces
            indices to be applied on ``dataset``. Default to :class:`InferenceSampler`,
            which splits the dataset across all workers. Sampler must be None
            if `dataset` is iterable.
        batch_size: the batch size of the data loader to be created.
            Default to 1 image per worker since this is the standard when reporting
            inference time in papers.
        num_workers: number of parallel data loading workers
        collate_fn: same as the argument of `torch.utils.data.DataLoader`.
            Defaults to do no collation and return a list of data.
    Returns:
        DataLoader: a torch DataLoader, that loads the given detection
        dataset, with test-time transformation and batching.
    Examples:
    ::
        data_loader = build_detection_test_loader(
            DatasetRegistry.get("my_test"),
            mapper=DatasetMapper(...))
        # or, instantiate with a CfgNode:
        data_loader = build_detection_test_loader(cfg, "my_test")
    """
    # Group consecutive frame dicts by 'video_name' so each dataset element is
    # one full video sequence.
    # NOTE(review): assumes frames of the same video are contiguous in
    # `dataset`; a video split across non-adjacent runs would be grouped into
    # several elements — confirm upstream ordering.
    dataset_ = []
    vid_name = ""
    data_vid = []
    for d in dataset:
        if vid_name != d['video_name']:
            vid_name = d['video_name']
            if len(data_vid): dataset_.append(data_vid)
            data_vid = []
        data_vid.append(d)
    # Flush the last video's accumulated frames.
    if len(data_vid): dataset_.append(data_vid)
    dataset = dataset_
    if isinstance(dataset, list):
        dataset = DatasetFromList(dataset, copy=False)
    if mapper is not None:
        dataset = MotMapDataset(dataset, mapper)
    if isinstance(dataset, torchdata.IterableDataset):
        assert sampler is None, "sampler must be None if dataset is IterableDataset"
    else:
        if sampler is None:
            sampler = InferenceSampler(len(dataset))
    return torchdata.DataLoader(
        dataset,
        batch_size=batch_size,
        sampler=sampler,
        drop_last=False,
        num_workers=num_workers,
        collate_fn=trivial_batch_collator if collate_fn is None else collate_fn,
    )
155,810 | import copy
import random
import PIL
import cv2
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as F
from PIL import Image, ImageDraw
import numpy as np
import os
def box_xywh_to_cxcywh(x):
    """Convert boxes from (x0, y0, w, h) to center format (cx, cy, w, h).

    Works on any tensor whose last dimension has size 4.
    """
    top_left = x[..., :2]
    wh = x[..., 2:]
    return torch.cat((top_left + wh / 2, wh), dim=-1)
155,811 | import copy
import random
import PIL
import cv2
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as F
from PIL import Image, ImageDraw
import numpy as np
import os
def crop(image, target, region):
    """Crop `image` to `region` and update all annotations to the crop frame.

    Args:
        image (PIL.Image): input image.
        target (dict): annotations; may contain "boxes" (xyxy), "masks",
            "labels", "area", "iscrowd" and optionally "obj_ids".
        region (tuple): (i, j, h, w) crop window as consumed by ``F.crop``.

    Returns:
        tuple: (cropped_image, updated target) with zero-area instances removed.
    """
    cropped_image = F.crop(image, *region)
    target = target.copy()
    i, j, h, w = region
    # should we do something wrt the original size?
    target["size"] = torch.tensor([h, w])
    # Per-instance fields that must be filtered together with boxes/masks.
    fields = ["labels", "area", "iscrowd"]
    if 'obj_ids' in target:
        fields.append('obj_ids')
    if "boxes" in target:
        boxes = target["boxes"]
        max_size = torch.as_tensor([w, h], dtype=torch.float32)
        # Shift boxes into crop coordinates, then clip to the crop window
        # (view each box as two (x, y) corner points).
        cropped_boxes = boxes - torch.as_tensor([j, i, j, i])
        cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size)
        cropped_boxes = cropped_boxes.clamp(min=0)
        # Recompute area from the clipped corners.
        area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1)
        target["boxes"] = cropped_boxes.reshape(-1, 4)
        target["area"] = area
        fields.append("boxes")
    if "masks" in target:
        # FIXME should we update the area here if there are no boxes?
        target['masks'] = target['masks'][:, i:i + h, j:j + w]
        fields.append("masks")
    # remove elements for which the boxes or masks that have zero area
    if "boxes" in target or "masks" in target:
        # favor boxes selection when defining which elements to keep
        # this is compatible with previous implementation
        if "boxes" in target:
            cropped_boxes = target['boxes'].reshape(-1, 2, 2)
            keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1)
        else:
            keep = target['masks'].flatten(1).any(1)
        for field in fields:
            target[field] = target[field][keep]
    return cropped_image, target
def crop_mot(image, target, region):
    """MOT variant of :func:`crop`: crop `image` and remap tracking annotations.

    Differs from :func:`crop` in the field list (adds "obj_ids"/"scores",
    drops "area") and in slicing ``keep`` to each field's length, since fields
    may have different lengths in the MOT targets.

    Args:
        image (PIL.Image): input image.
        target (dict): tracking annotations ("boxes" in xyxy, "labels",
            "iscrowd", "obj_ids", "scores", optionally "masks").
        region (tuple): (i, j, h, w) crop window as consumed by ``F.crop``.

    Returns:
        tuple: (cropped_image, updated target).
    """
    cropped_image = F.crop(image, *region)
    target = target.copy()
    i, j, h, w = region
    # should we do something wrt the original size?
    target["size"] = torch.tensor([h, w])
    fields = ["labels", "iscrowd", "obj_ids", "scores"]
    if "boxes" in target:
        boxes = target["boxes"]
        max_size = torch.as_tensor([w, h], dtype=torch.float32)
        # Shift into crop coordinates and clip corner points to the window.
        cropped_boxes = boxes - torch.as_tensor([j, i, j, i])
        cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size)
        cropped_boxes = cropped_boxes.clamp(min=0)
        target["boxes"] = cropped_boxes.reshape(-1, 4)
        fields.append("boxes")
    if "masks" in target:
        # FIXME should we update the area here if there are no boxes?
        target['masks'] = target['masks'][:, i:i + h, j:j + w]
        fields.append("masks")
    # remove elements for which the boxes or masks that have zero area
    if "boxes" in target or "masks" in target:
        # favor boxes selection when defining which elements to keep
        # this is compatible with previous implementation
        if "boxes" in target:
            cropped_boxes = target['boxes'].reshape(-1, 2, 2)
            keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1)
        else:
            keep = target['masks'].flatten(1).any(1)
        for field in fields:
            # Fields may be shorter than `keep`; truncate the mask to match.
            n_size = len(target[field])
            target[field] = target[field][keep[:n_size]]
    return cropped_image, target
155,812 | import copy
import random
import PIL
import cv2
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as F
from PIL import Image, ImageDraw
import numpy as np
import os
def crop(image, target, region):
def resize(image, target, size, max_size=None):
def random_shift(image, target, region, sizes):
    """Crop `image` to `region`, resize the crop to `sizes`, and rescale the
    tracking annotations accordingly.

    Unlike :func:`crop_mot`, boxes are rescaled (not clipped) to the output
    size; clipping is only applied when deciding which instances to keep.

    Args:
        image (PIL.Image): input image.
        target (dict): tracking annotations ("boxes" in xyxy, "labels",
            "scores", "iscrowd", "obj_ids", optionally "masks").
        region (tuple): (i, j, h, w) crop window as consumed by ``F.crop``.
        sizes (tuple): (oh, ow) output height/width after resizing.

    Returns:
        tuple: (cropped_image, updated target).
    """
    oh, ow = sizes
    # step 1, shift crop and re-scale image firstly
    cropped_image = F.crop(image, *region)
    cropped_image = F.resize(cropped_image, sizes)
    target = target.copy()
    i, j, h, w = region
    # should we do something wrt the original size?
    target["size"] = torch.tensor([h, w])
    fields = ["labels", "scores", "iscrowd", "obj_ids"]
    if "boxes" in target:
        boxes = target["boxes"]
        # Shift into crop coordinates, then scale to the resized output.
        cropped_boxes = boxes - torch.as_tensor([j, i, j, i])
        cropped_boxes *= torch.as_tensor([ow / w, oh / h, ow / w, oh / h])
        target["boxes"] = cropped_boxes.reshape(-1, 4)
        fields.append("boxes")
    if "masks" in target:
        # FIXME should we update the area here if there are no boxes?
        # NOTE(review): masks are cropped but NOT resized to (oh, ow) here,
        # unlike the image — confirm whether masks are used on this path.
        target['masks'] = target['masks'][:, i:i + h, j:j + w]
        fields.append("masks")
    # remove elements for which the boxes or masks that have zero area
    if "boxes" in target or "masks" in target:
        # favor boxes selection when defining which elements to keep
        # this is compatible with previous implementation
        if "boxes" in target:
            cropped_boxes = target['boxes'].reshape(-1, 2, 2)
            # Clip a throwaway copy to the crop window just to test visibility;
            # the stored (unclipped) boxes are left as-is.
            max_size = torch.as_tensor([w, h], dtype=torch.float32)
            cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size)
            cropped_boxes = cropped_boxes.clamp(min=0)
            keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1)
        else:
            keep = target['masks'].flatten(1).any(1)
        for field in fields:
            # Fields may be shorter than `keep`; truncate the mask to match.
            n_size = len(target[field])
            target[field] = target[field][keep[:n_size]]
    return cropped_image, target
155,813 | import copy
import random
import PIL
import cv2
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as F
from PIL import Image, ImageDraw
import numpy as np
import os
def hflip(image, target):
    """Horizontally flip `image` and mirror its annotations.

    Boxes (xyxy) are mirrored about the vertical center line; masks are
    flipped along their last (width) axis. `target` is shallow-copied.
    """
    mirrored = F.hflip(image)
    w, h = image.size

    out = target.copy()
    if "boxes" in out:
        x0, y0, x1, y1 = out["boxes"].unbind(-1)
        # A box's right edge becomes w - x0 and its left edge w - x1.
        out["boxes"] = torch.stack((w - x1, y0, w - x0, y1), dim=-1)
    if "masks" in out:
        out["masks"] = out["masks"].flip(-1)
    return mirrored, out
155,814 | import copy
import random
import PIL
import cv2
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as F
from PIL import Image, ImageDraw
import numpy as np
import os
def pad(image, target, padding):
    """Pad `image` on the bottom/right by `padding` = (pad_w, pad_h) pixels and
    update the annotations' recorded size and masks to match.

    Args:
        image (PIL.Image): input image.
        target (dict or None): annotations; only "size" and "masks" change.
        padding (tuple): (pad_w, pad_h) amounts for the right/bottom edges.

    Returns:
        tuple: (padded_image, updated target) — target is None if it came in None.
    """
    # assumes that we only pad on the bottom right corners
    padded_image = F.pad(image, (0, 0, padding[0], padding[1]))
    if target is None:
        return padded_image, None
    target = target.copy()
    # should we do something wrt the original size?
    # BUGFIX: the original indexed the PIL image object itself
    # (`padded_image[::-1]`), which raises TypeError; record the new (h, w)
    # from `.size` (PIL reports (w, h), hence the reversal), matching DETR.
    target["size"] = torch.tensor(padded_image.size[::-1])
    if "masks" in target:
        target['masks'] = torch.nn.functional.pad(target['masks'], (0, padding[0], 0, padding[1]))
    return padded_image, target
155,815 | import json
import os
import logging
import torch
from PIL import Image
from fvcore.common.timer import Timer
from collections import defaultdict
from detectron2.data import DatasetCatalog, MetadataCatalog
def get_dancetrack_mot_instances_meta(dataset_name, seqmap):
def register_dancetrack_mot_instances(name, metadata, image_root):
# Mapping: dataset family -> {split name -> (image subdirectory, seqmap file)}.
# Paths are relative to the dataset root passed to register_dancetrack_mot.
_PREDEFINED_SPLITS_DANCETRACK_MOT = {
    "dancetrack": {
        "dancetrack_train": ("train/", "train_seqmap.txt"),
        "dancetrack_val": ("val/", 'val_seqmap.txt'),
        "dancetrack_test": ("test/", "test_seqmap.txt"),
    },
}
def register_dancetrack_mot(root):
    """Register every predefined DanceTrack split in the detectron2 catalogs.

    Args:
        root (str): dataset root directory containing the split folders and
            the per-split seqmap files listed above.
    """
    for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_DANCETRACK_MOT.items():
        for key, (image_root, seqmap) in splits_per_dataset.items():
            register_dancetrack_mot_instances(
                key,
                get_dancetrack_mot_instances_meta(key, os.path.join(root, seqmap)),
                os.path.join(root, image_root),
            )
155,816 | import copy
from typing import Optional, List
import math
import torch
import torch.nn.functional as F
from torch import nn, Tensor
from torch.nn.init import xavier_uniform_, constant_, uniform_, normal_
from detrex.utils import inverse_sigmoid
from detrex.layers import MultiScaleDeformableAttention
def relu_dropout(x, p=0, inplace=False, training=False):
    """Fused ReLU + dropout.

    Zeroes negative entries (ReLU) and, when training with p > 0, additionally
    zeroes each positive entry with probability p, rescaling survivors by
    1 / (1 - p) (inverted dropout).

    Args:
        x (Tensor): input activations.
        p (float): dropout probability.
        inplace (bool): mutate `x` instead of allocating a new tensor.
        training (bool): dropout is only applied in training mode.
    """
    if not training or p == 0:
        # Plain ReLU path — no randomness needed.
        return x.clamp_(min=0) if inplace else x.clamp(min=0)

    # Zero where ReLU would (x < 0) OR where dropout fires (prob. p).
    zero_mask = (x < 0) | (torch.rand_like(x) > 1 - p)
    keep_scale = 1 - p
    if inplace:
        return x.masked_fill_(zero_mask, 0).div_(keep_scale)
    return x.masked_fill(zero_mask, 0).div(keep_scale)
155,817 | import copy
from typing import Optional, List
import math
import torch
import torch.nn.functional as F
from torch import nn, Tensor
from torch.nn.init import xavier_uniform_, constant_, uniform_, normal_
from detrex.utils import inverse_sigmoid
from detrex.layers import MultiScaleDeformableAttention
def pos2posemb(pos, num_pos_feats=64, temperature=10000):
    """Sinusoidal positional embedding.

    Each scalar coordinate in `pos` (expected in [0, 1]) is expanded into
    `num_pos_feats` features: interleaved sin/cos at geometrically spaced
    frequencies, as in "Attention Is All You Need". The last input dimension
    is folded into the feature axis, so the output's trailing size is
    pos.shape[-1] * num_pos_feats.
    """
    angle = pos * (2 * math.pi)
    feat_idx = torch.arange(num_pos_feats, dtype=torch.float32, device=pos.device)
    # Pairs of indices share a frequency: temperature^(2*floor(i/2)/num_pos_feats).
    freq = temperature ** (
        2 * torch.div(feat_idx, 2, rounding_mode="floor") / num_pos_feats
    )
    phase = angle[..., None] / freq
    interleaved = torch.stack(
        (phase[..., 0::2].sin(), phase[..., 1::2].cos()), dim=-1
    )
    return interleaved.flatten(-3)
155,818 | import copy
from typing import Optional, List
import math
import torch
import torch.nn.functional as F
from torch import nn, Tensor
from torch.nn.init import xavier_uniform_, constant_, uniform_, normal_
from detrex.utils import inverse_sigmoid
from detrex.layers import MultiScaleDeformableAttention
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) | null |
155,819 | import copy
from typing import Optional, List
import math
import torch
import torch.nn.functional as F
from torch import nn, Tensor
from torch.nn.init import xavier_uniform_, constant_, uniform_, normal_
from detrex.utils import inverse_sigmoid
from detrex.layers import MultiScaleDeformableAttention
The provided code snippet includes necessary dependencies for implementing the `_get_activation_fn` function. Write a Python function `def _get_activation_fn(activation)` to solve the following problem:
Return an activation function given a string
Here is the function:
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return nn.ReLU(True)
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(F"activation should be relu/gelu, not {activation}.") | Return an activation function given a string |
155,820 | import math
import numpy as np
from typing import List, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from copy import copy
from copy import deepcopy
from collections import defaultdict
from detrex.layers import MLP, box_cxcywh_to_xyxy, box_xyxy_to_cxcywh, generalized_box_iou
from detrex.utils import inverse_sigmoid
from detrex.modeling import SetCriterion
from detrex.modeling.criterion.criterion import sigmoid_focal_loss
from detectron2.structures import Boxes, ImageList, Instances
from detectron2.structures.boxes import matched_pairwise_iou
from detrex.utils import get_world_size, is_dist_avail_and_initialized
from projects.co_mot.util import checkpoint
from projects.co_mot.util.misc import (NestedTensor, nested_tensor_from_tensor_list, accuracy)
def _filter_predictions_with_area(predictions, area_threshold=100):
if "track_instances" in predictions:
preds = predictions["track_instances"]
wh = preds.boxes[:, 2:4] - preds.boxes[:, 0:2]
areas = wh[:, 0] * wh[:, 1]
keep_idxs = areas > area_threshold
predictions = copy(predictions) # don't modify the original
predictions["track_instances"] = preds[keep_idxs]
return predictions | null |
155,821 | import math
import numpy as np
from typing import List, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from copy import copy
from copy import deepcopy
from collections import defaultdict
from detrex.layers import MLP, box_cxcywh_to_xyxy, box_xyxy_to_cxcywh, generalized_box_iou
from detrex.utils import inverse_sigmoid
from detrex.modeling import SetCriterion
from detrex.modeling.criterion.criterion import sigmoid_focal_loss
from detectron2.structures import Boxes, ImageList, Instances
from detectron2.structures.boxes import matched_pairwise_iou
from detrex.utils import get_world_size, is_dist_avail_and_initialized
from projects.co_mot.util import checkpoint
from projects.co_mot.util.misc import (NestedTensor, nested_tensor_from_tensor_list, accuracy)
def _filter_predictions_with_confidence(predictions, confidence_threshold=0.5):
if "track_instances" in predictions:
preds = predictions["track_instances"]
keep_idxs = preds.scores > confidence_threshold
predictions = copy(predictions) # don't modify the original
predictions["track_instances"] = preds[keep_idxs]
return predictions | null |
155,822 | import math
import numpy as np
from typing import List, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from copy import copy
from copy import deepcopy
from collections import defaultdict
from detrex.layers import MLP, box_cxcywh_to_xyxy, box_xyxy_to_cxcywh, generalized_box_iou
from detrex.utils import inverse_sigmoid
from detrex.modeling import SetCriterion
from detrex.modeling.criterion.criterion import sigmoid_focal_loss
from detectron2.structures import Boxes, ImageList, Instances
from detectron2.structures.boxes import matched_pairwise_iou
from detrex.utils import get_world_size, is_dist_avail_and_initialized
from projects.co_mot.util import checkpoint
from projects.co_mot.util.misc import (NestedTensor, nested_tensor_from_tensor_list, accuracy)
def img():
    """Debug-only visualization: draw reference points as boxes and dump JPEGs.

    NOTE(review): this looks like a pasted debugging snippet, not a working
    function — `track_instances` is a free name, and `img.tensors` reads an
    attribute off the function object itself (the name `img` shadows what was
    presumably an outer NestedTensor variable). Presumably it only ran inline
    inside the model's forward; confirm before calling.
    """
    # Un-normalize the first image (ImageNet mean/std) back to uint8 HWC.
    image = np.ascontiguousarray(((img.tensors[0].permute(1,2,0).cpu()*torch.tensor([0.229, 0.224, 0.225])+torch.tensor([0.485, 0.456, 0.406]))*255).numpy().astype(np.uint8))
    img_h, img_w, _ = image.shape
    # Reference points interpreted as (cx, cy), (w, h) pairs in [0, 1].
    bboxes = track_instances.ref_pts.cpu().numpy().reshape(-1, 2, 2)
    bboxes[..., 0] *= img_w
    bboxes[..., 1] *= img_h
    # Convert center/size to corner points in place.
    bboxes[:, 0] -= bboxes[:, 1]/2
    bboxes[:, 1] += bboxes[:, 0]
    import cv2
    # Draw 5 boxes per image, 68 images — hard-coded for 340 queries.
    for i in range(68):
        image_copy = image.copy()
        for box in bboxes[5*i:5*(i+1)]:
            cv2.rectangle(image_copy, pt1 = (int(box[0, 0]), int(box[0, 1])), pt2 =(int(box[1, 0]), int(box[1, 1])), color = (0, 0, 255), thickness = 2)
        cv2.imwrite('tmp2/%d.jpg'%i, image_copy)
155,823 | import math
import torch
from torch import nn
from detrex.layers import box_cxcywh_to_xyxy
from detectron2.structures import Boxes, Instances, pairwise_iou
def random_drop_tracks(track_instances: Instances, drop_probability: float) -> Instances:
    """Randomly discard each track with probability `drop_probability`.

    Used as a training-time augmentation; a non-positive probability or an
    empty Instances returns the input unchanged.
    """
    if drop_probability <= 0 or len(track_instances) == 0:
        return track_instances
    survivors = torch.rand_like(track_instances.scores) > drop_probability
    return track_instances[survivors]
155,824 | import math
import torch
from torch import nn
from detrex.layers import box_cxcywh_to_xyxy
from detectron2.structures import Boxes, Instances, pairwise_iou
def pos2posemb(pos, num_pos_feats=64, temperature=10000):
    """Sinusoidal positional embedding (transformer-style).

    Expands each scalar coordinate of `pos` (expected in [0, 1]) into
    `num_pos_feats` interleaved sin/cos features at geometrically spaced
    frequencies; the last input dimension is folded into the feature axis.
    """
    angle = pos * (2 * math.pi)
    feat_idx = torch.arange(num_pos_feats, dtype=torch.float32, device=pos.device)
    # Index pairs (2k, 2k+1) share frequency temperature^(2k/num_pos_feats).
    freq = temperature ** (
        2 * torch.div(feat_idx, 2, rounding_mode="floor") / num_pos_feats
    )
    phase = angle[..., None] / freq
    interleaved = torch.stack(
        (phase[..., 0::2].sin(), phase[..., 1::2].cos()), dim=-1
    )
    return interleaved.flatten(-3)
155,825 | import contextlib
import copy
import io
import itertools
import json
import logging
import numpy as np
import os
import pickle
from collections import OrderedDict
import pycocotools.mask as mask_util
import torch
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from tabulate import tabulate
import detectron2.utils.comm as comm
from detectron2.config import CfgNode
from detectron2.data import MetadataCatalog
from detectron2.data.datasets.coco import convert_to_coco_json
from detectron2.structures import Boxes, BoxMode, pairwise_iou
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import create_small_table
from detectron2.evaluation.evaluator import DatasetEvaluator
def _run_mot_challenge(**argc_dict):
    """Run TrackEval's MOT-Challenge 2D-box evaluation with keyword overrides.

    Merges TrackEval's default eval/dataset/metric configs, overrides any key
    present in `argc_dict` (coercing types to match the default value), and
    evaluates HOTA/CLEAR/Identity (plus VACE if requested).

    NOTE(review): `trackeval` and `freeze_support` (presumably
    multiprocessing.freeze_support) are not imported in this chunk — confirm
    they come from this module's import block.

    Returns:
        Whatever ``trackeval.Evaluator.evaluate`` returns (results, messages).
    """
    freeze_support()
    # Command line interface:
    default_eval_config = trackeval.Evaluator.get_default_eval_config()
    default_eval_config['DISPLAY_LESS_PROGRESS'] = False
    default_dataset_config = trackeval.datasets.MotChallenge2DBox.get_default_dataset_config()
    default_metrics_config = {'METRICS': ['HOTA', 'CLEAR', 'Identity'], 'THRESHOLD': 0.5}
    config = {**default_eval_config, **default_dataset_config, **default_metrics_config}  # Merge default configs
    # Apply overrides, coercing each value to the type of the default entry.
    for setting in config.keys():
        if setting in argc_dict:
            if type(config[setting]) == type(True):
                x = argc_dict[setting]
            elif type(config[setting]) == type(1):
                x = int(argc_dict[setting])
            elif type(argc_dict[setting]) == type(None):
                x = None
            elif setting == 'SEQ_INFO':
                # SEQ_INFO wants {seq_name: length-or-None}; lengths unknown here.
                x = dict(zip(argc_dict[setting], [None]*len(argc_dict[setting])))
            else:
                x = argc_dict[setting]
            config[setting] = x
    # Split the merged dict back into the three config namespaces.
    eval_config = {k: v for k, v in config.items() if k in default_eval_config.keys()}
    dataset_config = {k: v for k, v in config.items() if k in default_dataset_config.keys()}
    metrics_config = {k: v for k, v in config.items() if k in default_metrics_config.keys()}

    # Run code
    evaluator = trackeval.Evaluator(eval_config)
    dataset_list = [trackeval.datasets.MotChallenge2DBox(dataset_config)]
    metrics_list = []
    for metric in [trackeval.metrics.HOTA, trackeval.metrics.CLEAR, trackeval.metrics.Identity, trackeval.metrics.VACE]:
        if metric.get_name() in metrics_config['METRICS']:
            metrics_list.append(metric(metrics_config))
    if len(metrics_list) == 0:
        raise Exception('No metrics selected for evaluation')
    return evaluator.evaluate(dataset_list, metrics_list)
155,826 | from fvcore.common.param_scheduler import MultiStepParamScheduler
from detectron2.config import LazyCall as L
from detectron2.solver import WarmupParamScheduler
The provided code snippet includes necessary dependencies for implementing the `default_dancetrack_scheduler` function. Write a Python function `def default_dancetrack_scheduler(epochs=50, decay_epochs=40, warmup_epochs=0, max_iter_epoch=5225)` to solve the following problem:
Returns the config for a default multi-step LR scheduler such as "50epochs", commonly referred to in papers, where every 1x has the total length of 1440k training images (~12 COCO epochs). LR is decayed once at the end of training. Args: epochs (int): total training epochs. decay_epochs (int): lr decay steps. warmup_epochs (int): warmup epochs. Returns: DictConfig: configs that define the multiplier for LR during training
Here is the function:
def default_dancetrack_scheduler(epochs=50, decay_epochs=40, warmup_epochs=0, max_iter_epoch=5225):
    """
    Build the default multi-step LR multiplier config for DanceTrack training:
    multiplier 1.0 until `decay_epochs`, then 0.1 until `epochs`, with an
    optional linear warmup.

    Args:
        epochs (int): total training epochs.
        decay_epochs (int): epoch at which the LR is multiplied by 0.1.
        warmup_epochs (int): number of warmup epochs.
        max_iter_epoch (int): iterations per epoch (41796 images / batch 8 = 5225).

    Returns:
        DictConfig: lazy config defining the LR multiplier over training.
    """
    iters_total = epochs * max_iter_epoch
    iters_decay = decay_epochs * max_iter_epoch
    iters_warmup = warmup_epochs * max_iter_epoch

    step_schedule = L(MultiStepParamScheduler)(
        values=[1.0, 0.1],
        milestones=[iters_decay, iters_total],
    )
    return L(WarmupParamScheduler)(
        scheduler=step_schedule,
        warmup_length=iters_warmup / iters_total,
        warmup_method="linear",
        warmup_factor=0.001,
    )
155,827 | import logging
import os
import sys
import time
import torch
from torch.nn.parallel import DataParallel, DistributedDataParallel
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import LazyConfig, instantiate
from detectron2.engine import (
SimpleTrainer,
default_argument_parser,
default_setup,
default_writers,
hooks,
launch,
)
from detectron2.engine.defaults import create_ddp_model
from detectron2.evaluation import inference_on_dataset, print_csv_format
from detectron2.utils import comm
from projects.co_mot.util.misc import data_dict_to_cuda
logger = logging.getLogger("detrex")
def match_name_keywords(n, name_keywords):
    """Return True iff any keyword in *name_keywords* occurs as a substring of *n*."""
    return any(keyword in n for keyword in name_keywords)
class Trainer(SimpleTrainer):
    """
    A combined Simple/AMP trainer: runs one standard detectron2 training step
    per call, optionally under automatic mixed precision (torch.cuda.amp with
    a GradScaler) and with optional per-step gradient norm clipping.
    """
    def __init__(
        self,
        model,
        dataloader,
        optimizer,
        amp=False,
        clip_grad_params=None,
        grad_scaler=None,
    ):
        """
        Args:
            model: module to train (may be DDP-wrapped; DataParallel is rejected).
            dataloader: training dataloader, iterated one batch per step.
            optimizer: optimizer stepped every iteration.
            amp (bool): enable mixed-precision loss scaling.
            clip_grad_params (dict | None): kwargs for torch.nn.utils.clip_grad_norm_;
                None disables clipping.
            grad_scaler: optional pre-built GradScaler; created lazily when
                amp=True and none is given.
        """
        super().__init__(model=model, data_loader=dataloader, optimizer=optimizer)
        unsupported = "AMPTrainer does not support single-process multi-device training!"
        if isinstance(model, DistributedDataParallel):
            # exactly one device per DDP process
            assert not (model.device_ids and len(model.device_ids) > 1), unsupported
        assert not isinstance(model, DataParallel), unsupported
        if amp:
            if grad_scaler is None:
                from torch.cuda.amp import GradScaler
                grad_scaler = GradScaler()
            self.grad_scaler = grad_scaler
        # set True to use amp training
        self.amp = amp
        # gradient clip hyper-params
        self.clip_grad_params = clip_grad_params
        # NOTE(review): assumes the (possibly wrapped) model exposes a .device
        # attribute — true for the MOT model used here, but not for a plain
        # nn.Module. Confirm before reusing with other models.
        self.device = self.model.device
    def run_step(self):
        """
        Run one training iteration: fetch a batch, move it to the model's
        device, compute losses, and take an optimizer step (AMP-scaled when
        enabled, with optional gradient clipping).
        """
        assert self.model.training, "[Trainer] model was changed to eval mode!"
        # NOTE(review): CUDA is asserted even when amp=False — confirm CPU
        # training is truly unsupported for this trainer.
        assert torch.cuda.is_available(), "[Trainer] CUDA is required for AMP training!"
        from torch.cuda.amp import autocast
        start = time.perf_counter()
        """
        If you want to do something with the data, you can wrap the dataloader.
        """
        data = next(self._data_loader_iter)
        data_time = time.perf_counter() - start
        """
        If you want to do something with the losses, you can wrap the model.
        """
        data = data_dict_to_cuda(data, self.device)
        loss_dict = self.model(data)
        # NOTE(review): the forward pass above runs OUTSIDE autocast; only the
        # loss reduction below is autocast-wrapped, so the forward itself is
        # full precision even with amp=True — confirm whether that is intended.
        with autocast(enabled=self.amp):
            if isinstance(loss_dict, torch.Tensor):
                # model returned a single scalar loss
                losses = loss_dict
                loss_dict = {"total_loss": loss_dict}
            else:
                # model returned a dict of named losses; optimize their sum
                losses = sum(loss_dict.values())
        """
        If you need to accumulate gradients or do something similar, you can
        wrap the optimizer with your custom `zero_grad()` method.
        """
        self.optimizer.zero_grad()
        if self.amp:
            self.grad_scaler.scale(losses).backward()
            if self.clip_grad_params is not None:
                # gradients must be unscaled before the norm is measured
                self.grad_scaler.unscale_(self.optimizer)
                self.clip_grads(self.model.parameters())
            self.grad_scaler.step(self.optimizer)
            self.grad_scaler.update()
        else:
            losses.backward()
            if self.clip_grad_params is not None:
                self.clip_grads(self.model.parameters())
            self.optimizer.step()
        self._write_metrics(loss_dict, data_time)
    def clip_grads(self, params):
        """Norm-clip gradients of trainable params that currently have a .grad.

        Uses self.clip_grad_params as kwargs to clip_grad_norm_ and returns
        the total norm, or None when no parameter has a gradient.
        """
        params = list(filter(lambda p: p.requires_grad and p.grad is not None, params))
        if len(params) > 0:
            return torch.nn.utils.clip_grad_norm_(
                parameters=params,
                **self.clip_grad_params,
            )
def do_test(cfg, model):
    """Evaluate `model` on cfg.dataloader.test when an evaluator is configured.

    Prints the results in CSV format and returns the results dict; returns
    None (implicitly) when cfg.dataloader has no "evaluator" entry.
    """
    if "evaluator" in cfg.dataloader:
        ret = inference_on_dataset(
            model, instantiate(cfg.dataloader.test), instantiate(cfg.dataloader.evaluator)
        )
        print_csv_format(ret)
        return ret
The provided code snippet includes necessary dependencies for implementing the `do_train` function. Write a Python function `def do_train(args, cfg)` to solve the following problem:
Args: cfg: an object with the following attributes: model: instantiate to a module dataloader.{train,test}: instantiate to dataloaders dataloader.evaluator: instantiate to evaluator for test set optimizer: instantiate to an optimizer lr_multiplier: instantiate to an fvcore scheduler train: other misc config defined in `configs/common/train.py`, including: output_dir (str) init_checkpoint (str) amp.enabled (bool) max_iter (int) eval_period, log_period (int) device (str) checkpointer (dict) ddp (dict)
Here is the function:
def do_train(args, cfg):
    """
    Args:
        args: command-line args; only args.resume is read here.
        cfg: an object with the following attributes:
            model: instantiate to a module
            dataloader.{train,test}: instantiate to dataloaders
            dataloader.evaluator: instantiate to evaluator for test set
            optimizer: instantiate to an optimizer
            lr_multiplier: instantiate to an fvcore scheduler
            train: other misc config defined in `configs/common/train.py`, including:
                output_dir (str)
                init_checkpoint (str)
                amp.enabled (bool)
                max_iter (int)
                eval_period, log_period (int)
                device (str)
                checkpointer (dict)
                ddp (dict)
    """
    model = instantiate(cfg.model)
    logger = logging.getLogger("detectron2")
    logger.info("Model:\n{}".format(model))
    model.to(cfg.train.device)
    n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
    logger.info('number of params: {}'.format(n_parameters))
    # Deformable-DETR style parameter groups, hand-rolled (a "hack" copied
    # from train_net) instead of instantiating cfg.optimizer directly:
    # base LR for most params, a separate LR for the backbone, and a scaled
    # LR for the deformable reference-point / sampling-offset projections.
    param_dicts = [
        {
            # everything that is neither backbone nor linear-projection
            "params": [
                p
                for n, p in model.named_parameters()
                if not match_name_keywords(n, cfg.train.lr_backbone_names)
                and not match_name_keywords(n, cfg.train.lr_linear_proj_names)
                and p.requires_grad
            ],
            "lr": cfg.optimizer.lr,
        },
        {
            # backbone parameters
            "params": [
                p
                for n, p in model.named_parameters()
                if match_name_keywords(n, cfg.train.lr_backbone_names) and p.requires_grad
            ],
            "lr": cfg.optimizer.lr_backbone,
        },
        {
            # deformable-attention projection parameters
            "params": [
                p
                for n, p in model.named_parameters()
                if match_name_keywords(n, cfg.train.lr_linear_proj_names)
                and p.requires_grad
            ],
            "lr": cfg.optimizer.lr * cfg.optimizer.lr_linear_proj_mult,
        },
    ]
    if cfg.optimizer.sgd:
        optim = torch.optim.SGD(param_dicts, lr=cfg.optimizer.lr, momentum=0.9,
                                weight_decay=cfg.optimizer.weight_decay)
    else:
        optim = torch.optim.AdamW(param_dicts, lr=cfg.optimizer.lr,
                                  weight_decay=cfg.optimizer.weight_decay)
    train_loader = instantiate(cfg.dataloader.train)
    model = create_ddp_model(model, **cfg.train.ddp)
    trainer = Trainer(
        model=model,
        dataloader=train_loader,
        optimizer=optim,
        amp=cfg.train.amp.enabled,  # default False
        clip_grad_params=cfg.train.clip_grad.params if cfg.train.clip_grad.enabled else None,  # default False
    )
    checkpointer = DetectionCheckpointer(
        model,
        cfg.train.output_dir,
        trainer=trainer,
    )
    # Standard detectron2 hook stack: timing, LR scheduling, periodic
    # checkpointing/logging (main process only), and periodic evaluation.
    trainer.register_hooks(
        [
            hooks.IterationTimer(),
            hooks.LRScheduler(scheduler=instantiate(cfg.lr_multiplier)),
            hooks.PeriodicCheckpointer(checkpointer, **cfg.train.checkpointer) if comm.is_main_process() else None,
            hooks.EvalHook(cfg.train.eval_period, lambda: do_test(cfg, model)),
            hooks.PeriodicWriter(
                default_writers(cfg.train.output_dir, cfg.train.max_iter),
                period=cfg.train.log_period,
            ) if comm.is_main_process() else None,
        ]
    )
    checkpointer.resume_or_load(cfg.train.init_checkpoint, resume=args.resume)
    if args.resume and checkpointer.has_checkpoint():
        # The checkpoint stores the training iteration that just finished, thus we start
        # at the next iteration
        start_iter = trainer.iter + 1
    else:
        start_iter = 0
    trainer.train(start_iter, cfg.train.max_iter)
155,828 | import torch
def check_require_grad(t):
    """Return True only for torch.Tensors whose requires_grad flag is set."""
    if not isinstance(t, torch.Tensor):
        return False
    return t.requires_grad
155,829 | import os
import subprocess
import time
from collections import OrderedDict, defaultdict, deque
import datetime
import pickle
from typing import Optional, List
import torch
import torch.nn as nn
import torch.distributed as dist
from torch import Tensor
from functools import partial
from detectron2.structures import Instances
import torchvision
def _max_by_axis(the_list):
    # type: (List[List[int]]) -> List[int]
    """Return the element-wise maximum across equal-length integer lists.

    Fix: the original aliased ``the_list[0]`` and wrote the running maxima
    into it, mutating the caller's first sublist as a side effect. We now
    work on a copy so the input is never modified.
    """
    maxes = list(the_list[0])
    for sublist in the_list[1:]:
        for index, item in enumerate(sublist):
            maxes[index] = max(maxes[index], item)
    return maxes
class NestedTensor(object):
    """A padded batch tensor bundled with its padding mask.

    The mask has one entry per spatial position: True marks padding,
    False marks valid pixels. ``mask`` may be None.
    """

    def __init__(self, tensors, mask: Optional[Tensor]):
        self.tensors = tensors
        self.mask = mask

    def to(self, device, non_blocking=False):
        """Return a new NestedTensor with both members moved to *device*."""
        moved_tensors = self.tensors.to(device, non_blocking=non_blocking)
        if self.mask is None:
            moved_mask = None
        else:
            moved_mask = self.mask.to(device, non_blocking=non_blocking)
        return NestedTensor(moved_tensors, moved_mask)

    def record_stream(self, *args, **kwargs):
        """Forward record_stream to the tensors (and mask, when present)."""
        self.tensors.record_stream(*args, **kwargs)
        if self.mask is not None:
            self.mask.record_stream(*args, **kwargs)

    def decompose(self):
        """Return the (tensors, mask) pair."""
        return self.tensors, self.mask

    def __repr__(self):
        return str(self.tensors)
def nested_tensor_from_tensor_list(tensor_list: List[Tensor], size_divisibility: int = 0):
    """Pad a list of CHW images to a shared size and wrap them in a NestedTensor.

    The returned mask is True on padded pixels and False on valid ones.
    Only 3-D (C, H, W) tensors are supported; anything else raises ValueError.
    """
    if tensor_list[0].ndim != 3:
        raise ValueError('not supported')
    # Largest extent along each axis over the whole batch.
    # TODO make it support different-sized images
    max_size = _max_by_axis([list(img.shape) for img in tensor_list])
    if size_divisibility > 0:
        # Round H and W (last two dims) up to the next multiple of the stride.
        stride = size_divisibility
        max_size[-1] = (max_size[-1] + (stride - 1)) // stride * stride
        max_size[-2] = (max_size[-2] + (stride - 1)) // stride * stride
    batch_shape = [len(tensor_list)] + max_size
    b, c, h, w = batch_shape
    ref = tensor_list[0]
    padded = torch.zeros(batch_shape, dtype=ref.dtype, device=ref.device)
    mask = torch.ones((b, h, w), dtype=torch.bool, device=ref.device)
    for img, pad_img, m in zip(tensor_list, padded, mask):
        # Copy each image into the top-left corner and mark its pixels valid.
        pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
        m[: img.shape[1], : img.shape[2]] = False
    return NestedTensor(padded, mask)
155,830 | import os
import subprocess
import time
from collections import OrderedDict, defaultdict, deque
import datetime
import pickle
from typing import Optional, List
import torch
import torch.nn as nn
import torch.distributed as dist
from torch import Tensor
from functools import partial
from detectron2.structures import Instances
import torchvision
The provided code snippet includes necessary dependencies for implementing the `accuracy` function. Write a Python function `def accuracy(output, target, topk=(1,))` to solve the following problem:
Computes the precision@k for the specified values of k
Here is the function:
def accuracy(output, target, topk=(1,)):
    """Compute the precision@k (in percent) for each k in *topk*.

    Returns a list of scalar tensors, one per requested k. An empty target
    yields a single zero scalar.
    """
    if target.numel() == 0:
        return [torch.zeros([], device=output.device)]
    num_samples = target.size(0)
    # Top max(topk) class indices per sample, transposed to (maxk, batch).
    _, predictions = output.topk(max(topk), 1, True, True)
    predictions = predictions.t()
    hits = predictions.eq(target.view(1, -1).expand_as(predictions))
    results = []
    for k in topk:
        hits_at_k = hits[:k].reshape(-1).float().sum(0)
        results.append(hits_at_k.mul_(100.0 / num_samples))
    return results
155,831 | import os
import subprocess
import time
from collections import OrderedDict, defaultdict, deque
import datetime
import pickle
from typing import Optional, List
import torch
import torch.nn as nn
import torch.distributed as dist
from torch import Tensor
from functools import partial
from detectron2.structures import Instances
import torchvision
def to_cuda(samples, targets, device):
    """Move a batch (samples plus per-image target dicts) onto *device*."""
    moved_samples = samples.to(device, non_blocking=True)
    moved_targets = [
        {key: value.to(device, non_blocking=True) for key, value in target.items()}
        for target in targets
    ]
    return moved_samples, moved_targets
155,832 | import os
import subprocess
import time
from collections import OrderedDict, defaultdict, deque
import datetime
import pickle
from typing import Optional, List
import torch
import torch.nn as nn
import torch.distributed as dist
from torch import Tensor
from functools import partial
from detectron2.structures import Instances
import torchvision
def tensor_to_cuda(tensor: torch.Tensor, device):
    """Return *tensor* moved to *device* (standard Tensor.to semantics)."""
    return tensor.to(device)
def is_tensor_or_instances(data):
    """Return True for the two leaf types data_apply may transform."""
    return isinstance(data, (torch.Tensor, Instances))
def data_apply(data, check_func, apply_func):
    """Recursively walk nested dicts/lists, replacing in place every element
    for which check_func is True with apply_func(element).

    ints are left untouched; any other leaf type raises ValueError
    (matching the original's per-branch error messages). The (mutated)
    input is also returned for convenience.
    """
    if isinstance(data, dict):
        for key, value in data.items():
            if check_func(value):
                data[key] = apply_func(value)
            elif isinstance(value, (dict, list)):
                data_apply(value, check_func, apply_func)
            elif isinstance(value, int):
                continue
            else:
                raise ValueError()
    elif isinstance(data, list):
        for idx, item in enumerate(data):
            if check_func(item):
                data[idx] = apply_func(item)
            elif isinstance(item, (dict, list)):
                data_apply(item, check_func, apply_func)
            elif isinstance(item, int):
                continue
            else:
                raise ValueError("invalid type {}".format(type(item)))
    elif isinstance(data, int):
        pass
    else:
        raise ValueError("invalid type {}".format(type(data)))
    return data
def data_dict_to_cuda(data_dict, device):
    """Move every tensor/Instances leaf of *data_dict* onto *device* in place."""
    move = partial(tensor_to_cuda, device=device)
    return data_apply(data_dict, is_tensor_or_instances, move)
155,833 | import argparse
import numpy as np
import torch
def parse_args():
parser = argparse.ArgumentParser("detrex model converter")
parser.add_argument(
"--source_model", default="", type=str, help="Path or url to the DETR model to convert"
)
parser.add_argument(
"--output_model", default="", type=str, help="Path where to save the converted model"
)
return parser.parse_args() | null |
155,834 | import logging
import os
import sys
import time
import torch
from torch.nn.parallel import DataParallel, DistributedDataParallel
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import LazyConfig, instantiate
from detectron2.engine import (
SimpleTrainer,
default_argument_parser,
default_setup,
default_writers,
hooks,
launch,
)
from detectron2.engine.defaults import create_ddp_model
from detectron2.evaluation import inference_on_dataset, print_csv_format
from detectron2.utils import comm
logger = logging.getLogger("detrex")
def match_name_keywords(n, name_keywords):
    """True iff the parameter name *n* contains any of the given keywords."""
    for keyword in name_keywords:
        if keyword in n:
            return True
    return False
class Trainer(SimpleTrainer):
    """
    Combines detectron2's Simple and AMP trainers: one standard training
    step per call, optionally under torch.cuda.amp loss scaling, plus
    optional gradient norm clipping.
    """
    def __init__(
        self,
        model,
        dataloader,
        optimizer,
        amp=False,
        clip_grad_params=None,
        grad_scaler=None,
    ):
        """
        Args:
            model: module to train (may be DDP-wrapped; DataParallel is rejected).
            dataloader: training dataloader.
            optimizer: optimizer stepped every iteration.
            amp (bool): enable mixed-precision loss scaling.
            clip_grad_params (dict | None): kwargs for clip_grad_norm_; None disables.
            grad_scaler: optional GradScaler; built lazily when amp=True.
        """
        super().__init__(model=model, data_loader=dataloader, optimizer=optimizer)
        unsupported = "AMPTrainer does not support single-process multi-device training!"
        if isinstance(model, DistributedDataParallel):
            # exactly one device per DDP process
            assert not (model.device_ids and len(model.device_ids) > 1), unsupported
        assert not isinstance(model, DataParallel), unsupported
        if amp:
            if grad_scaler is None:
                from torch.cuda.amp import GradScaler
                grad_scaler = GradScaler()
            self.grad_scaler = grad_scaler
        # set True to use amp training
        self.amp = amp
        # gradient clip hyper-params
        self.clip_grad_params = clip_grad_params
    def run_step(self):
        """
        One training iteration: fetch a batch, forward, reduce losses, and
        step the optimizer (AMP-scaled when enabled, with optional clipping).
        """
        assert self.model.training, "[Trainer] model was changed to eval mode!"
        # NOTE(review): CUDA is required even when amp=False — confirm CPU
        # runs are truly unsupported here.
        assert torch.cuda.is_available(), "[Trainer] CUDA is required for AMP training!"
        from torch.cuda.amp import autocast
        start = time.perf_counter()
        """
        If you want to do something with the data, you can wrap the dataloader.
        """
        data = next(self._data_loader_iter)
        data_time = time.perf_counter() - start
        """
        If you want to do something with the losses, you can wrap the model.
        """
        loss_dict = self.model(data)
        # NOTE(review): the forward above runs outside autocast; only the loss
        # reduction is wrapped, so amp=True does not run the forward in mixed
        # precision — confirm whether that is intentional.
        with autocast(enabled=self.amp):
            if isinstance(loss_dict, torch.Tensor):
                # single scalar loss returned by the model
                losses = loss_dict
                loss_dict = {"total_loss": loss_dict}
            else:
                # dict of named losses; their sum is optimized
                losses = sum(loss_dict.values())
        """
        If you need to accumulate gradients or do something similar, you can
        wrap the optimizer with your custom `zero_grad()` method.
        """
        self.optimizer.zero_grad()
        if self.amp:
            self.grad_scaler.scale(losses).backward()
            if self.clip_grad_params is not None:
                # unscale before clipping so the norm is measured in real units
                self.grad_scaler.unscale_(self.optimizer)
                self.clip_grads(self.model.parameters())
            self.grad_scaler.step(self.optimizer)
            self.grad_scaler.update()
        else:
            losses.backward()
            if self.clip_grad_params is not None:
                self.clip_grads(self.model.parameters())
            self.optimizer.step()
        self._write_metrics(loss_dict, data_time)
    def clip_grads(self, params):
        """Norm-clip gradients of trainable params that currently hold a .grad.

        Returns the total norm, or None when there is nothing to clip.
        """
        params = list(filter(lambda p: p.requires_grad and p.grad is not None, params))
        if len(params) > 0:
            return torch.nn.utils.clip_grad_norm_(
                parameters=params,
                **self.clip_grad_params,
            )
def do_test(cfg, model):
    """Run evaluation on cfg.dataloader.test if an evaluator is configured.

    Prints the metrics as CSV and returns them; returns None implicitly when
    no "evaluator" key exists in cfg.dataloader.
    """
    if "evaluator" in cfg.dataloader:
        ret = inference_on_dataset(
            model, instantiate(cfg.dataloader.test), instantiate(cfg.dataloader.evaluator)
        )
        print_csv_format(ret)
        return ret
The provided code snippet includes necessary dependencies for implementing the `do_train` function. Write a Python function `def do_train(args, cfg)` to solve the following problem:
Args: cfg: an object with the following attributes: model: instantiate to a module dataloader.{train,test}: instantiate to dataloaders dataloader.evaluator: instantiate to evaluator for test set optimizer: instantiate to an optimizer lr_multiplier: instantiate to an fvcore scheduler train: other misc config defined in `configs/common/train.py`, including: output_dir (str) init_checkpoint (str) amp.enabled (bool) max_iter (int) eval_period, log_period (int) device (str) checkpointer (dict) ddp (dict)
Here is the function:
def do_train(args, cfg):
    """
    Args:
        args: command-line args; only args.resume is read here.
        cfg: an object with the following attributes:
            model: instantiate to a module
            dataloader.{train,test}: instantiate to dataloaders
            dataloader.evaluator: instantiate to evaluator for test set
            optimizer: instantiate to an optimizer
            lr_multiplier: instantiate to an fvcore scheduler
            train: other misc config defined in `configs/common/train.py`, including:
                output_dir (str)
                init_checkpoint (str)
                amp.enabled (bool)
                max_iter (int)
                eval_period, log_period (int)
                device (str)
                checkpointer (dict)
                ddp (dict)
    """
    model = instantiate(cfg.model)
    logger = logging.getLogger("detectron2")
    logger.info("Model:\n{}".format(model))
    model.to(cfg.train.device)
    # Hand-rolled parameter groups (a "hack" copied from train_net).
    # NOTE(review): LRs are hard-coded here (2e-4 base, 2e-5 for the backbone
    # and the deformable reference-point / sampling-offset projections)
    # instead of read from cfg.optimizer — confirm this override is intended.
    param_dicts = [
        {
            # everything that is neither backbone nor deformable projection
            "params": [
                p
                for n, p in model.named_parameters()
                if not match_name_keywords(n, ["backbone"])
                and not match_name_keywords(n, ["reference_points", "sampling_offsets"])
                and p.requires_grad
            ],
            "lr": 2e-4,
        },
        {
            # backbone parameters
            "params": [
                p
                for n, p in model.named_parameters()
                if match_name_keywords(n, ["backbone"]) and p.requires_grad
            ],
            "lr": 2e-5,
        },
        {
            # deformable-attention projection parameters
            "params": [
                p
                for n, p in model.named_parameters()
                if match_name_keywords(n, ["reference_points", "sampling_offsets"])
                and p.requires_grad
            ],
            "lr": 2e-5,
        },
    ]
    optim = torch.optim.AdamW(param_dicts, 2e-4, weight_decay=1e-4)
    train_loader = instantiate(cfg.dataloader.train)
    model = create_ddp_model(model, **cfg.train.ddp)
    trainer = Trainer(
        model=model,
        dataloader=train_loader,
        optimizer=optim,
        amp=cfg.train.amp.enabled,
        clip_grad_params=cfg.train.clip_grad.params if cfg.train.clip_grad.enabled else None,
    )
    checkpointer = DetectionCheckpointer(
        model,
        cfg.train.output_dir,
        trainer=trainer,
    )
    # Standard detectron2 hook stack: timing, LR scheduling, periodic
    # checkpointing/logging (main process only), and periodic evaluation.
    trainer.register_hooks(
        [
            hooks.IterationTimer(),
            hooks.LRScheduler(scheduler=instantiate(cfg.lr_multiplier)),
            hooks.PeriodicCheckpointer(checkpointer, **cfg.train.checkpointer)
            if comm.is_main_process()
            else None,
            hooks.EvalHook(cfg.train.eval_period, lambda: do_test(cfg, model)),
            hooks.PeriodicWriter(
                default_writers(cfg.train.output_dir, cfg.train.max_iter),
                period=cfg.train.log_period,
            )
            if comm.is_main_process()
            else None,
        ]
    )
    checkpointer.resume_or_load(cfg.train.init_checkpoint, resume=args.resume)
    if args.resume and checkpointer.has_checkpoint():
        # The checkpoint stores the training iteration that just finished, thus we start
        # at the next iteration
        start_iter = trainer.iter + 1
    else:
        start_iter = 0
    trainer.train(start_iter, cfg.train.max_iter)
155,836 | import torch.nn.functional as F
import torch
import copy
import torch.nn as nn
from detrex.layers import (
FFN,
MLP,
BaseTransformerLayer,
MultiheadAttention,
MultiScaleDeformableAttention,
TransformerLayerSequence,
get_sine_pos_embed,
)
from detrex.utils import inverse_sigmoid
from .transformer_layer import Focus_DETR_BaseTransformerLayer
def _get_clones(module, N, layer_share=False):
    """Return a ModuleList of N copies of *module*.

    With layer_share=True the very same module object is reused N times
    (shared weights); otherwise each entry is an independent deep copy.
    """
    if layer_share:
        return nn.ModuleList(module for _ in range(N))
    return nn.ModuleList(copy.deepcopy(module) for _ in range(N))
155,837 | import math, random
import copy
from typing import Optional
from detrex.utils import inverse_sigmoid
from detrex.layers.box_ops import box_cxcywh_to_xyxy
import torch
from typing import List
import torch.nn.functional as F
from torch import nn, Tensor
The provided code snippet includes necessary dependencies for implementing the `coords_fmap2orig` function. Write a Python function `def coords_fmap2orig(feature, stride)` to solve the following problem:
transform one feature-map's coords to original-image coords Args feature [batch_size,h,w,c] stride int Returns coords [n,2]
Here is the function:
def coords_fmap2orig(feature, stride):
    """Map feature-map cell coordinates back to original-image coordinates.

    Args:
        feature: tensor of shape [batch_size, h, w, c]; only h and w are read.
        stride (int): downsampling stride of this feature level.

    Returns:
        coords: [h*w, 2] float tensor of (x, y) centers of each cell in the
        original image, in row-major order.
    """
    h, w = feature.shape[1:3]
    shifts_x = torch.arange(0, w * stride, stride, dtype=torch.float32)
    shifts_y = torch.arange(0, h * stride, stride, dtype=torch.float32)
    # indexing="ij" reproduces the historical default of the two-argument
    # call and silences the torch.meshgrid deprecation warning.
    shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x, indexing="ij")
    shift_x = torch.reshape(shift_x, [-1])
    shift_y = torch.reshape(shift_y, [-1])
    # +stride//2 shifts from the cell's top-left corner to its center.
    coords = torch.stack([shift_x, shift_y], -1) + stride // 2
    return coords
155,838 | import math, random
import copy
from typing import Optional
from detrex.utils import inverse_sigmoid
from detrex.layers.box_ops import box_cxcywh_to_xyxy
import torch
from typing import List
import torch.nn.functional as F
from torch import nn, Tensor
def focal_loss_from_logits(preds, targets, gamma=2.0, alpha=0.25):
    """Binary focal loss, summed over all elements.

    Args:
        preds: [n, class_num] raw logits.
        targets: [n, class_num] (one-hot) targets.
        gamma: focusing exponent on (1 - pt).
        alpha: positive-class balancing weight.
    """
    probs = preds.sigmoid()
    # pt is the probability assigned to the true outcome of each element.
    pt = probs * targets + (1.0 - probs) * (1.0 - targets)
    weight = alpha * targets + (1.0 - alpha) * (1.0 - targets)
    per_element = -weight * (1.0 - pt) ** gamma * pt.log()
    return per_element.sum()
The provided code snippet includes necessary dependencies for implementing the `compute_cls_loss` function. Write a Python function `def compute_cls_loss(preds, targets, mask)` to solve the following problem:
Args preds: list contains five level pred [batch_size,class_num,_h,_w] targets: [batch_size,sum(_h*_w),1] mask: [batch_size,sum(_h*_w)]
Here is the function:
def compute_cls_loss(preds, targets, mask):
    '''
    Per-image focal classification loss, normalized by positive count.
    Args
        preds: [batch_size, sum(_h*_w), class_num] classification logits.
            NOTE(review): the old docstring described a list of per-level
            [batch_size, class_num, _h, _w] maps, which does not match the
            preds[batch_index] / preds.shape[:2] indexing below — confirm
            the caller passes the flattened tensor form.
        targets: [batch_size,sum(_h*_w),1] sparse class ids (1-based; 0 = background)
        mask: [batch_size,sum(_h*_w)] positive-location mask used only for
            the per-image normalizer
    Returns
        [batch_size,] loss per image
    '''
    batch_size = targets.shape[0]
    class_num = preds[0].shape[1]
    mask = mask.unsqueeze(dim=-1)
    # mask=targets>-1#[batch_size,sum(_h*_w),1]
    # number of positives per image, clamped to avoid division by zero
    num_pos = torch.sum(mask, dim=[1, 2]).clamp_(min=1).float()  # [batch_size,]
    assert preds.shape[:2] == targets.shape[:2]
    loss = []
    for batch_index in range(batch_size):
        pred_pos = preds[batch_index]  # [sum(_h*_w),class_num]
        target_pos = targets[batch_index]  # [sum(_h*_w),1]
        # compare against class ids 1..class_num so id 0 maps to all-zero rows
        target_pos = (torch.arange(1, class_num + 1, device=target_pos.device)[None,
                      :] == target_pos).float()  # sparse-->onehot
        loss.append(focal_loss_from_logits(pred_pos, target_pos).view(1))
    return torch.cat(loss, dim=0) / num_pos  # [batch_size,]
155,840 | import torch
import torch.nn as nn
from detectron2.modeling.matcher import Matcher
from detectron2.modeling.sampling import subsample_labels
from detrex.layers.box_ops import box_iou, box_cxcywh_to_xyxy
def sample_topk_per_gt(pr_inds, gt_inds, iou, k):
    """For each ground-truth box, keep at most its top-k predictions by IoU.

    Each GT keeps as many of its top-k IoU predictions as it originally had
    matches. Empty gt_inds passes both inputs through unchanged.
    """
    if len(gt_inds) == 0:
        return pr_inds, gt_inds
    # Unique GT ids and how many predictions each was matched to.
    unique_gts, per_gt_counts = gt_inds.unique(return_counts=True)
    # Top-k prediction indices (by IoU) for every unique GT.
    _, topk_pr = iou[unique_gts].topk(k, dim=1)
    repeated_gts = unique_gts[:, None].repeat(1, k)
    # Truncate each GT's row to its original match count.
    kept_pr = torch.cat([row[:count] for count, row in zip(per_gt_counts, topk_pr)])
    kept_gt = torch.cat([row[:count] for count, row in zip(per_gt_counts, repeated_gts)])
    return kept_pr, kept_gt
155,841 | from fvcore.common.param_scheduler import MultiStepParamScheduler
from detectron2.config import LazyCall as L
from detectron2.solver import WarmupParamScheduler
The provided code snippet includes necessary dependencies for implementing the `default_coco_scheduler` function. Write a Python function `def default_coco_scheduler(epochs=50, decay_epochs=40, warmup_epochs=0)` to solve the following problem:
Returns the config for a default multi-step LR scheduler such as "50epochs", commonly referred to in papers, where every 1x has the total length of 1440k training images (~12 COCO epochs). LR is decayed once at the end of training. Args: epochs (int): total training epochs. decay_epochs (int): lr decay steps. warmup_epochs (int): warmup epochs. Returns: DictConfig: configs that define the multiplier for LR during training
Here is the function:
def default_coco_scheduler(epochs=50, decay_epochs=40, warmup_epochs=0):
    """
    Returns the config for a default multi-step LR scheduler such as "50epochs",
    with epochs measured as 7500 iterations each (batch size 16). The LR
    multiplier is 1.0 until `decay_epochs`, then 0.1 until the end of
    training, with an optional linear warmup.
    Args:
        epochs (int): total training epochs.
        decay_epochs (int): epoch at which the LR is multiplied by 0.1.
        warmup_epochs (int): warmup epochs.
    Returns:
        DictConfig: configs that define the multiplier for LR during training
    """
    # total number of iterations assuming 16 batch size, using 1440000/16=90000
    total_steps_16bs = epochs * 7500
    decay_steps = decay_epochs * 7500
    warmup_steps = warmup_epochs * 7500
    # Piecewise-constant multiplier: 1.0 on [0, decay_steps), 0.1 afterwards.
    scheduler = L(MultiStepParamScheduler)(
        values=[1.0, 0.1],
        milestones=[decay_steps, total_steps_16bs],
    )
    # Linear warmup from 0.001x over the first warmup_steps iterations.
    return L(WarmupParamScheduler)(
        scheduler=scheduler,
        warmup_length=warmup_steps / total_steps_16bs,
        warmup_method="linear",
        warmup_factor=0.001,
    )
155,844 | import argparse
import numpy as np
import torch
def parse_args():
parser = argparse.ArgumentParser("detrex deformable-detr model converter")
parser.add_argument(
"--source_model", default="", type=str, help="Path or url to the DETR model to convert"
)
parser.add_argument(
"--output_model", default="", type=str, help="Path where to save the converted model"
)
return parser.parse_args() | null |
155,847 | import math
import torch
def pos2posemb2d(pos, num_pos_feats=128, temperature=10000):
    """Sinusoidal 2-D position embedding for normalized (x, y) coordinates.

    pos[..., 0] is x and pos[..., 1] is y; the output concatenates the
    y-embedding followed by the x-embedding along the last dim, giving
    2 * num_pos_feats features.
    """
    angles = pos * (2 * math.pi)
    dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=pos.device)
    # Pairs of dims share a frequency: temperature^(2*floor(i/2)/num_pos_feats).
    dim_t = temperature ** (
        2 * torch.div(dim_t, 2, rounding_mode="floor") / num_pos_feats
    )
    emb_x = angles[..., 0, None] / dim_t
    emb_y = angles[..., 1, None] / dim_t
    # Interleave sin on even indices, cos on odd indices.
    emb_x = torch.stack((emb_x[..., 0::2].sin(), emb_x[..., 1::2].cos()), dim=-1).flatten(-2)
    emb_y = torch.stack((emb_y[..., 0::2].sin(), emb_y[..., 1::2].cos()), dim=-1).flatten(-2)
    return torch.cat((emb_y, emb_x), dim=-1)
155,848 | import math
import torch
def pos2posemb1d(pos, num_pos_feats=256, temperature=10000):
    """Sinusoidal 1-D position embedding for normalized scalar coordinates.

    Adds a trailing dim of num_pos_feats features (sin on even indices,
    cos on odd indices, frequencies shared in pairs).
    """
    angles = pos * (2 * math.pi)
    dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=pos.device)
    dim_t = temperature ** (
        2 * torch.div(dim_t, 2, rounding_mode="floor") / num_pos_feats
    )
    scaled = angles[..., None] / dim_t
    return torch.stack((scaled[..., 0::2].sin(), scaled[..., 1::2].cos()), dim=-1).flatten(-2)
155,849 | import math
import torch
def mask2pos(mask):
    """Recover normalized row/column center coordinates from a padding mask.

    mask is [B, H, W] with True on padded positions. Returns (y_embed,
    x_embed), each normalized to cell centers in (0, 1] along the valid
    extent of the first row/column.
    """
    valid = ~mask
    ys = valid[:, :, 0].cumsum(1, dtype=torch.float32)
    xs = valid[:, 0, :].cumsum(1, dtype=torch.float32)
    # Shift by half a cell and divide by the last cumulative count.
    return (ys - 0.5) / ys[:, -1:], (xs - 0.5) / xs[:, -1:]
155,850 | from typing import Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from torch.nn.parameter import Parameter
from torch.nn.modules import Module
The provided code snippet includes necessary dependencies for implementing the `multi_head_rcda_forward` function. Write a Python function `def multi_head_rcda_forward(query_row: torch.Tensor, query_col: torch.Tensor, key_row: torch.Tensor, key_col: torch.Tensor, value: torch.Tensor, embed_dim_to_check: int, num_heads: int, in_proj_weight: torch.Tensor, in_proj_bias: torch.Tensor, bias_k_row: Optional[torch.Tensor], bias_k_col: Optional[torch.Tensor], bias_v: Optional[torch.Tensor], add_zero_attn: bool, # type: bool dropout_p: float, # type: float out_proj_weight: torch.Tensor, # type: Tensor out_proj_bias: torch.Tensor, # type: Tensor training: bool=True, key_padding_mask: Optional[torch.Tensor] =None, need_weights: bool=True, # type: bool attn_mask: Optional[torch.Tensor]=None, use_separate_proj_weight: bool=False, q_row_proj_weight: Optional[torch.Tensor]=None, q_col_proj_weight: Optional[torch.Tensor]=None, k_row_proj_weight: Optional[torch.Tensor]=None, k_col_proj_weight: Optional[torch.Tensor]=None, v_proj_weight: Optional[torch.Tensor]=None, static_k: Optional[torch.Tensor]=None, static_v: Optional[torch.Tensor]=None ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]` to solve the following problem:
r""" Args: query_row, query_col, key_row, key_col, value: map a query and a set of key-value pairs to an output. See "Anchor DETR: Query Design for Transformer-Based Detector" for more details. embed_dim_to_check: total dimension of the model. num_heads: parallel attention heads. in_proj_weight, in_proj_bias: input projection weight and bias. bias_k, bias_v: bias of the key and value sequences to be added at dim=0. add_zero_attn: add a new batch of zeros to the key and value sequences at dim=1. dropout_p: probability of an element to be zeroed. out_proj_weight, out_proj_bias: the output projection weight and bias. training: apply dropout if is ``True``. key_padding_mask: if provided, specified padding elements in the key will be ignored by the attention. This is an binary mask. When the value is True, the corresponding value on the attention layer will be filled with -inf. need_weights: output attn_output_weights. attn_mask: 2D or 3D mask that prevents attention to certain positions. This is an additive mask (i.e. the values will be added to the attention layer). A 2D mask will be broadcasted for all the batches while a 3D mask allows to specify a different mask for the entries of each batch. use_separate_proj_weight: the function accept the proj. weights for query, key, and value in different forms. If false, in_proj_weight will be used, which is a combination of q_row_proj_weight, q_col_proj_weight, k_row_proj_weight, k_col_proj_weight, v_proj_weight. q_row_proj_weight, q_col_proj_weight, k_row_proj_weight, k_col_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias. static_k, static_v: static key and value used for attention operators. Shape: Inputs: - query_row: :math:`(N, L, E)` where L is the target sequence length, N is the batch size, E is the embedding dimension. - query_col: :math:`(N, L, E)` where L is the target sequence length, N is the batch size, E is the embedding dimension. 
- key_row: :math:`(N, H, W, E)`, where W is the source sequence row length, N is the batch size, E is the embedding dimension. - key_col: :math:`(N, H, W, E)`, where H is the source sequence column length, N is the batch size, E is the embedding dimension. - value: :math:`(N, H, W, E)` where HW is the source sequence length, N is the batch size, E is the embedding dimension. - key_padding_mask: :math:`(N, H, W)`, ByteTensor, where N is the batch size, HW is the source sequence length. - attn_mask: Not Implemented - static_k: Not Implemented - static_v: Not Implemented Outputs: - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is the embedding dimension. - attn_output_weights: :math:`(N, L, HW)` where N is the batch size, L is the target sequence length, HW is the source sequence length.
Here is the function:
def multi_head_rcda_forward(query_row: torch.Tensor,
query_col: torch.Tensor,
key_row: torch.Tensor,
key_col: torch.Tensor,
value: torch.Tensor,
embed_dim_to_check: int,
num_heads: int,
in_proj_weight: torch.Tensor,
in_proj_bias: torch.Tensor,
bias_k_row: Optional[torch.Tensor],
bias_k_col: Optional[torch.Tensor],
bias_v: Optional[torch.Tensor],
add_zero_attn: bool, # type: bool
dropout_p: float, # type: float
out_proj_weight: torch.Tensor, # type: Tensor
out_proj_bias: torch.Tensor, # type: Tensor
training: bool=True,
key_padding_mask: Optional[torch.Tensor] =None,
need_weights: bool=True, # type: bool
attn_mask: Optional[torch.Tensor]=None,
use_separate_proj_weight: bool=False,
q_row_proj_weight: Optional[torch.Tensor]=None,
q_col_proj_weight: Optional[torch.Tensor]=None,
k_row_proj_weight: Optional[torch.Tensor]=None,
k_col_proj_weight: Optional[torch.Tensor]=None,
v_proj_weight: Optional[torch.Tensor]=None,
static_k: Optional[torch.Tensor]=None,
static_v: Optional[torch.Tensor]=None
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
r"""
Args:
query_row, query_col, key_row, key_col, value: map a query and a set of key-value pairs to an output.
See "Anchor DETR: Query Design for Transformer-Based Detector" for more details.
embed_dim_to_check: total dimension of the model.
num_heads: parallel attention heads.
in_proj_weight, in_proj_bias: input projection weight and bias.
bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
dropout_p: probability of an element to be zeroed.
out_proj_weight, out_proj_bias: the output projection weight and bias.
training: apply dropout if is ``True``.
key_padding_mask: if provided, specified padding elements in the key will
be ignored by the attention. This is an binary mask. When the value is True,
the corresponding value on the attention layer will be filled with -inf.
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. This is an additive mask
(i.e. the values will be added to the attention layer). A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
use_separate_proj_weight: the function accept the proj. weights for query, key,
and value in different forms. If false, in_proj_weight will be used, which is
a combination of q_row_proj_weight, q_col_proj_weight, k_row_proj_weight, k_col_proj_weight, v_proj_weight.
q_row_proj_weight, q_col_proj_weight, k_row_proj_weight, k_col_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
static_k, static_v: static key and value used for attention operators.
Shape:
Inputs:
- query_row: :math:`(N, L, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- query_col: :math:`(N, L, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key_row: :math:`(N, H, W, E)`, where W is the source sequence row length, N is the batch size, E is
the embedding dimension.
- key_col: :math:`(N, H, W, E)`, where H is the source sequence column length, N is the batch size, E is
the embedding dimension.
- value: :math:`(N, H, W, E)` where HW is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, H, W)`, ByteTensor, where N is the batch size, HW is the source sequence length.
- attn_mask: Not Implemented
- static_k: Not Implemented
- static_v: Not Implemented
Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, HW)` where N is the batch size,
L is the target sequence length, HW is the source sequence length.
"""
bsz, tgt_len, embed_dim = query_row.size()
src_len_row = key_row.size()[2]
src_len_col = key_col.size()[1]
assert embed_dim == embed_dim_to_check
# assert key.size() == value.size()
head_dim = embed_dim // num_heads
assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
scaling = float(head_dim) ** -0.5
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q_row = F.linear(query_row, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim * 1
_end = embed_dim * 2
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q_col = F.linear(query_col, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim * 2
_end = embed_dim * 3
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
k_row = F.linear(key_row, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim * 3
_end = embed_dim * 4
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
k_col = F.linear(key_col, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim * 4
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
v = F.linear(value, _w, _b)
q_row = q_row.transpose(0, 1)
q_col = q_col.transpose(0, 1)
k_row = k_row.mean(1).transpose(0, 1)
k_col = k_col.mean(2).transpose(0, 1)
q_row = q_row * scaling
q_col = q_col * scaling
q_row = q_row.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
q_col = q_col.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
if k_row is not None:
k_row = k_row.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
if k_col is not None:
k_col = k_col.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
if v is not None:
v = v.contiguous().permute(1,2,0,3).reshape(src_len_col,src_len_row, bsz*num_heads, head_dim).permute(2,0,1,3)
attn_output_weights_row = torch.bmm(q_row, k_row.transpose(1, 2))
attn_output_weights_col = torch.bmm(q_col, k_col.transpose(1, 2))
assert list(attn_output_weights_row.size()) == [bsz * num_heads, tgt_len, src_len_row]
assert list(attn_output_weights_col.size()) == [bsz * num_heads, tgt_len, src_len_col]
if key_padding_mask is not None:
mask_row=key_padding_mask[:,0,:].unsqueeze(1).unsqueeze(2)
mask_col=key_padding_mask[:,:,0].unsqueeze(1).unsqueeze(2)
attn_output_weights_row = attn_output_weights_row.view(bsz, num_heads, tgt_len, src_len_row)
attn_output_weights_col = attn_output_weights_col.view(bsz, num_heads, tgt_len, src_len_col)
attn_output_weights_row = attn_output_weights_row.masked_fill(mask_row,float('-inf'))
attn_output_weights_col = attn_output_weights_col.masked_fill(mask_col, float('-inf'))
attn_output_weights_row = attn_output_weights_row.view(bsz * num_heads, tgt_len, src_len_row)
attn_output_weights_col = attn_output_weights_col.view(bsz * num_heads, tgt_len, src_len_col)
attn_output_weights_col = F.softmax(attn_output_weights_col, dim=-1)
attn_output_weights_row = F.softmax(attn_output_weights_row, dim=-1)
attn_output_weights_col = F.dropout(attn_output_weights_col, p=dropout_p, training=training)
attn_output_weights_row = F.dropout(attn_output_weights_row, p=dropout_p, training=training)
efficient_compute=True
# This config will not affect the performance.
# It will compute the short edge first which can save the memory and run slightly faster but both of them should get the same results.
# You can also set it "False" if your graph needs to be always the same.
if efficient_compute:
if src_len_col<src_len_row:
b_ein,q_ein,w_ein = attn_output_weights_row.shape
b_ein,h_ein,w_ein,c_ein = v.shape
attn_output_row = torch.matmul(attn_output_weights_row,v.permute(0,2,1,3).reshape(b_ein,w_ein,h_ein*c_ein)).reshape(b_ein,q_ein,h_ein,c_ein).permute(0,2,1,3)
attn_output = torch.matmul(attn_output_weights_col.permute(1,0,2)[:,:,None,:],attn_output_row.permute(2,0,1,3)).squeeze(-2).reshape(tgt_len,bsz,embed_dim)
### the following code base on einsum get the same results
# attn_output_row = torch.einsum("bqw,bhwc->bhqc",attn_output_weights_row,v)
# attn_output = torch.einsum("bqh,bhqc->qbc",attn_output_weights_col,attn_output_row).reshape(tgt_len,bsz,embed_dim)
else:
b_ein,q_ein,h_ein=attn_output_weights_col.shape
b_ein,h_ein,w_ein,c_ein = v.shape
attn_output_col = torch.matmul(attn_output_weights_col,v.reshape(b_ein,h_ein,w_ein*c_ein)).reshape(b_ein,q_ein,w_ein,c_ein)
attn_output = torch.matmul(attn_output_weights_row[:,:,None,:],attn_output_col).squeeze(-2).permute(1,0,2).reshape(tgt_len, bsz, embed_dim)
### the following code base on einsum get the same results
# attn_output_col = torch.einsum("bqh,bhwc->bqwc", attn_output_weights_col, v)
# attn_output = torch.einsum("bqw,bqwc->qbc", attn_output_weights_row, attn_output_col).reshape(tgt_len, bsz,embed_dim)
else:
b_ein, q_ein, h_ein = attn_output_weights_col.shape
b_ein, h_ein, w_ein, c_ein = v.shape
attn_output_col = torch.matmul(attn_output_weights_col, v.reshape(b_ein, h_ein, w_ein * c_ein)).reshape(b_ein, q_ein, w_ein, c_ein)
attn_output = torch.matmul(attn_output_weights_row[:, :, None, :], attn_output_col).squeeze(-2).permute(1, 0, 2).reshape(tgt_len, bsz, embed_dim)
### the following code base on einsum get the same results
# attn_output_col = torch.einsum("bqh,bhwc->bqwc", attn_output_weights_col, v)
# attn_output = torch.einsum("bqw,bqwc->qbc", attn_output_weights_row, attn_output_col).reshape(tgt_len, bsz,embed_dim)
attn_output = F.linear(attn_output, out_proj_weight, out_proj_bias)
if need_weights:
return attn_output,torch.einsum("bqw,bqh->qbhw",attn_output_weights_row,attn_output_weights_col).reshape(tgt_len,bsz,num_heads,src_len_col,src_len_row).mean(2)
else:
return attn_output, None | r""" Args: query_row, query_col, key_row, key_col, value: map a query and a set of key-value pairs to an output. See "Anchor DETR: Query Design for Transformer-Based Detector" for more details. embed_dim_to_check: total dimension of the model. num_heads: parallel attention heads. in_proj_weight, in_proj_bias: input projection weight and bias. bias_k, bias_v: bias of the key and value sequences to be added at dim=0. add_zero_attn: add a new batch of zeros to the key and value sequences at dim=1. dropout_p: probability of an element to be zeroed. out_proj_weight, out_proj_bias: the output projection weight and bias. training: apply dropout if is ``True``. key_padding_mask: if provided, specified padding elements in the key will be ignored by the attention. This is an binary mask. When the value is True, the corresponding value on the attention layer will be filled with -inf. need_weights: output attn_output_weights. attn_mask: 2D or 3D mask that prevents attention to certain positions. This is an additive mask (i.e. the values will be added to the attention layer). A 2D mask will be broadcasted for all the batches while a 3D mask allows to specify a different mask for the entries of each batch. use_separate_proj_weight: the function accept the proj. weights for query, key, and value in different forms. If false, in_proj_weight will be used, which is a combination of q_row_proj_weight, q_col_proj_weight, k_row_proj_weight, k_col_proj_weight, v_proj_weight. q_row_proj_weight, q_col_proj_weight, k_row_proj_weight, k_col_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias. static_k, static_v: static key and value used for attention operators. Shape: Inputs: - query_row: :math:`(N, L, E)` where L is the target sequence length, N is the batch size, E is the embedding dimension. - query_col: :math:`(N, L, E)` where L is the target sequence length, N is the batch size, E is the embedding dimension. 
- key_row: :math:`(N, H, W, E)`, where W is the source sequence row length, N is the batch size, E is the embedding dimension. - key_col: :math:`(N, H, W, E)`, where H is the source sequence column length, N is the batch size, E is the embedding dimension. - value: :math:`(N, H, W, E)` where HW is the source sequence length, N is the batch size, E is the embedding dimension. - key_padding_mask: :math:`(N, H, W)`, ByteTensor, where N is the batch size, HW is the source sequence length. - attn_mask: Not Implemented - static_k: Not Implemented - static_v: Not Implemented Outputs: - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is the embedding dimension. - attn_output_weights: :math:`(N, L, HW)` where N is the batch size, L is the target sequence length, HW is the source sequence length. |
155,851 | import copy
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from detrex.utils import inverse_sigmoid
from .row_column_decoupled_attention import MultiheadRCDA
from .utils import pos2posemb1d, pos2posemb2d, mask2pos
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) | null |
155,852 | import copy
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from detrex.utils import inverse_sigmoid
from .row_column_decoupled_attention import MultiheadRCDA
from .utils import pos2posemb1d, pos2posemb2d, mask2pos
The provided code snippet includes necessary dependencies for implementing the `_get_activation_fn` function. Write a Python function `def _get_activation_fn(activation)` to solve the following problem:
Return an activation function given a string
Here is the function:
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(F"activation should be relu/gelu, not {activation}.") | Return an activation function given a string |
155,853 | from typing import List
import torch
import torch.nn as nn
import torch.nn.functional as F
from detrex.layers import box_cxcywh_to_xyxy, generalized_box_iou
from detrex.utils import get_world_size, is_dist_avail_and_initialized
The provided code snippet includes necessary dependencies for implementing the `sigmoid_focal_loss` function. Write a Python function `def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2)` to solve the following problem:
Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. Args: inputs (torch.Tensor): A float tensor of arbitrary shape. The predictions for each example. targets (torch.Tensor): A float tensor with the same shape as inputs. Stores the binary classification label for each element in inputs (0 for the negative class and 1 for the positive class). num_boxes (int): The number of boxes. alpha (float, optional): Weighting factor in range (0, 1) to balance positive vs negative examples. Default: 0.25. gamma (float): Exponent of the modulating factor (1 - p_t) to balance easy vs hard examples. Default: 2. Returns: torch.Tensor: The computed sigmoid focal loss.
Here is the function:
def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2):
"""
Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.
Args:
inputs (torch.Tensor): A float tensor of arbitrary shape.
The predictions for each example.
targets (torch.Tensor): A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
num_boxes (int): The number of boxes.
alpha (float, optional): Weighting factor in range (0, 1) to balance
positive vs negative examples. Default: 0.25.
gamma (float): Exponent of the modulating factor (1 - p_t) to
balance easy vs hard examples. Default: 2.
Returns:
torch.Tensor: The computed sigmoid focal loss.
"""
prob = inputs.sigmoid()
ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
p_t = prob * targets + (1 - prob) * (1 - targets)
loss = ce_loss * ((1 - p_t) ** gamma)
if alpha >= 0:
alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
loss = alpha_t * loss
return loss.mean(1).sum() / num_boxes | Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. Args: inputs (torch.Tensor): A float tensor of arbitrary shape. The predictions for each example. targets (torch.Tensor): A float tensor with the same shape as inputs. Stores the binary classification label for each element in inputs (0 for the negative class and 1 for the positive class). num_boxes (int): The number of boxes. alpha (float, optional): Weighting factor in range (0, 1) to balance positive vs negative examples. Default: 0.25. gamma (float): Exponent of the modulating factor (1 - p_t) to balance easy vs hard examples. Default: 2. Returns: torch.Tensor: The computed sigmoid focal loss. |
155,857 | from fvcore.common.param_scheduler import (
MultiStepParamScheduler,
StepParamScheduler,
StepWithFixedGammaParamScheduler,
ConstantParamScheduler,
CosineParamScheduler,
LinearParamScheduler,
ExponentialParamScheduler,
)
from detectron2.config import LazyCall as L
from detectron2.solver import WarmupParamScheduler
def multistep_lr_scheduler(
values=[1.0, 0.1],
warmup_steps=0,
num_updates=90000,
milestones=[82500],
warmup_method="linear",
warmup_factor=0.001,
):
# total steps default to num_updates, if None, will use milestones[-1].
if num_updates is None:
total_steps = milestones[-1]
else:
total_steps = num_updates
# define multi-step scheduler
scheduler = L(MultiStepParamScheduler)(
values=values,
milestones=milestones,
num_updates=num_updates,
)
# wrap with warmup scheduler
return L(WarmupParamScheduler)(
scheduler=scheduler,
warmup_length=warmup_steps / total_steps,
warmup_method=warmup_method,
warmup_factor=warmup_factor,
) | null |
155,858 | from fvcore.common.param_scheduler import (
MultiStepParamScheduler,
StepParamScheduler,
StepWithFixedGammaParamScheduler,
ConstantParamScheduler,
CosineParamScheduler,
LinearParamScheduler,
ExponentialParamScheduler,
)
from detectron2.config import LazyCall as L
from detectron2.solver import WarmupParamScheduler
def step_lr_scheduler(
values,
warmup_steps,
num_updates,
warmup_method="linear",
warmup_factor=0.001,
):
# define step scheduler
scheduler = L(StepParamScheduler)(
values=values,
num_updates=num_updates
)
# wrap with warmup scheduler
return L(WarmupParamScheduler)(
scheduler=scheduler,
warmup_length=warmup_steps / num_updates,
warmup_method=warmup_method,
warmup_factor=warmup_factor,
) | null |
155,859 | from fvcore.common.param_scheduler import (
MultiStepParamScheduler,
StepParamScheduler,
StepWithFixedGammaParamScheduler,
ConstantParamScheduler,
CosineParamScheduler,
LinearParamScheduler,
ExponentialParamScheduler,
)
from detectron2.config import LazyCall as L
from detectron2.solver import WarmupParamScheduler
def step_lr_scheduler_with_fixed_gamma(
base_value,
num_decays,
gamma,
num_updates,
warmup_steps,
warmup_method="linear",
warmup_factor=0.001,
):
# define step scheduler with fixed gamma
scheduler = L(StepWithFixedGammaParamScheduler)(
base_value=base_value,
num_decays=num_decays,
gamma=gamma,
num_updates=num_updates,
)
# wrap with warmup scheduler
return L(WarmupParamScheduler)(
scheduler=scheduler,
warmup_length=warmup_steps / num_updates,
warmup_method=warmup_method,
warmup_factor=warmup_factor,
) | null |
155,860 | from fvcore.common.param_scheduler import (
MultiStepParamScheduler,
StepParamScheduler,
StepWithFixedGammaParamScheduler,
ConstantParamScheduler,
CosineParamScheduler,
LinearParamScheduler,
ExponentialParamScheduler,
)
from detectron2.config import LazyCall as L
from detectron2.solver import WarmupParamScheduler
def cosine_lr_scheduler(
start_value,
end_value,
num_updates,
warmup_steps,
warmup_method="linear",
warmup_factor=0.001,
):
# define cosine scheduler
scheduler = L(CosineParamScheduler)(
start_value=start_value,
end_value=end_value,
)
# wrap with warmup scheduler
return L(WarmupParamScheduler)(
scheduler=scheduler,
warmup_length=warmup_steps / num_updates,
warmup_method=warmup_method,
warmup_factor=warmup_factor,
) | null |
155,861 | from fvcore.common.param_scheduler import (
MultiStepParamScheduler,
StepParamScheduler,
StepWithFixedGammaParamScheduler,
ConstantParamScheduler,
CosineParamScheduler,
LinearParamScheduler,
ExponentialParamScheduler,
)
from detectron2.config import LazyCall as L
from detectron2.solver import WarmupParamScheduler
def linear_lr_scheduler(
start_value,
end_value,
num_updates,
warmup_steps,
warmup_method="linear",
warmup_factor=0.001,
):
# define linear scheduler
scheduler = L(LinearParamScheduler)(
start_value=start_value,
end_value=end_value,
)
# wrap with warmup scheduler
return L(WarmupParamScheduler)(
scheduler=scheduler,
warmup_length=warmup_steps / num_updates,
warmup_method=warmup_method,
warmup_factor=warmup_factor,
) | null |
155,862 | from fvcore.common.param_scheduler import (
MultiStepParamScheduler,
StepParamScheduler,
StepWithFixedGammaParamScheduler,
ConstantParamScheduler,
CosineParamScheduler,
LinearParamScheduler,
ExponentialParamScheduler,
)
from detectron2.config import LazyCall as L
from detectron2.solver import WarmupParamScheduler
def constant_lr_scheduler(
value,
num_updates,
warmup_steps,
warmup_method="linear",
warmup_factor=0.001,
):
# define constant scheduler
scheduler = L(ConstantParamScheduler)(
value=value
)
# wrap with warmup scheduler
return L(WarmupParamScheduler)(
scheduler=scheduler,
warmup_length=warmup_steps / num_updates,
warmup_method=warmup_method,
warmup_factor=warmup_factor,
) | null |
155,863 | from fvcore.common.param_scheduler import (
MultiStepParamScheduler,
StepParamScheduler,
StepWithFixedGammaParamScheduler,
ConstantParamScheduler,
CosineParamScheduler,
LinearParamScheduler,
ExponentialParamScheduler,
)
from detectron2.config import LazyCall as L
from detectron2.solver import WarmupParamScheduler
def exponential_lr_scheduler(
start_value,
decay,
num_updates,
warmup_steps,
warmup_method="linear",
warmup_factor=0.001,
):
# define exponential scheduler
scheduler = L(ExponentialParamScheduler)(
start_value=start_value,
decay=decay,
)
# wrap with warmup scheduler
return L(WarmupParamScheduler)(
scheduler=scheduler,
warmup_length=warmup_steps / num_updates,
warmup_method=warmup_method,
warmup_factor=warmup_factor,
) | null |
155,864 | import argparse
import glob
import multiprocessing as mp
import numpy as np
import os
import sys
import tempfile
import time
import warnings
import cv2
import tqdm
from demo.mot_predictors import VisualizationDemo
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import LazyConfig, instantiate
from detectron2.data.detection_utils import read_image
from detectron2.utils.logger import setup_logger
def setup(args):
cfg = LazyConfig.load(args.config_file)
cfg = LazyConfig.apply_overrides(cfg, args.opts)
return cfg | null |
155,865 | import argparse
import glob
import multiprocessing as mp
import numpy as np
import os
import sys
import tempfile
import time
import warnings
import cv2
import tqdm
from demo.mot_predictors import VisualizationDemo
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import LazyConfig, instantiate
from detectron2.data.detection_utils import read_image
from detectron2.utils.logger import setup_logger
def get_parser():
parser = argparse.ArgumentParser(description="detrex demo for visualizing customized inputs")
parser.add_argument(
"--config-file",
default="projects/dino/configs/dino_r50_4scale_12ep.py",
metavar="FILE",
help="path to config file",
)
parser.add_argument("--webcam", action="store_true", help="Take inputs from webcam.")
parser.add_argument("--video-input", help="Path to video file.")
parser.add_argument(
"--input",
nargs="+",
help="A list of space separated input images; "
"or a single glob pattern such as 'directory/*.jpg'",
)
parser.add_argument(
"--output",
help="A file or directory to save output visualizations. "
"If not given, will show output in an OpenCV window.",
)
parser.add_argument(
"--min_size_test",
type=int,
default=800,
help="Size of the smallest side of the image during testing. Set to zero to disable resize in testing.",
)
parser.add_argument(
"--max_size_test",
type=float,
default=1333,
help="Maximum size of the side of the image during testing.",
)
parser.add_argument(
"--img_format",
type=str,
default="RGB",
help="The format of the loading images.",
)
parser.add_argument(
"--metadata_dataset",
type=str,
default="coco_2017_val",
help="The metadata infomation to be used. Default to COCO val metadata.",
)
parser.add_argument(
"--confidence-threshold",
type=float,
default=0.5,
help="Minimum score for instance predictions to be shown",
)
parser.add_argument(
"--opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
return parser | null |
155,866 | import argparse
import glob
import multiprocessing as mp
import numpy as np
import os
import sys
import tempfile
import time
import warnings
import cv2
import tqdm
from demo.mot_predictors import VisualizationDemo
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import LazyConfig, instantiate
from detectron2.data.detection_utils import read_image
from detectron2.utils.logger import setup_logger
def test_opencv_video_format(codec, file_ext):
with tempfile.TemporaryDirectory(prefix="video_format_test") as dir:
filename = os.path.join(dir, "test_file" + file_ext)
writer = cv2.VideoWriter(
filename=filename,
fourcc=cv2.VideoWriter_fourcc(*codec),
fps=float(30),
frameSize=(10, 10),
isColor=True,
)
[writer.write(np.zeros((10, 10, 3), np.uint8)) for _ in range(30)]
writer.release()
if os.path.isfile(filename):
return True
return False | null |
155,867 | import atexit
import bisect
from copy import copy
import multiprocessing as mp
from collections import deque
from copy import deepcopy
import cv2
import torch
import torchvision.transforms.functional as F
import detectron2.data.transforms as T
from detectron2.data import MetadataCatalog
from detectron2.structures import Instances
from detectron2.utils.visualizer import (
ColorMode,
Visualizer,
_create_text_labels,
)
from detectron2.utils.video_visualizer import (
_DetectedInstance,
VideoVisualizer,
)
def filter_predictions_with_area(predictions, area_threshold=100):
if "track_instances" in predictions:
preds = predictions["track_instances"]
wh = preds.boxes[:, 2:4] - preds.boxes[:, 0:2]
areas = wh[:, 0] * wh[:, 1]
keep_idxs = areas > area_threshold
predictions = copy(predictions) # don't modify the original
predictions["track_instances"] = preds[keep_idxs]
return predictions | null |
155,868 | import atexit
import bisect
from copy import copy
import multiprocessing as mp
from collections import deque
from copy import deepcopy
import cv2
import torch
import torchvision.transforms.functional as F
import detectron2.data.transforms as T
from detectron2.data import MetadataCatalog
from detectron2.structures import Instances
from detectron2.utils.visualizer import (
ColorMode,
Visualizer,
_create_text_labels,
)
from detectron2.utils.video_visualizer import (
_DetectedInstance,
VideoVisualizer,
)
def filter_predictions_with_confidence(predictions, confidence_threshold=0.5):
if "track_instances" in predictions:
preds = predictions["track_instances"]
keep_idxs = preds.scores > confidence_threshold
predictions = copy(predictions) # don't modify the original
predictions["track_instances"] = preds[keep_idxs]
return predictions | null |
155,869 | import atexit
import bisect
from copy import copy
import multiprocessing as mp
from collections import deque
import cv2
import torch
import detectron2.data.transforms as T
from detectron2.data import MetadataCatalog
from detectron2.structures import Instances
from detectron2.utils.video_visualizer import VideoVisualizer
from detectron2.utils.visualizer import ColorMode, Visualizer
def filter_predictions_with_confidence(predictions, confidence_threshold=0.5):
if "instances" in predictions:
preds = predictions["instances"]
keep_idxs = preds.scores > confidence_threshold
predictions = copy(predictions) # don't modify the original
predictions["instances"] = preds[keep_idxs]
return predictions | null |
155,870 | import argparse
import glob
import multiprocessing as mp
import numpy as np
import os
import sys
import tempfile
import time
import warnings
import cv2
import tqdm
from demo.predictors import VisualizationDemo
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import LazyConfig, instantiate
from detectron2.data.detection_utils import read_image
from detectron2.utils.logger import setup_logger
def setup(args):
cfg = LazyConfig.load(args.config_file)
cfg = LazyConfig.apply_overrides(cfg, args.opts)
return cfg | null |
155,871 | import argparse
import glob
import multiprocessing as mp
import numpy as np
import os
import sys
import tempfile
import time
import warnings
import cv2
import tqdm
from demo.predictors import VisualizationDemo
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import LazyConfig, instantiate
from detectron2.data.detection_utils import read_image
from detectron2.utils.logger import setup_logger
def get_parser():
parser = argparse.ArgumentParser(description="detrex demo for visualizing customized inputs")
parser.add_argument(
"--config-file",
default="projects/dino/configs/dino_r50_4scale_12ep.py",
metavar="FILE",
help="path to config file",
)
parser.add_argument("--webcam", action="store_true", help="Take inputs from webcam.")
parser.add_argument("--video-input", help="Path to video file.")
parser.add_argument(
"--input",
nargs="+",
help="A list of space separated input images; "
"or a single glob pattern such as 'directory/*.jpg'",
)
parser.add_argument(
"--output",
help="A file or directory to save output visualizations. "
"If not given, will show output in an OpenCV window.",
)
parser.add_argument(
"--min_size_test",
type=int,
default=800,
help="Size of the smallest side of the image during testing. Set to zero to disable resize in testing.",
)
parser.add_argument(
"--max_size_test",
type=float,
default=1333,
help="Maximum size of the side of the image during testing.",
)
parser.add_argument(
"--img_format",
type=str,
default="RGB",
help="The format of the loading images.",
)
parser.add_argument(
"--metadata_dataset",
type=str,
default="coco_2017_val",
help="The metadata infomation to be used. Default to COCO val metadata.",
)
parser.add_argument(
"--confidence-threshold",
type=float,
default=0.5,
help="Minimum score for instance predictions to be shown",
)
parser.add_argument(
"--opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
return parser | null |
155,872 | import argparse
import glob
import multiprocessing as mp
import numpy as np
import os
import sys
import tempfile
import time
import warnings
import cv2
import tqdm
from demo.predictors import VisualizationDemo
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import LazyConfig, instantiate
from detectron2.data.detection_utils import read_image
from detectron2.utils.logger import setup_logger
def test_opencv_video_format(codec, file_ext):
with tempfile.TemporaryDirectory(prefix="video_format_test") as dir:
filename = os.path.join(dir, "test_file" + file_ext)
writer = cv2.VideoWriter(
filename=filename,
fourcc=cv2.VideoWriter_fourcc(*codec),
fps=float(30),
frameSize=(10, 10),
isColor=True,
)
[writer.write(np.zeros((10, 10, 3), np.uint8)) for _ in range(30)]
writer.release()
if os.path.isfile(filename):
return True
return False | null |
155,873 | import logging
import os
import pickle
import torch
import torch.nn as nn
from termcolor import colored
from collections import defaultdict
from typing import Any, Dict, Iterable, List, NamedTuple, Optional, Tuple
from fvcore.common.checkpoint import Checkpointer, _IncompatibleKeys
from torch.nn.parallel import DistributedDataParallel
import detectron2.utils.comm as comm
from detectron2.utils.file_io import PathManager
from .c2_model_loading import align_and_update_state_dicts
def _named_modules_with_dup(
model: nn.Module, prefix: str = ""
) -> Iterable[Tuple[str, nn.Module]]:
"""
The same as `model.named_modules()`, except that it includes
duplicated modules that have more than one name.
"""
yield prefix, model
for name, module in model._modules.items():
if module is None:
continue
submodule_prefix = prefix + ("." if prefix else "") + name
yield from _named_modules_with_dup(module, submodule_prefix)
The provided code snippet includes necessary dependencies for implementing the `_filter_reused_missing_keys` function. Write a Python function `def _filter_reused_missing_keys(model: nn.Module, keys: List[str]) -> List[str]` to solve the following problem:
Filter "missing keys" to not include keys that have been loaded with another name.
Here is the function:
def _filter_reused_missing_keys(model: nn.Module, keys: List[str]) -> List[str]:
"""
Filter "missing keys" to not include keys that have been loaded with another name.
"""
keyset = set(keys)
param_to_names = defaultdict(set) # param -> names that points to it
for module_prefix, module in _named_modules_with_dup(model):
for name, param in list(module.named_parameters(recurse=False)) + list(
module.named_buffers(recurse=False)
):
full_name = (module_prefix + "." if module_prefix else "") + name
param_to_names[param].add(full_name)
for names in param_to_names.values():
# if one name appears missing but its alias exists, then this
# name is not considered missing
if any(n in keyset for n in names) and not all(n in keyset for n in names):
[keyset.remove(n) for n in names if n in keyset]
return list(keyset) | Filter "missing keys" to not include keys that have been loaded with another name. |
155,874 | import logging
import os
import pickle
import torch
import torch.nn as nn
from termcolor import colored
from collections import defaultdict
from typing import Any, Dict, Iterable, List, NamedTuple, Optional, Tuple
from fvcore.common.checkpoint import Checkpointer, _IncompatibleKeys
from torch.nn.parallel import DistributedDataParallel
import detectron2.utils.comm as comm
from detectron2.utils.file_io import PathManager
from .c2_model_loading import align_and_update_state_dicts
def _group_checkpoint_keys(keys: List[str]) -> Dict[str, List[str]]:
"""
Group keys based on common prefixes. A prefix is the string up to the final
"." in each key.
Args:
keys (list[str]): list of parameter names, i.e. keys in the model
checkpoint dict.
Returns:
dict[list]: keys with common prefixes are grouped into lists.
"""
groups = defaultdict(list)
for key in keys:
pos = key.rfind(".")
if pos >= 0:
head, tail = key[:pos], [key[pos + 1 :]]
else:
head, tail = key, []
groups[head].extend(tail)
return groups
def _group_to_str(group: List[str]) -> str:
"""
Format a group of parameter name suffixes into a loggable string.
Args:
group (list[str]): list of parameter name suffixes.
Returns:
str: formated string.
"""
if len(group) == 0:
return ""
if len(group) == 1:
return "." + group[0]
return ".{" + ", ".join(sorted(group)) + "}"
The provided code snippet includes necessary dependencies for implementing the `get_missing_parameters_message` function. Write a Python function `def get_missing_parameters_message(keys: List[str]) -> str` to solve the following problem:
Get a logging-friendly message to report parameter names (keys) that are in the model but not found in a checkpoint. Args: keys (list[str]): List of keys that were not found in the checkpoint. Returns: str: message.
Here is the function:
def get_missing_parameters_message(keys: List[str]) -> str:
"""
Get a logging-friendly message to report parameter names (keys) that are in
the model but not found in a checkpoint.
Args:
keys (list[str]): List of keys that were not found in the checkpoint.
Returns:
str: message.
"""
groups = _group_checkpoint_keys(keys)
msg_per_group = sorted(k + _group_to_str(v) for k, v in groups.items())
msg = "Some model parameters or buffers are not found in the checkpoint:\n"
msg += "\n".join([colored(x, "blue") for x in msg_per_group])
return msg | Get a logging-friendly message to report parameter names (keys) that are in the model but not found in a checkpoint. Args: keys (list[str]): List of keys that were not found in the checkpoint. Returns: str: message. |
155,875 | import logging
import os
import pickle
import torch
import torch.nn as nn
from termcolor import colored
from collections import defaultdict
from typing import Any, Dict, Iterable, List, NamedTuple, Optional, Tuple
from fvcore.common.checkpoint import Checkpointer, _IncompatibleKeys
from torch.nn.parallel import DistributedDataParallel
import detectron2.utils.comm as comm
from detectron2.utils.file_io import PathManager
from .c2_model_loading import align_and_update_state_dicts
def _group_checkpoint_keys(keys: List[str]) -> Dict[str, List[str]]:
"""
Group keys based on common prefixes. A prefix is the string up to the final
"." in each key.
Args:
keys (list[str]): list of parameter names, i.e. keys in the model
checkpoint dict.
Returns:
dict[list]: keys with common prefixes are grouped into lists.
"""
groups = defaultdict(list)
for key in keys:
pos = key.rfind(".")
if pos >= 0:
head, tail = key[:pos], [key[pos + 1 :]]
else:
head, tail = key, []
groups[head].extend(tail)
return groups
def _group_to_str(group: List[str]) -> str:
"""
Format a group of parameter name suffixes into a loggable string.
Args:
group (list[str]): list of parameter name suffixes.
Returns:
str: formated string.
"""
if len(group) == 0:
return ""
if len(group) == 1:
return "." + group[0]
return ".{" + ", ".join(sorted(group)) + "}"
The provided code snippet includes necessary dependencies for implementing the `get_unexpected_parameters_message` function. Write a Python function `def get_unexpected_parameters_message(keys: List[str]) -> str` to solve the following problem:
Get a logging-friendly message to report parameter names (keys) that are in the checkpoint but not found in the model. Args: keys (list[str]): List of keys that were not found in the model. Returns: str: message.
Here is the function:
def get_unexpected_parameters_message(keys: List[str]) -> str:
"""
Get a logging-friendly message to report parameter names (keys) that are in
the checkpoint but not found in the model.
Args:
keys (list[str]): List of keys that were not found in the model.
Returns:
str: message.
"""
groups = _group_checkpoint_keys(keys)
msg = "The checkpoint state_dict contains keys that are not used by the model:\n"
msg += "\n".join(
" " + colored(k + _group_to_str(v), "magenta") for k, v in groups.items()
)
return msg | Get a logging-friendly message to report parameter names (keys) that are in the checkpoint but not found in the model. Args: keys (list[str]): List of keys that were not found in the model. Returns: str: message. |
155,876 | import copy
import logging
import re
from typing import Dict, List
import torch
from tabulate import tabulate
from detectron2.utils.logger import setup_logger
def convert_c2_detectron_names(weights):
"""
Map Caffe2 Detectron weight names to Detectron2 names.
Args:
weights (dict): name -> tensor
Returns:
dict: detectron2 names -> tensor
dict: detectron2 names -> C2 names
"""
logger = logging.getLogger(__name__)
logger.info("Renaming Caffe2 weights ......")
original_keys = sorted(weights.keys())
layer_keys = copy.deepcopy(original_keys)
layer_keys = convert_basic_c2_names(layer_keys)
# --------------------------------------------------------------------------
# RPN hidden representation conv
# --------------------------------------------------------------------------
# FPN case
# In the C2 model, the RPN hidden layer conv is defined for FPN level 2 and then
# shared for all other levels, hence the appearance of "fpn2"
layer_keys = [
k.replace("conv.rpn.fpn2", "proposal_generator.rpn_head.conv") for k in layer_keys
]
# Non-FPN case
layer_keys = [k.replace("conv.rpn", "proposal_generator.rpn_head.conv") for k in layer_keys]
# --------------------------------------------------------------------------
# RPN box transformation conv
# --------------------------------------------------------------------------
# FPN case (see note above about "fpn2")
layer_keys = [
k.replace("rpn.bbox.pred.fpn2", "proposal_generator.rpn_head.anchor_deltas")
for k in layer_keys
]
layer_keys = [
k.replace("rpn.cls.logits.fpn2", "proposal_generator.rpn_head.objectness_logits")
for k in layer_keys
]
# Non-FPN case
layer_keys = [
k.replace("rpn.bbox.pred", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys
]
layer_keys = [
k.replace("rpn.cls.logits", "proposal_generator.rpn_head.objectness_logits")
for k in layer_keys
]
# --------------------------------------------------------------------------
# Fast R-CNN box head
# --------------------------------------------------------------------------
layer_keys = [re.sub("^bbox\\.pred", "bbox_pred", k) for k in layer_keys]
layer_keys = [re.sub("^cls\\.score", "cls_score", k) for k in layer_keys]
layer_keys = [re.sub("^fc6\\.", "box_head.fc1.", k) for k in layer_keys]
layer_keys = [re.sub("^fc7\\.", "box_head.fc2.", k) for k in layer_keys]
# 4conv1fc head tensor names: head_conv1_w, head_conv1_gn_s
layer_keys = [re.sub("^head\\.conv", "box_head.conv", k) for k in layer_keys]
# --------------------------------------------------------------------------
# FPN lateral and output convolutions
# --------------------------------------------------------------------------
def fpn_map(name):
"""
Look for keys with the following patterns:
1) Starts with "fpn.inner."
Example: "fpn.inner.res2.2.sum.lateral.weight"
Meaning: These are lateral pathway convolutions
2) Starts with "fpn.res"
Example: "fpn.res2.2.sum.weight"
Meaning: These are FPN output convolutions
"""
splits = name.split(".")
norm = ".norm" if "norm" in splits else ""
if name.startswith("fpn.inner."):
# splits example: ['fpn', 'inner', 'res2', '2', 'sum', 'lateral', 'weight']
stage = int(splits[2][len("res") :])
return "fpn_lateral{}{}.{}".format(stage, norm, splits[-1])
elif name.startswith("fpn.res"):
# splits example: ['fpn', 'res2', '2', 'sum', 'weight']
stage = int(splits[1][len("res") :])
return "fpn_output{}{}.{}".format(stage, norm, splits[-1])
return name
layer_keys = [fpn_map(k) for k in layer_keys]
# --------------------------------------------------------------------------
# Mask R-CNN mask head
# --------------------------------------------------------------------------
# roi_heads.StandardROIHeads case
layer_keys = [k.replace(".[mask].fcn", "mask_head.mask_fcn") for k in layer_keys]
layer_keys = [re.sub("^\\.mask\\.fcn", "mask_head.mask_fcn", k) for k in layer_keys]
layer_keys = [k.replace("mask.fcn.logits", "mask_head.predictor") for k in layer_keys]
# roi_heads.Res5ROIHeads case
layer_keys = [k.replace("conv5.mask", "mask_head.deconv") for k in layer_keys]
# --------------------------------------------------------------------------
# Keypoint R-CNN head
# --------------------------------------------------------------------------
# interestingly, the keypoint head convs have blob names that are simply "conv_fcnX"
layer_keys = [k.replace("conv.fcn", "roi_heads.keypoint_head.conv_fcn") for k in layer_keys]
layer_keys = [
k.replace("kps.score.lowres", "roi_heads.keypoint_head.score_lowres") for k in layer_keys
]
layer_keys = [k.replace("kps.score.", "roi_heads.keypoint_head.score.") for k in layer_keys]
# --------------------------------------------------------------------------
# Done with replacements
# --------------------------------------------------------------------------
assert len(set(layer_keys)) == len(layer_keys)
assert len(original_keys) == len(layer_keys)
new_weights = {}
new_keys_to_original_keys = {}
for orig, renamed in zip(original_keys, layer_keys):
new_keys_to_original_keys[renamed] = orig
if renamed.startswith("bbox_pred.") or renamed.startswith("mask_head.predictor."):
# remove the meaningless prediction weight for background class
new_start_idx = 4 if renamed.startswith("bbox_pred.") else 1
new_weights[renamed] = weights[orig][new_start_idx:]
logger.info(
"Remove prediction weight for background class in {}. The shape changes from "
"{} to {}.".format(
renamed, tuple(weights[orig].shape), tuple(new_weights[renamed].shape)
)
)
elif renamed.startswith("cls_score."):
# move weights of bg class from original index 0 to last index
logger.info(
"Move classification weights for background class in {} from index 0 to "
"index {}.".format(renamed, weights[orig].shape[0] - 1)
)
new_weights[renamed] = torch.cat([weights[orig][1:], weights[orig][:1]])
else:
new_weights[renamed] = weights[orig]
return new_weights, new_keys_to_original_keys
def _longest_common_prefix(names: List[str]) -> str:
"""
["abc.zfg", "abc.zef"] -> "abc."
"""
names = [n.split(".") for n in names]
m1, m2 = min(names), max(names)
ret = [a for a, b in zip(m1, m2) if a == b]
ret = ".".join(ret) + "." if len(ret) else ""
return ret
The provided code snippet includes necessary dependencies for implementing the `align_and_update_state_dicts` function. Write a Python function `def align_and_update_state_dicts(model_state_dict, ckpt_state_dict, c2_conversion=True)` to solve the following problem:
Match names between the two state-dict, and returns a new chkpt_state_dict with names converted to match model_state_dict with heuristics. The returned dict can be later loaded with fvcore checkpointer. If `c2_conversion==True`, `ckpt_state_dict` is assumed to be a Caffe2 model and will be renamed at first. Strategy: suppose that the models that we will create will have prefixes appended to each of its keys, for example due to an extra level of nesting that the original pre-trained weights from ImageNet won't contain. For example, model.state_dict() might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains res2.conv1.weight. We thus want to match both parameters together. For that, we look for each model weight, look among all loaded keys if there is one that is a suffix of the current weight name, and use it if that's the case. If multiple matches exist, take the one with longest size of the corresponding name. For example, for the same model as before, the pretrained weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case, we want to match backbone[0].body.conv1.weight to conv1.weight, and backbone[0].body.res2.conv1.weight to res2.conv1.weight.
Here is the function:
def align_and_update_state_dicts(model_state_dict, ckpt_state_dict, c2_conversion=True):
"""
Match names between the two state-dict, and returns a new chkpt_state_dict with names
converted to match model_state_dict with heuristics. The returned dict can be later
loaded with fvcore checkpointer.
If `c2_conversion==True`, `ckpt_state_dict` is assumed to be a Caffe2
model and will be renamed at first.
Strategy: suppose that the models that we will create will have prefixes appended
to each of its keys, for example due to an extra level of nesting that the original
pre-trained weights from ImageNet won't contain. For example, model.state_dict()
might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains
res2.conv1.weight. We thus want to match both parameters together.
For that, we look for each model weight, look among all loaded keys if there is one
that is a suffix of the current weight name, and use it if that's the case.
If multiple matches exist, take the one with longest size
of the corresponding name. For example, for the same model as before, the pretrained
weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case,
we want to match backbone[0].body.conv1.weight to conv1.weight, and
backbone[0].body.res2.conv1.weight to res2.conv1.weight.
"""
model_keys = sorted(model_state_dict.keys())
if c2_conversion:
ckpt_state_dict, original_keys = convert_c2_detectron_names(ckpt_state_dict)
# original_keys: the name in the original dict (before renaming)
else:
original_keys = {x: x for x in ckpt_state_dict.keys()}
ckpt_keys = sorted(ckpt_state_dict.keys())
def match(a, b):
# Matched ckpt_key should be a complete (starts with '.') suffix.
# For example, roi_heads.mesh_head.whatever_conv1 does not match conv1,
# but matches whatever_conv1 or mesh_head.whatever_conv1.
return a == b or a.endswith("." + b)
# get a matrix of string matches, where each (i, j) entry correspond to the size of the
# ckpt_key string, if it matches
match_matrix = [len(j) if match(i, j) else 0 for i in model_keys for j in ckpt_keys]
match_matrix = torch.as_tensor(match_matrix).view(len(model_keys), len(ckpt_keys))
# use the matched one with longest size in case of multiple matches
max_match_size, idxs = match_matrix.max(1)
# remove indices that correspond to no-match
idxs[max_match_size == 0] = -1
logger = setup_logger(name=__name__)
# matched_pairs (matched checkpoint key --> matched model key)
matched_keys = {}
result_state_dict = {}
for idx_model, idx_ckpt in enumerate(idxs.tolist()):
if idx_ckpt == -1:
continue
key_model = model_keys[idx_model]
key_ckpt = ckpt_keys[idx_ckpt]
value_ckpt = ckpt_state_dict[key_ckpt]
shape_in_model = model_state_dict[key_model].shape
if shape_in_model != value_ckpt.shape:
logger.warning(
"Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format(
key_ckpt, value_ckpt.shape, key_model, shape_in_model
)
)
logger.warning(
"{} will not be loaded. Please double check and see if this is desired.".format(
key_ckpt
)
)
continue
assert key_model not in result_state_dict
result_state_dict[key_model] = value_ckpt
if key_ckpt in matched_keys: # already added to matched_keys
logger.error(
"Ambiguity found for {} in checkpoint!"
"It matches at least two keys in the model ({} and {}).".format(
key_ckpt, key_model, matched_keys[key_ckpt]
)
)
raise ValueError("Cannot match one checkpoint key to multiple keys in the model.")
matched_keys[key_ckpt] = key_model
# logging:
matched_model_keys = sorted(matched_keys.values())
if len(matched_model_keys) == 0:
logger.warning("No weights in checkpoint matched with model.")
return ckpt_state_dict
common_prefix = _longest_common_prefix(matched_model_keys)
rev_matched_keys = {v: k for k, v in matched_keys.items()}
original_keys = {k: original_keys[rev_matched_keys[k]] for k in matched_model_keys}
table = []
for key_model in matched_model_keys:
shape = model_state_dict[key_model].shape
table.append(
(
key_model,
original_keys[key_model],
shape,
)
)
table_str = tabulate(
table, tablefmt="pipe", headers=["Names in Model", "Names in Checkpoint", "Shapes"]
)
logger.info(
"Following weights matched with "
+ (f"submodule {common_prefix[:-1]}" if common_prefix else "model")
+ ":\n"
+ table_str
)
unmatched_ckpt_keys = [k for k in ckpt_keys if k not in set(matched_keys.keys())]
for k in unmatched_ckpt_keys:
result_state_dict[k] = ckpt_state_dict[k]
return result_state_dict | Match names between the two state-dict, and returns a new chkpt_state_dict with names converted to match model_state_dict with heuristics. The returned dict can be later loaded with fvcore checkpointer. If `c2_conversion==True`, `ckpt_state_dict` is assumed to be a Caffe2 model and will be renamed at first. Strategy: suppose that the models that we will create will have prefixes appended to each of its keys, for example due to an extra level of nesting that the original pre-trained weights from ImageNet won't contain. For example, model.state_dict() might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains res2.conv1.weight. We thus want to match both parameters together. For that, we look for each model weight, look among all loaded keys if there is one that is a suffix of the current weight name, and use it if that's the case. If multiple matches exist, take the one with longest size of the corresponding name. For example, for the same model as before, the pretrained weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case, we want to match backbone[0].body.conv1.weight to conv1.weight, and backbone[0].body.res2.conv1.weight to res2.conv1.weight. |
155,877 | import copy
import logging
import re
from typing import Dict, List
import torch
from tabulate import tabulate
from detectron2.utils.logger import setup_logger
def _longest_common_prefix_str(names: List[str]) -> str:
m1, m2 = min(names), max(names)
lcp = [a for a, b in zip(m1, m2) if a == b]
lcp = "".join(lcp)
return lcp
The provided code snippet includes necessary dependencies for implementing the `_group_keys_by_module` function. Write a Python function `def _group_keys_by_module(keys: List[str], original_names: Dict[str, str])` to solve the following problem:
Params in the same submodule are grouped together. Args: keys: names of all parameters original_names: mapping from parameter name to their name in the checkpoint Returns: dict[name -> all other names in the same group]
Here is the function:
def _group_keys_by_module(keys: List[str], original_names: Dict[str, str]):
"""
Params in the same submodule are grouped together.
Args:
keys: names of all parameters
original_names: mapping from parameter name to their name in the checkpoint
Returns:
dict[name -> all other names in the same group]
"""
def _submodule_name(key):
pos = key.rfind(".")
if pos < 0:
return None
prefix = key[: pos + 1]
return prefix
all_submodules = [_submodule_name(k) for k in keys]
all_submodules = [x for x in all_submodules if x]
all_submodules = sorted(all_submodules, key=len)
ret = {}
for prefix in all_submodules:
group = [k for k in keys if k.startswith(prefix)]
if len(group) <= 1:
continue
original_name_lcp = _longest_common_prefix_str([original_names[k] for k in group])
if len(original_name_lcp) == 0:
# don't group weights if original names don't share prefix
continue
for k in group:
if k in ret:
continue
ret[k] = group
return ret | Params in the same submodule are grouped together. Args: keys: names of all parameters original_names: mapping from parameter name to their name in the checkpoint Returns: dict[name -> all other names in the same group] |
155,878 | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import warnings
import torch
from torch import nn
import torch.nn.functional as F
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.cuda.amp import custom_bwd, custom_fwd
from torch.nn.init import xavier_uniform_, constant_
def _get_reference_points(spatial_shapes, device, kernel_h, kernel_w, dilation_h, dilation_w, pad_h=0, pad_w=0, stride_h=1, stride_w=1):
_, H_, W_, _ = spatial_shapes
H_out = (H_ - (dilation_h * (kernel_h - 1) + 1)) // stride_h + 1
W_out = (W_ - (dilation_w * (kernel_w - 1) + 1)) // stride_w + 1
ref_y, ref_x = torch.meshgrid(
torch.linspace(
# pad_h + 0.5,
# H_ - pad_h - 0.5,
(dilation_h * (kernel_h - 1)) // 2 + 0.5,
(dilation_h * (kernel_h - 1)) // 2 + 0.5 + (H_out - 1) * stride_h,
H_out,
dtype=torch.float32,
device=device),
torch.linspace(
# pad_w + 0.5,
# W_ - pad_w - 0.5,
(dilation_w * (kernel_w - 1)) // 2 + 0.5,
(dilation_w * (kernel_w - 1)) // 2 + 0.5 + (W_out - 1) * stride_w,
W_out,
dtype=torch.float32,
device=device))
ref_y = ref_y.reshape(-1)[None] / H_
ref_x = ref_x.reshape(-1)[None] / W_
ref = torch.stack((ref_x, ref_y), -1).reshape(
1, H_out, W_out, 1, 2)
return ref
def _generate_dilation_grids(spatial_shapes, kernel_h, kernel_w, dilation_h, dilation_w, group, device):
_, H_, W_, _ = spatial_shapes
points_list = []
x, y = torch.meshgrid(
torch.linspace(
-((dilation_w * (kernel_w - 1)) // 2),
-((dilation_w * (kernel_w - 1)) // 2) +
(kernel_w - 1) * dilation_w, kernel_w,
dtype=torch.float32,
device=device),
torch.linspace(
-((dilation_h * (kernel_h - 1)) // 2),
-((dilation_h * (kernel_h - 1)) // 2) +
(kernel_h - 1) * dilation_h, kernel_h,
dtype=torch.float32,
device=device))
points_list.extend([x / W_, y / H_])
grid = torch.stack(points_list, -1).reshape(-1, 1, 2).\
repeat(1, group, 1).permute(1, 0, 2)
grid = grid.reshape(1, 1, 1, group * kernel_h * kernel_w, 2)
return grid
import warnings
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.init import xavier_uniform_, constant_
def dcnv3_core_pytorch(
input, offset, mask, kernel_h,
kernel_w, stride_h, stride_w, pad_h,
pad_w, dilation_h, dilation_w, group,
group_channels, offset_scale):
# for debug and test only,
# need to use cuda version instead
input = F.pad(
input,
[0, 0, pad_h, pad_h, pad_w, pad_w])
N_, H_in, W_in, _ = input.shape
_, H_out, W_out, _ = offset.shape
ref = _get_reference_points(
input.shape, input.device, kernel_h, kernel_w, dilation_h, dilation_w, pad_h, pad_w, stride_h, stride_w)
grid = _generate_dilation_grids(
input.shape, kernel_h, kernel_w, dilation_h, dilation_w, group, input.device)
spatial_norm = torch.tensor([W_in, H_in]).reshape(1, 1, 1, 2).\
repeat(1, 1, 1, group*kernel_h*kernel_w).to(input.device)
sampling_locations = (ref + grid * offset_scale).repeat(N_, 1, 1, 1, 1).flatten(3, 4) + \
offset * offset_scale / spatial_norm
P_ = kernel_h * kernel_w
sampling_grids = 2 * sampling_locations - 1
# N_, H_in, W_in, group*group_channels -> N_, H_in*W_in, group*group_channels -> N_, group*group_channels, H_in*W_in -> N_*group, group_channels, H_in, W_in
input_ = input.view(N_, H_in*W_in, group*group_channels).transpose(1, 2).\
reshape(N_*group, group_channels, H_in, W_in)
# N_, H_out, W_out, group*P_*2 -> N_, H_out*W_out, group, P_, 2 -> N_, group, H_out*W_out, P_, 2 -> N_*group, H_out*W_out, P_, 2
sampling_grid_ = sampling_grids.view(N_, H_out*W_out, group, P_, 2).transpose(1, 2).\
flatten(0, 1)
# N_*group, group_channels, H_out*W_out, P_
sampling_input_ = F.grid_sample(
input_, sampling_grid_, mode='bilinear', padding_mode='zeros', align_corners=False)
# (N_, H_out, W_out, group*P_) -> N_, H_out*W_out, group, P_ -> (N_, group, H_out*W_out, P_) -> (N_*group, 1, H_out*W_out, P_)
mask = mask.view(N_, H_out*W_out, group, P_).transpose(1, 2).\
reshape(N_*group, 1, H_out*W_out, P_)
output = (sampling_input_ * mask).sum(-1).view(N_,
group*group_channels, H_out*W_out)
return output.transpose(1, 2).reshape(N_, H_out, W_out, -1).contiguous() | null |
155,879 | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import warnings
import torch
from torch import nn
import torch.nn.functional as F
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.cuda.amp import custom_bwd, custom_fwd
from torch.nn.init import xavier_uniform_, constant_
import warnings
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.init import xavier_uniform_, constant_
class to_channels_first(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return x.permute(0, 3, 1, 2)
class to_channels_last(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return x.permute(0, 2, 3, 1)
def build_norm_layer(dim,
norm_layer,
in_format='channels_last',
out_format='channels_last',
eps=1e-6):
layers = []
if norm_layer == 'BN':
if in_format == 'channels_last':
layers.append(to_channels_first())
layers.append(nn.BatchNorm2d(dim))
if out_format == 'channels_last':
layers.append(to_channels_last())
elif norm_layer == 'LN':
if in_format == 'channels_first':
layers.append(to_channels_last())
layers.append(nn.LayerNorm(dim, eps=eps))
if out_format == 'channels_first':
layers.append(to_channels_first())
else:
raise NotImplementedError(
f'build_norm_layer does not support {norm_layer}')
return nn.Sequential(*layers) | null |
155,880 | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import warnings
import torch
from torch import nn
import torch.nn.functional as F
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.cuda.amp import custom_bwd, custom_fwd
from torch.nn.init import xavier_uniform_, constant_
import warnings
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.init import xavier_uniform_, constant_
def build_act_layer(act_layer):
if act_layer == 'ReLU':
return nn.ReLU(inplace=True)
elif act_layer == 'SiLU':
return nn.SiLU(inplace=True)
elif act_layer == 'GELU':
return nn.GELU()
raise NotImplementedError(f'build_act_layer does not support {act_layer}') | null |
155,881 | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import warnings
import torch
from torch import nn
import torch.nn.functional as F
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.cuda.amp import custom_bwd, custom_fwd
from torch.nn.init import xavier_uniform_, constant_
import warnings
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.init import xavier_uniform_, constant_
def _is_power_of_2(n):
if (not isinstance(n, int)) or (n < 0):
raise ValueError(
"invalid input for _is_power_of_2: {} (type: {})".format(n, type(n)))
return (n & (n - 1) == 0) and n != 0 | null |
155,882 | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import warnings
import torch
from torch import nn
import torch.nn.functional as F
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.cuda.amp import custom_bwd, custom_fwd
from torch.nn.init import xavier_uniform_, constant_
import warnings
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.init import xavier_uniform_, constant_
The provided code snippet includes necessary dependencies for implementing the `create_dummy_class` function. Write a Python function `def create_dummy_class(klass, dependency, message="")` to solve the following problem:
When a dependency of a class is not available, create a dummy class which throws ImportError when used. Args: klass (str): name of the class. dependency (str): name of the dependency. message: extra message to print Returns: class: a class object
Here is the function:
def create_dummy_class(klass, dependency, message=""):
"""
When a dependency of a class is not available, create a dummy class which throws ImportError
when used.
Args:
klass (str): name of the class.
dependency (str): name of the dependency.
message: extra message to print
Returns:
class: a class object
"""
err = "Cannot import '{}', therefore '{}' is not available.".format(dependency, klass)
if message:
err = err + " " + message
class _DummyMetaClass(type):
# throw error on class attribute access
def __getattr__(_, __): # noqa: B902
raise ImportError(err)
class _Dummy(object, metaclass=_DummyMetaClass):
# throw error on constructor
def __init__(self, *args, **kwargs):
raise ImportError(err)
return _Dummy | When a dependency of a class is not available, create a dummy class which throws ImportError when used. Args: klass (str): name of the class. dependency (str): name of the dependency. message: extra message to print Returns: class: a class object |
155,883 | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import warnings
import torch
from torch import nn
import torch.nn.functional as F
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.cuda.amp import custom_bwd, custom_fwd
from torch.nn.init import xavier_uniform_, constant_
import warnings
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.init import xavier_uniform_, constant_
The provided code snippet includes necessary dependencies for implementing the `create_dummy_func` function. Write a Python function `def create_dummy_func(func, dependency, message="")` to solve the following problem:
When a dependency of a function is not available, create a dummy function which throws ImportError when used. Args: func (str): name of the function. dependency (str or list[str]): name(s) of the dependency. message: extra message to print Returns: function: a function object
Here is the function:
def create_dummy_func(func, dependency, message=""):
    """
    When a dependency of a function is not available, create a dummy function
    which throws ImportError when used.

    Args:
        func (str): name of the function.
        dependency (str or list[str]): name(s) of the dependency.
        message: extra message to print
    Returns:
        function: a function object
    """
    # Normalize a list/tuple of dependencies to a comma-joined string *before*
    # building the error text. The original code joined afterwards, so the
    # message showed the raw list repr (e.g. "['a', 'b']") and the join result
    # was never used.
    if isinstance(dependency, (list, tuple)):
        dependency = ",".join(dependency)
    err = "Cannot import '{}', therefore '{}' is not available.".format(dependency, func)
    if message:
        err = err + " " + message

    def _dummy(*args, **kwargs):
        raise ImportError(err)

    return _dummy
155,884 | from typing import Tuple
import torch
from torchvision.ops.boxes import box_area
The provided code snippet includes necessary dependencies for implementing the `box_cxcywh_to_xyxy` function. Write a Python function `def box_cxcywh_to_xyxy(bbox) -> torch.Tensor` to solve the following problem:
Convert bbox coordinates from (cx, cy, w, h) to (x1, y1, x2, y2) Args: bbox (torch.Tensor): Shape (n, 4) for bboxes. Returns: torch.Tensor: Converted bboxes.
Here is the function:
def box_cxcywh_to_xyxy(bbox) -> torch.Tensor:
    """Convert bbox coordinates from (cx, cy, w, h) to (x1, y1, x2, y2).

    Args:
        bbox (torch.Tensor): Shape (n, 4) for bboxes.
    Returns:
        torch.Tensor: Converted bboxes.
    """
    center_x, center_y, width, height = bbox.unbind(-1)
    half_w = 0.5 * width
    half_h = 0.5 * height
    corners = (center_x - half_w, center_y - half_h, center_x + half_w, center_y + half_h)
    return torch.stack(corners, dim=-1)
155,885 | from typing import Tuple
import torch
from torchvision.ops.boxes import box_area
The provided code snippet includes necessary dependencies for implementing the `box_xyxy_to_cxcywh` function. Write a Python function `def box_xyxy_to_cxcywh(bbox) -> torch.Tensor` to solve the following problem:
Convert bbox coordinates from (x1, y1, x2, y2) to (cx, cy, w, h) Args: bbox (torch.Tensor): Shape (n, 4) for bboxes. Returns: torch.Tensor: Converted bboxes.
Here is the function:
def box_xyxy_to_cxcywh(bbox) -> torch.Tensor:
    """Convert bbox coordinates from (x1, y1, x2, y2) to (cx, cy, w, h).

    Args:
        bbox (torch.Tensor): Shape (n, 4) for bboxes.
    Returns:
        torch.Tensor: Converted bboxes.
    """
    left, top, right, bottom = bbox.unbind(-1)
    components = ((left + right) / 2, (top + bottom) / 2, right - left, bottom - top)
    return torch.stack(components, dim=-1)
155,886 | from typing import Tuple
import torch
from torchvision.ops.boxes import box_area
def box_iou(boxes1, boxes2) -> Tuple[torch.Tensor]:
    """Modified from ``torchvision.ops.box_iou``

    Return both intersection-over-union (Jaccard index) and union between
    two sets of boxes.

    Args:
        boxes1: (torch.Tensor[N, 4]): first set of boxes
        boxes2: (torch.Tensor[M, 4]): second set of boxes
    Returns:
        Tuple: A tuple of NxM matrices ``(iou, union)`` containing the
        pairwise IoU and union values for every element in boxes1 and boxes2.
    """
    areas_a = box_area(boxes1)
    areas_b = box_area(boxes2)

    # Pairwise intersection rectangle: max of top-left corners, min of
    # bottom-right corners (broadcast N against M).
    top_left = torch.max(boxes1[:, None, :2], boxes2[:, :2])  # [N, M, 2]
    bottom_right = torch.min(boxes1[:, None, 2:], boxes2[:, 2:])  # [N, M, 2]

    extent = (bottom_right - top_left).clamp(min=0)  # [N, M, 2]
    intersection = extent[..., 0] * extent[..., 1]  # [N, M]

    union = areas_a[:, None] + areas_b - intersection
    # Epsilon guards against division by zero for degenerate boxes.
    iou = intersection / (union + 1e-6)
    return iou, union
The provided code snippet includes necessary dependencies for implementing the `generalized_box_iou` function. Write a Python function `def generalized_box_iou(boxes1, boxes2) -> torch.Tensor` to solve the following problem:
Generalized IoU from https://giou.stanford.edu/ The input boxes should be in (x0, y0, x1, y1) format Args: boxes1: (torch.Tensor[N, 4]): first set of boxes boxes2: (torch.Tensor[M, 4]): second set of boxes Returns: torch.Tensor: a NxM pairwise matrix containing the pairwise Generalized IoU for every element in boxes1 and boxes2.
Here is the function:
def generalized_box_iou(boxes1, boxes2) -> torch.Tensor:
    """
    Generalized IoU from https://giou.stanford.edu/

    The input boxes should be in (x0, y0, x1, y1) format.

    Args:
        boxes1: (torch.Tensor[N, 4]): first set of boxes
        boxes2: (torch.Tensor[M, 4]): second set of boxes
    Returns:
        torch.Tensor: a NxM pairwise matrix containing the pairwise
        Generalized IoU for every element in boxes1 and boxes2.
    """
    # Degenerate boxes (x1 < x0 or y1 < y0) would produce inf/nan results
    # downstream, so reject them up front.
    assert (boxes1[:, 2:] >= boxes1[:, :2]).all()
    assert (boxes2[:, 2:] >= boxes2[:, :2]).all()

    iou, union = box_iou(boxes1, boxes2)

    # Smallest axis-aligned box enclosing each pair.
    enclosing_tl = torch.min(boxes1[:, None, :2], boxes2[:, :2])
    enclosing_br = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
    enclosing_wh = (enclosing_br - enclosing_tl).clamp(min=0)  # [N, M, 2]
    enclosing_area = enclosing_wh[..., 0] * enclosing_wh[..., 1]

    # GIoU = IoU - (enclosing_area - union) / enclosing_area (epsilon-guarded).
    return iou - (enclosing_area - union) / (enclosing_area + 1e-6)
155,887 | from typing import Tuple
import torch
from torchvision.ops.boxes import box_area
The provided code snippet includes necessary dependencies for implementing the `masks_to_boxes` function. Write a Python function `def masks_to_boxes(masks) -> torch.Tensor` to solve the following problem:
Compute the bounding boxes around the provided masks The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions. Returns: torch.Tensor: a [N, 4] tensor with the boxes in (x0, y0, x1, y1) format.
Here is the function:
def masks_to_boxes(masks) -> torch.Tensor:
    """Compute the bounding boxes around the provided masks.

    The masks should be in format [N, H, W] where N is the number of masks
    and (H, W) are the spatial dimensions. Non-zero entries are treated as
    foreground.

    Returns:
        torch.Tensor: a [N, 4] tensor with the boxes in (x0, y0, x1, y1)
        format (pixel-index coordinates; x1/y1 are the max foreground
        indices, not exclusive bounds).
    """
    if masks.numel() == 0:
        return torch.zeros((0, 4), device=masks.device)

    h, w = masks.shape[-2:]
    # Build coordinate vectors on the same device as `masks`; the previous
    # implementation created them on the CPU only, which failed for CUDA
    # inputs (the empty-input path above already honored masks.device).
    x = torch.arange(0, w, dtype=torch.float, device=masks.device)  # (W,)
    y = torch.arange(0, h, dtype=torch.float, device=masks.device)  # (H,)

    # Broadcasting replaces the (deprecated, indexing-less) torch.meshgrid:
    # x broadcasts across the last dim, y[:, None] across the row dim.
    x_mask = masks * x  # each foreground pixel holds its column index
    x_max = x_mask.flatten(1).max(-1)[0]
    # Background pixels are pushed to a huge value so min() ignores them.
    x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]

    y_mask = masks * y[:, None]  # each foreground pixel holds its row index
    y_max = y_mask.flatten(1).max(-1)[0]
    y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]

    return torch.stack([x_min, y_min, x_max, y_max], 1)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.