Add files using upload-large-folder tool
Browse files. This view is limited to 50 files because it contains too many changes.
See raw diff
- RAVE-main/annotator/mmpkg/mmcv/cnn/__init__.py +41 -0
- RAVE-main/annotator/mmpkg/mmcv/cnn/alexnet.py +61 -0
- RAVE-main/annotator/mmpkg/mmcv/cnn/bricks/__init__.py +35 -0
- RAVE-main/annotator/mmpkg/mmcv/cnn/bricks/activation.py +92 -0
- RAVE-main/annotator/mmpkg/mmcv/cnn/bricks/context_block.py +125 -0
- RAVE-main/annotator/mmpkg/mmcv/cnn/bricks/conv.py +44 -0
- RAVE-main/annotator/mmpkg/mmcv/cnn/bricks/conv2d_adaptive_padding.py +62 -0
- RAVE-main/annotator/mmpkg/mmcv/cnn/bricks/conv_module.py +206 -0
- RAVE-main/annotator/mmpkg/mmcv/cnn/bricks/depthwise_separable_conv_module.py +96 -0
- RAVE-main/annotator/mmpkg/mmcv/cnn/bricks/drop.py +65 -0
- RAVE-main/annotator/mmpkg/mmcv/cnn/bricks/generalized_attention.py +412 -0
- RAVE-main/annotator/mmpkg/mmcv/cnn/bricks/hsigmoid.py +34 -0
- RAVE-main/annotator/mmpkg/mmcv/cnn/bricks/hswish.py +29 -0
- RAVE-main/annotator/mmpkg/mmcv/cnn/bricks/non_local.py +306 -0
- RAVE-main/annotator/mmpkg/mmcv/cnn/bricks/norm.py +144 -0
- RAVE-main/annotator/mmpkg/mmcv/cnn/bricks/padding.py +36 -0
- RAVE-main/annotator/mmpkg/mmcv/cnn/bricks/plugin.py +88 -0
- RAVE-main/annotator/mmpkg/mmcv/cnn/bricks/registry.py +16 -0
- RAVE-main/annotator/mmpkg/mmcv/cnn/bricks/scale.py +21 -0
- RAVE-main/annotator/mmpkg/mmcv/cnn/bricks/swish.py +25 -0
- RAVE-main/annotator/mmpkg/mmcv/cnn/bricks/transformer.py +595 -0
- RAVE-main/annotator/mmpkg/mmcv/cnn/bricks/upsample.py +84 -0
- RAVE-main/annotator/mmpkg/mmcv/cnn/bricks/wrappers.py +180 -0
- RAVE-main/annotator/mmpkg/mmcv/cnn/builder.py +30 -0
- RAVE-main/annotator/mmpkg/mmcv/cnn/resnet.py +316 -0
- RAVE-main/annotator/mmpkg/mmcv/cnn/vgg.py +175 -0
- RAVE-main/annotator/mmpkg/mmcv/engine/test.py +202 -0
- RAVE-main/annotator/mmpkg/mmcv/model_zoo/deprecated.json +6 -0
- RAVE-main/annotator/mmpkg/mmcv/model_zoo/mmcls.json +31 -0
- RAVE-main/annotator/mmpkg/mmcv/model_zoo/open_mmlab.json +50 -0
- RAVE-main/annotator/mmpkg/mmcv/ops/__init__.py +81 -0
- RAVE-main/annotator/mmpkg/mmcv/ops/assign_score_withk.py +123 -0
- RAVE-main/annotator/mmpkg/mmcv/ops/border_align.py +109 -0
- RAVE-main/annotator/mmpkg/mmcv/ops/box_iou_rotated.py +45 -0
- RAVE-main/annotator/mmpkg/mmcv/ops/cc_attention.py +83 -0
- RAVE-main/annotator/mmpkg/mmcv/ops/contour_expand.py +49 -0
- RAVE-main/annotator/mmpkg/mmcv/ops/correlation.py +196 -0
- RAVE-main/annotator/mmpkg/mmcv/ops/focal_loss.py +212 -0
- RAVE-main/annotator/mmpkg/mmcv/ops/gather_points.py +57 -0
- RAVE-main/annotator/mmpkg/mmcv/ops/iou3d.py +85 -0
- RAVE-main/annotator/mmpkg/mmcv/ops/knn.py +77 -0
- RAVE-main/annotator/mmpkg/mmcv/ops/multi_scale_deform_attn.py +358 -0
- RAVE-main/annotator/mmpkg/mmcv/ops/pixel_group.py +75 -0
- RAVE-main/annotator/mmpkg/mmcv/ops/points_sampler.py +177 -0
- RAVE-main/annotator/mmpkg/mmcv/ops/scatter_points.py +135 -0
- RAVE-main/annotator/mmpkg/mmcv/ops/upfirdn2d.py +330 -0
- RAVE-main/annotator/mmpkg/mmcv/parallel/__init__.py +13 -0
- RAVE-main/annotator/mmpkg/mmcv/parallel/_functions.py +79 -0
- RAVE-main/annotator/mmpkg/mmcv/parallel/collate.py +84 -0
- RAVE-main/annotator/mmpkg/mmcv/parallel/data_parallel.py +89 -0
RAVE-main/annotator/mmpkg/mmcv/cnn/__init__.py
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from .alexnet import AlexNet
|
| 3 |
+
# yapf: disable
|
| 4 |
+
from .bricks import (ACTIVATION_LAYERS, CONV_LAYERS, NORM_LAYERS,
|
| 5 |
+
PADDING_LAYERS, PLUGIN_LAYERS, UPSAMPLE_LAYERS,
|
| 6 |
+
ContextBlock, Conv2d, Conv3d, ConvAWS2d, ConvModule,
|
| 7 |
+
ConvTranspose2d, ConvTranspose3d, ConvWS2d,
|
| 8 |
+
DepthwiseSeparableConvModule, GeneralizedAttention,
|
| 9 |
+
HSigmoid, HSwish, Linear, MaxPool2d, MaxPool3d,
|
| 10 |
+
NonLocal1d, NonLocal2d, NonLocal3d, Scale, Swish,
|
| 11 |
+
build_activation_layer, build_conv_layer,
|
| 12 |
+
build_norm_layer, build_padding_layer, build_plugin_layer,
|
| 13 |
+
build_upsample_layer, conv_ws_2d, is_norm)
|
| 14 |
+
from .builder import MODELS, build_model_from_cfg
|
| 15 |
+
# yapf: enable
|
| 16 |
+
from .resnet import ResNet, make_res_layer
|
| 17 |
+
from .utils import (INITIALIZERS, Caffe2XavierInit, ConstantInit, KaimingInit,
|
| 18 |
+
NormalInit, PretrainedInit, TruncNormalInit, UniformInit,
|
| 19 |
+
XavierInit, bias_init_with_prob, caffe2_xavier_init,
|
| 20 |
+
constant_init, fuse_conv_bn, get_model_complexity_info,
|
| 21 |
+
initialize, kaiming_init, normal_init, trunc_normal_init,
|
| 22 |
+
uniform_init, xavier_init)
|
| 23 |
+
from .vgg import VGG, make_vgg_layer
|
| 24 |
+
|
| 25 |
+
__all__ = [
|
| 26 |
+
'AlexNet', 'VGG', 'make_vgg_layer', 'ResNet', 'make_res_layer',
|
| 27 |
+
'constant_init', 'xavier_init', 'normal_init', 'trunc_normal_init',
|
| 28 |
+
'uniform_init', 'kaiming_init', 'caffe2_xavier_init',
|
| 29 |
+
'bias_init_with_prob', 'ConvModule', 'build_activation_layer',
|
| 30 |
+
'build_conv_layer', 'build_norm_layer', 'build_padding_layer',
|
| 31 |
+
'build_upsample_layer', 'build_plugin_layer', 'is_norm', 'NonLocal1d',
|
| 32 |
+
'NonLocal2d', 'NonLocal3d', 'ContextBlock', 'HSigmoid', 'Swish', 'HSwish',
|
| 33 |
+
'GeneralizedAttention', 'ACTIVATION_LAYERS', 'CONV_LAYERS', 'NORM_LAYERS',
|
| 34 |
+
'PADDING_LAYERS', 'UPSAMPLE_LAYERS', 'PLUGIN_LAYERS', 'Scale',
|
| 35 |
+
'get_model_complexity_info', 'conv_ws_2d', 'ConvAWS2d', 'ConvWS2d',
|
| 36 |
+
'fuse_conv_bn', 'DepthwiseSeparableConvModule', 'Linear', 'Conv2d',
|
| 37 |
+
'ConvTranspose2d', 'MaxPool2d', 'ConvTranspose3d', 'MaxPool3d', 'Conv3d',
|
| 38 |
+
'initialize', 'INITIALIZERS', 'ConstantInit', 'XavierInit', 'NormalInit',
|
| 39 |
+
'TruncNormalInit', 'UniformInit', 'KaimingInit', 'PretrainedInit',
|
| 40 |
+
'Caffe2XavierInit', 'MODELS', 'build_model_from_cfg'
|
| 41 |
+
]
|
RAVE-main/annotator/mmpkg/mmcv/cnn/alexnet.py
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import logging
|
| 3 |
+
|
| 4 |
+
import torch.nn as nn
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class AlexNet(nn.Module):
    """AlexNet backbone.

    Args:
        num_classes (int): Number of classification classes. A value <= 0
            (default: -1) disables the classifier head, and ``forward``
            returns the raw convolutional feature map instead of logits.
    """

    def __init__(self, num_classes=-1):
        super(AlexNet, self).__init__()
        self.num_classes = num_classes
        # The classic AlexNet convolutional stem: five conv/ReLU stages
        # interleaved with three max-pool downsamplings.
        stages = [
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        ]
        self.features = nn.Sequential(*stages)
        if self.num_classes > 0:
            # Fully-connected head; the 256 * 6 * 6 input size assumes a
            # 224x224 input image — TODO confirm against callers.
            self.classifier = nn.Sequential(
                nn.Dropout(),
                nn.Linear(256 * 6 * 6, 4096),
                nn.ReLU(inplace=True),
                nn.Dropout(),
                nn.Linear(4096, 4096),
                nn.ReLU(inplace=True),
                nn.Linear(4096, num_classes),
            )

    def init_weights(self, pretrained=None):
        """Initialize weights, optionally loading them from a checkpoint.

        Args:
            pretrained (str, optional): Checkpoint path to load from. When
                ``None``, PyTorch's default layer initialization is kept.

        Raises:
            TypeError: If ``pretrained`` is neither a str nor ``None``.
        """
        if pretrained is None:
            # Keep the default initialization of each layer.
            return
        if not isinstance(pretrained, str):
            raise TypeError('pretrained must be a str or None')
        logger = logging.getLogger()
        # Imported lazily to avoid a circular import with the runner package.
        from ..runner import load_checkpoint
        load_checkpoint(self, pretrained, strict=False, logger=logger)

    def forward(self, x):
        """Compute features and, if a classifier is configured, logits."""
        x = self.features(x)
        if self.num_classes <= 0:
            return x
        x = x.view(x.size(0), 256 * 6 * 6)
        return self.classifier(x)
|
RAVE-main/annotator/mmpkg/mmcv/cnn/bricks/__init__.py
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from .activation import build_activation_layer
|
| 3 |
+
from .context_block import ContextBlock
|
| 4 |
+
from .conv import build_conv_layer
|
| 5 |
+
from .conv2d_adaptive_padding import Conv2dAdaptivePadding
|
| 6 |
+
from .conv_module import ConvModule
|
| 7 |
+
from .conv_ws import ConvAWS2d, ConvWS2d, conv_ws_2d
|
| 8 |
+
from .depthwise_separable_conv_module import DepthwiseSeparableConvModule
|
| 9 |
+
from .drop import Dropout, DropPath
|
| 10 |
+
from .generalized_attention import GeneralizedAttention
|
| 11 |
+
from .hsigmoid import HSigmoid
|
| 12 |
+
from .hswish import HSwish
|
| 13 |
+
from .non_local import NonLocal1d, NonLocal2d, NonLocal3d
|
| 14 |
+
from .norm import build_norm_layer, is_norm
|
| 15 |
+
from .padding import build_padding_layer
|
| 16 |
+
from .plugin import build_plugin_layer
|
| 17 |
+
from .registry import (ACTIVATION_LAYERS, CONV_LAYERS, NORM_LAYERS,
|
| 18 |
+
PADDING_LAYERS, PLUGIN_LAYERS, UPSAMPLE_LAYERS)
|
| 19 |
+
from .scale import Scale
|
| 20 |
+
from .swish import Swish
|
| 21 |
+
from .upsample import build_upsample_layer
|
| 22 |
+
from .wrappers import (Conv2d, Conv3d, ConvTranspose2d, ConvTranspose3d,
|
| 23 |
+
Linear, MaxPool2d, MaxPool3d)
|
| 24 |
+
|
| 25 |
+
__all__ = [
|
| 26 |
+
'ConvModule', 'build_activation_layer', 'build_conv_layer',
|
| 27 |
+
'build_norm_layer', 'build_padding_layer', 'build_upsample_layer',
|
| 28 |
+
'build_plugin_layer', 'is_norm', 'HSigmoid', 'HSwish', 'NonLocal1d',
|
| 29 |
+
'NonLocal2d', 'NonLocal3d', 'ContextBlock', 'GeneralizedAttention',
|
| 30 |
+
'ACTIVATION_LAYERS', 'CONV_LAYERS', 'NORM_LAYERS', 'PADDING_LAYERS',
|
| 31 |
+
'UPSAMPLE_LAYERS', 'PLUGIN_LAYERS', 'Scale', 'ConvAWS2d', 'ConvWS2d',
|
| 32 |
+
'conv_ws_2d', 'DepthwiseSeparableConvModule', 'Swish', 'Linear',
|
| 33 |
+
'Conv2dAdaptivePadding', 'Conv2d', 'ConvTranspose2d', 'MaxPool2d',
|
| 34 |
+
'ConvTranspose3d', 'MaxPool3d', 'Conv3d', 'Dropout', 'DropPath'
|
| 35 |
+
]
|
RAVE-main/annotator/mmpkg/mmcv/cnn/bricks/activation.py
ADDED
|
@@ -0,0 +1,92 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import torch
|
| 3 |
+
import torch.nn as nn
|
| 4 |
+
import torch.nn.functional as F
|
| 5 |
+
|
| 6 |
+
from annotator.mmpkg.mmcv.utils import TORCH_VERSION, build_from_cfg, digit_version
|
| 7 |
+
from .registry import ACTIVATION_LAYERS
|
| 8 |
+
|
| 9 |
+
# Register PyTorch's built-in activation modules so they can be created
# from config dicts (e.g. dict(type='ReLU')) through ACTIVATION_LAYERS.
for module in [
        nn.ReLU, nn.LeakyReLU, nn.PReLU, nn.RReLU, nn.ReLU6, nn.ELU,
        nn.Sigmoid, nn.Tanh
]:
    ACTIVATION_LAYERS.register_module(module=module)
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
@ACTIVATION_LAYERS.register_module(name='Clip')
@ACTIVATION_LAYERS.register_module()
class Clamp(nn.Module):
    """Clamp activation layer.

    Restricts every element of the input feature map to the closed interval
    :math:`[min, max]`. See ``torch.clamp()`` for details.

    Args:
        min (Number | optional): Lower-bound of the range to be clamped to.
            Default to -1.
        max (Number | optional): Upper-bound of the range to be clamped to.
            Default to 1.
    """

    def __init__(self, min=-1., max=1.):
        super(Clamp, self).__init__()
        self.min = min
        self.max = max

    def forward(self, x):
        """Clamp ``x`` element-wise into ``[self.min, self.max]``.

        Args:
            x (torch.Tensor): The input tensor.

        Returns:
            torch.Tensor: Clamped tensor.
        """
        return x.clamp(min=self.min, max=self.max)
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
class GELU(nn.Module):
    r"""Gaussian Error Linear Unit activation:

    .. math::
        \text{GELU}(x) = x * \Phi(x)

    where :math:`\Phi(x)` is the Cumulative Distribution Function of the
    standard Gaussian distribution.

    Shape:
        - Input: :math:`(N, *)` where `*` means, any number of additional
          dimensions
        - Output: :math:`(N, *)`, same shape as the input

    Examples::

        >>> m = nn.GELU()
        >>> input = torch.randn(2)
        >>> output = m(input)
    """

    def forward(self, input):
        # Stateless: delegate directly to the functional implementation.
        return F.gelu(input)
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
# nn.GELU only exists from PyTorch 1.4 onwards; register the local
# functional-based GELU for older versions and the parrots framework.
if (TORCH_VERSION == 'parrots'
        or digit_version(TORCH_VERSION) < digit_version('1.4')):
    ACTIVATION_LAYERS.register_module(module=GELU)
else:
    ACTIVATION_LAYERS.register_module(module=nn.GELU)
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def build_activation_layer(cfg):
    """Build an activation layer from its config.

    Args:
        cfg (dict): The activation layer config, which should contain:
            - type (str): Layer type.
            - layer args: Args needed to instantiate an activation layer.

    Returns:
        nn.Module: Created activation layer.
    """
    layer = build_from_cfg(cfg, ACTIVATION_LAYERS)
    return layer
|
RAVE-main/annotator/mmpkg/mmcv/cnn/bricks/context_block.py
ADDED
|
@@ -0,0 +1,125 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import torch
|
| 3 |
+
from torch import nn
|
| 4 |
+
|
| 5 |
+
from ..utils import constant_init, kaiming_init
|
| 6 |
+
from .registry import PLUGIN_LAYERS
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def last_zero_init(m):
    """Zero-initialize ``m``, or only its final layer when it is an
    ``nn.Sequential``."""
    target = m[-1] if isinstance(m, nn.Sequential) else m
    constant_init(target, val=0)
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
@PLUGIN_LAYERS.register_module()
class ContextBlock(nn.Module):
    """ContextBlock module in GCNet.

    See 'GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond'
    (https://arxiv.org/abs/1904.11492) for details.

    Args:
        in_channels (int): Channels of the input feature map.
        ratio (float): Ratio of channels of transform bottleneck
        pooling_type (str): Pooling method for context modeling.
            Options are 'att' and 'avg', stand for attention pooling and
            average pooling respectively. Default: 'att'.
        fusion_types (Sequence[str]): Fusion method for feature fusion,
            Options are 'channel_add', 'channel_mul', stand for channelwise
            addition and multiplication respectively. Default: ('channel_add',)
    """

    # Abbreviation used when deriving plugin instance names.
    _abbr_ = 'context_block'

    def __init__(self,
                 in_channels,
                 ratio,
                 pooling_type='att',
                 fusion_types=('channel_add', )):
        super(ContextBlock, self).__init__()
        assert pooling_type in ['avg', 'att']
        assert isinstance(fusion_types, (list, tuple))
        valid_fusion_types = ['channel_add', 'channel_mul']
        assert all([f in valid_fusion_types for f in fusion_types])
        assert len(fusion_types) > 0, 'at least one fusion should be used'
        self.in_channels = in_channels
        self.ratio = ratio
        # Bottleneck width of the channel transform branches.
        self.planes = int(in_channels * ratio)
        self.pooling_type = pooling_type
        self.fusion_types = fusion_types
        if pooling_type == 'att':
            # 1x1 conv producing a single-channel spatial attention map.
            self.conv_mask = nn.Conv2d(in_channels, 1, kernel_size=1)
            self.softmax = nn.Softmax(dim=2)
        else:
            self.avg_pool = nn.AdaptiveAvgPool2d(1)
        if 'channel_add' in fusion_types:
            # Bottleneck transform for the additive fusion branch.
            self.channel_add_conv = nn.Sequential(
                nn.Conv2d(self.in_channels, self.planes, kernel_size=1),
                nn.LayerNorm([self.planes, 1, 1]),
                nn.ReLU(inplace=True),  # yapf: disable
                nn.Conv2d(self.planes, self.in_channels, kernel_size=1))
        else:
            self.channel_add_conv = None
        if 'channel_mul' in fusion_types:
            # Bottleneck transform for the multiplicative fusion branch.
            self.channel_mul_conv = nn.Sequential(
                nn.Conv2d(self.in_channels, self.planes, kernel_size=1),
                nn.LayerNorm([self.planes, 1, 1]),
                nn.ReLU(inplace=True),  # yapf: disable
                nn.Conv2d(self.planes, self.in_channels, kernel_size=1))
        else:
            self.channel_mul_conv = None
        self.reset_parameters()

    def reset_parameters(self):
        """Initialize weights; the last conv of each fusion branch is
        zero-initialized so the block starts as an identity mapping."""
        if self.pooling_type == 'att':
            kaiming_init(self.conv_mask, mode='fan_in')
            self.conv_mask.inited = True

        if self.channel_add_conv is not None:
            last_zero_init(self.channel_add_conv)
        if self.channel_mul_conv is not None:
            last_zero_init(self.channel_mul_conv)

    def spatial_pool(self, x):
        """Pool the spatial dimensions of ``x`` into a context descriptor.

        Args:
            x (torch.Tensor): Input feature map of shape [N, C, H, W].

        Returns:
            torch.Tensor: Context tensor of shape [N, C, 1, 1].
        """
        batch, channel, height, width = x.size()
        if self.pooling_type == 'att':
            input_x = x
            # [N, C, H * W]
            input_x = input_x.view(batch, channel, height * width)
            # [N, 1, C, H * W]
            input_x = input_x.unsqueeze(1)
            # [N, 1, H, W]
            context_mask = self.conv_mask(x)
            # [N, 1, H * W]
            context_mask = context_mask.view(batch, 1, height * width)
            # [N, 1, H * W]
            context_mask = self.softmax(context_mask)
            # [N, 1, H * W, 1]
            context_mask = context_mask.unsqueeze(-1)
            # [N, 1, C, 1]
            context = torch.matmul(input_x, context_mask)
            # [N, C, 1, 1]
            context = context.view(batch, channel, 1, 1)
        else:
            # [N, C, 1, 1]
            context = self.avg_pool(x)

        return context

    def forward(self, x):
        """Fuse global context into ``x``.

        Args:
            x (torch.Tensor): Input feature map of shape [N, C, H, W].

        Returns:
            torch.Tensor: Output feature map of the same shape as ``x``.
        """
        # [N, C, 1, 1]
        context = self.spatial_pool(x)

        out = x
        if self.channel_mul_conv is not None:
            # [N, C, 1, 1]
            channel_mul_term = torch.sigmoid(self.channel_mul_conv(context))
            out = out * channel_mul_term
        if self.channel_add_conv is not None:
            # [N, C, 1, 1]
            channel_add_term = self.channel_add_conv(context)
            out = out + channel_add_term

        return out
|
RAVE-main/annotator/mmpkg/mmcv/cnn/bricks/conv.py
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from torch import nn
|
| 3 |
+
|
| 4 |
+
from .registry import CONV_LAYERS
|
| 5 |
+
|
| 6 |
+
# Register PyTorch's convolution variants; 'Conv' is an alias for Conv2d.
CONV_LAYERS.register_module('Conv1d', module=nn.Conv1d)
CONV_LAYERS.register_module('Conv2d', module=nn.Conv2d)
CONV_LAYERS.register_module('Conv3d', module=nn.Conv3d)
CONV_LAYERS.register_module('Conv', module=nn.Conv2d)
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def build_conv_layer(cfg, *args, **kwargs):
    """Build convolution layer.

    Args:
        cfg (None or dict): The conv layer config, which should contain:
            - type (str): Layer type.
            - layer args: Args needed to instantiate a conv layer.
        args (argument list): Arguments passed to the `__init__`
            method of the corresponding conv layer.
        kwargs (keyword arguments): Keyword arguments passed to the `__init__`
            method of the corresponding conv layer.

    Returns:
        nn.Module: Created conv layer.

    Raises:
        TypeError: If ``cfg`` is neither ``None`` nor a dict.
        KeyError: If ``cfg`` lacks a 'type' key or the type is unregistered.
    """
    if cfg is None:
        # Default to a plain 2D convolution when no config is given.
        cfg_ = dict(type='Conv2d')
    else:
        if not isinstance(cfg, dict):
            raise TypeError('cfg must be a dict')
        if 'type' not in cfg:
            raise KeyError('the cfg dict must contain the key "type"')
        cfg_ = cfg.copy()

    layer_type = cfg_.pop('type')
    if layer_type not in CONV_LAYERS:
        # Bug fix: the original message said "norm type", a copy-paste
        # leftover from the norm builder, which misled users of this
        # conv-layer builder.
        raise KeyError(f'Unrecognized conv type {layer_type}')
    conv_layer = CONV_LAYERS.get(layer_type)

    # Remaining cfg entries are forwarded as extra keyword arguments.
    layer = conv_layer(*args, **kwargs, **cfg_)

    return layer
|
RAVE-main/annotator/mmpkg/mmcv/cnn/bricks/conv2d_adaptive_padding.py
ADDED
|
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import math
|
| 3 |
+
|
| 4 |
+
from torch import nn
|
| 5 |
+
from torch.nn import functional as F
|
| 6 |
+
|
| 7 |
+
from .registry import CONV_LAYERS
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
@CONV_LAYERS.register_module()
class Conv2dAdaptivePadding(nn.Conv2d):
    """2D convolution with TensorFlow-style "same" adaptive padding.

    Pads the input on the fly (when needed) so the filter and stride fully
    cover the image: with stride 1 the output keeps the input's spatial
    size, with stride 2 each spatial dimension is halved, and so on.

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the convolving kernel
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to both sides of
            the input. Default: 0
        dilation (int or tuple, optional): Spacing between kernel elements.
            Default: 1
        groups (int, optional): Number of blocked connections from input
            channels to output channels. Default: 1
        bias (bool, optional): If ``True``, adds a learnable bias to the
            output. Default: ``True``
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True):
        # Force the parent's static padding to 0; the actual padding is
        # computed per input in ``forward``.
        super().__init__(in_channels, out_channels, kernel_size, stride, 0,
                         dilation, groups, bias)

    def forward(self, x):
        in_h, in_w = x.size()[-2:]
        k_h, k_w = self.weight.size()[-2:]
        # Target output size under "same" semantics.
        out_h = math.ceil(in_h / self.stride[0])
        out_w = math.ceil(in_w / self.stride[1])
        # Total padding required to reach the target size (never negative).
        pad_h = max(
            (out_h - 1) * self.stride[0] + (k_h - 1) * self.dilation[0] + 1 -
            in_h, 0)
        pad_w = max(
            (out_w - 1) * self.stride[1] + (k_w - 1) * self.dilation[1] + 1 -
            in_w, 0)
        if pad_h or pad_w:
            # Split each total as evenly as possible; the extra pixel (if
            # any) goes to the bottom/right, matching TensorFlow.
            top = pad_h // 2
            left = pad_w // 2
            x = F.pad(x, [left, pad_w - left, top, pad_h - top])
        return F.conv2d(x, self.weight, self.bias, self.stride, self.padding,
                        self.dilation, self.groups)
|
RAVE-main/annotator/mmpkg/mmcv/cnn/bricks/conv_module.py
ADDED
|
@@ -0,0 +1,206 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import warnings
|
| 3 |
+
|
| 4 |
+
import torch.nn as nn
|
| 5 |
+
|
| 6 |
+
from annotator.mmpkg.mmcv.utils import _BatchNorm, _InstanceNorm
|
| 7 |
+
from ..utils import constant_init, kaiming_init
|
| 8 |
+
from .activation import build_activation_layer
|
| 9 |
+
from .conv import build_conv_layer
|
| 10 |
+
from .norm import build_norm_layer
|
| 11 |
+
from .padding import build_padding_layer
|
| 12 |
+
from .registry import PLUGIN_LAYERS
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
@PLUGIN_LAYERS.register_module()
|
| 16 |
+
class ConvModule(nn.Module):
|
| 17 |
+
"""A conv block that bundles conv/norm/activation layers.
|
| 18 |
+
|
| 19 |
+
This block simplifies the usage of convolution layers, which are commonly
|
| 20 |
+
used with a norm layer (e.g., BatchNorm) and activation layer (e.g., ReLU).
|
| 21 |
+
It is based upon three build methods: `build_conv_layer()`,
|
| 22 |
+
`build_norm_layer()` and `build_activation_layer()`.
|
| 23 |
+
|
| 24 |
+
Besides, we add some additional features in this module.
|
| 25 |
+
1. Automatically set `bias` of the conv layer.
|
| 26 |
+
2. Spectral norm is supported.
|
| 27 |
+
3. More padding modes are supported. Before PyTorch 1.5, nn.Conv2d only
|
| 28 |
+
supports zero and circular padding, and we add "reflect" padding mode.
|
| 29 |
+
|
| 30 |
+
Args:
|
| 31 |
+
in_channels (int): Number of channels in the input feature map.
|
| 32 |
+
Same as that in ``nn._ConvNd``.
|
| 33 |
+
out_channels (int): Number of channels produced by the convolution.
|
| 34 |
+
Same as that in ``nn._ConvNd``.
|
| 35 |
+
kernel_size (int | tuple[int]): Size of the convolving kernel.
|
| 36 |
+
Same as that in ``nn._ConvNd``.
|
| 37 |
+
stride (int | tuple[int]): Stride of the convolution.
|
| 38 |
+
Same as that in ``nn._ConvNd``.
|
| 39 |
+
padding (int | tuple[int]): Zero-padding added to both sides of
|
| 40 |
+
the input. Same as that in ``nn._ConvNd``.
|
| 41 |
+
dilation (int | tuple[int]): Spacing between kernel elements.
|
| 42 |
+
Same as that in ``nn._ConvNd``.
|
| 43 |
+
groups (int): Number of blocked connections from input channels to
|
| 44 |
+
output channels. Same as that in ``nn._ConvNd``.
|
| 45 |
+
bias (bool | str): If specified as `auto`, it will be decided by the
|
| 46 |
+
norm_cfg. Bias will be set as True if `norm_cfg` is None, otherwise
|
| 47 |
+
False. Default: "auto".
|
| 48 |
+
conv_cfg (dict): Config dict for convolution layer. Default: None,
|
| 49 |
+
which means using conv2d.
|
| 50 |
+
norm_cfg (dict): Config dict for normalization layer. Default: None.
|
| 51 |
+
act_cfg (dict): Config dict for activation layer.
|
| 52 |
+
Default: dict(type='ReLU').
|
| 53 |
+
inplace (bool): Whether to use inplace mode for activation.
|
| 54 |
+
Default: True.
|
| 55 |
+
with_spectral_norm (bool): Whether use spectral norm in conv module.
|
| 56 |
+
Default: False.
|
| 57 |
+
padding_mode (str): If the `padding_mode` has not been supported by
|
| 58 |
+
current `Conv2d` in PyTorch, we will use our own padding layer
|
| 59 |
+
instead. Currently, we support ['zeros', 'circular'] with official
|
| 60 |
+
implementation and ['reflect'] with our own implementation.
|
| 61 |
+
Default: 'zeros'.
|
| 62 |
+
order (tuple[str]): The order of conv/norm/activation layers. It is a
|
| 63 |
+
sequence of "conv", "norm" and "act". Common examples are
|
| 64 |
+
("conv", "norm", "act") and ("act", "conv", "norm").
|
| 65 |
+
Default: ('conv', 'norm', 'act').
|
| 66 |
+
"""
|
| 67 |
+
|
| 68 |
+
_abbr_ = 'conv_block'
|
| 69 |
+
|
| 70 |
+
def __init__(self,
             in_channels,
             out_channels,
             kernel_size,
             stride=1,
             padding=0,
             dilation=1,
             groups=1,
             bias='auto',
             conv_cfg=None,
             norm_cfg=None,
             act_cfg=dict(type='ReLU'),
             inplace=True,
             with_spectral_norm=False,
             padding_mode='zeros',
             order=('conv', 'norm', 'act')):
    """Build the conv/norm/activation bundle described by the configs.

    See the class docstring for the meaning of each argument.
    """
    super(ConvModule, self).__init__()
    # Each config is either omitted (None) or a dict consumed by the
    # corresponding build_* helper.
    assert conv_cfg is None or isinstance(conv_cfg, dict)
    assert norm_cfg is None or isinstance(norm_cfg, dict)
    assert act_cfg is None or isinstance(act_cfg, dict)
    # Padding modes handled natively by PyTorch's Conv2d; any other mode
    # (e.g. 'reflect') is implemented with an explicit padding layer below.
    official_padding_mode = ['zeros', 'circular']
    self.conv_cfg = conv_cfg
    self.norm_cfg = norm_cfg
    self.act_cfg = act_cfg
    self.inplace = inplace
    self.with_spectral_norm = with_spectral_norm
    self.with_explicit_padding = padding_mode not in official_padding_mode
    self.order = order
    # Order must be a permutation of exactly these three stages.
    assert isinstance(self.order, tuple) and len(self.order) == 3
    assert set(order) == set(['conv', 'norm', 'act'])

    self.with_norm = norm_cfg is not None
    self.with_activation = act_cfg is not None
    # if the conv layer is before a norm layer, bias is unnecessary.
    if bias == 'auto':
        bias = not self.with_norm
    self.with_bias = bias

    if self.with_explicit_padding:
        pad_cfg = dict(type=padding_mode)
        self.padding_layer = build_padding_layer(pad_cfg, padding)

    # reset padding to 0 for conv module (the explicit padding layer
    # already adds the requested border)
    conv_padding = 0 if self.with_explicit_padding else padding
    # build convolution layer
    self.conv = build_conv_layer(
        conv_cfg,
        in_channels,
        out_channels,
        kernel_size,
        stride=stride,
        padding=conv_padding,
        dilation=dilation,
        groups=groups,
        bias=bias)
    # export the attributes of self.conv to a higher level for convenience
    self.in_channels = self.conv.in_channels
    self.out_channels = self.conv.out_channels
    self.kernel_size = self.conv.kernel_size
    self.stride = self.conv.stride
    # NOTE: this is the user-requested padding; it may differ from
    # self.conv.padding when an explicit padding layer is in use.
    self.padding = padding
    self.dilation = self.conv.dilation
    self.transposed = self.conv.transposed
    self.output_padding = self.conv.output_padding
    self.groups = self.conv.groups

    if self.with_spectral_norm:
        self.conv = nn.utils.spectral_norm(self.conv)

    # build normalization layers
    if self.with_norm:
        # norm layer is after conv layer, so it normalizes conv output;
        # otherwise it sees the raw input channels
        if order.index('norm') > order.index('conv'):
            norm_channels = out_channels
        else:
            norm_channels = in_channels
        self.norm_name, norm = build_norm_layer(norm_cfg, norm_channels)
        self.add_module(self.norm_name, norm)
        if self.with_bias:
            # The norm's affine shift makes a conv bias redundant.
            if isinstance(norm, (_BatchNorm, _InstanceNorm)):
                warnings.warn(
                    'Unnecessary conv bias before batch/instance norm')
    else:
        self.norm_name = None

    # build activation layer
    if self.with_activation:
        act_cfg_ = act_cfg.copy()
        # nn.Tanh has no 'inplace' argument (nor do the others listed)
        if act_cfg_['type'] not in [
                'Tanh', 'PReLU', 'Sigmoid', 'HSigmoid', 'Swish'
        ]:
            act_cfg_.setdefault('inplace', inplace)
        self.activate = build_activation_layer(act_cfg_)

    # Use msra init by default
    self.init_weights()
|
| 167 |
+
|
| 168 |
+
@property
def norm(self):
    """The normalization layer, or ``None`` when no norm is configured."""
    return getattr(self, self.norm_name) if self.norm_name else None
|
| 174 |
+
|
| 175 |
+
def init_weights(self):
    """Initialize the conv (Kaiming) and norm (constant) weights.

    Custom conv layers that expose their own ``init_weights()`` are left
    to initialize themselves and are not touched here. Everything else,
    including PyTorch's built-in conv layers, is (re-)initialized with
    the default ``kaiming_init``.
    """
    if not hasattr(self.conv, 'init_weights'):
        leaky = (self.with_activation
                 and self.act_cfg['type'] == 'LeakyReLU')
        if leaky:
            slope = self.act_cfg.get('negative_slope', 0.01)
            kaiming_init(self.conv, a=slope, nonlinearity='leaky_relu')
        else:
            kaiming_init(self.conv, a=0, nonlinearity='relu')
    if self.with_norm:
        constant_init(self.norm, 1, bias=0)
|
| 195 |
+
|
| 196 |
+
def forward(self, x, activate=True, norm=True):
    """Run conv/norm/activation in the configured ``self.order``.

    Args:
        x: Input feature map.
        activate (bool): Skip the activation stage when False.
            Default: True.
        norm (bool): Skip the normalization stage when False.
            Default: True.

    Returns:
        The transformed feature map.
    """
    for stage in self.order:
        if stage == 'conv':
            if self.with_explicit_padding:
                x = self.padding_layer(x)
            x = self.conv(x)
        elif stage == 'norm':
            if norm and self.with_norm:
                x = self.norm(x)
        elif stage == 'act':
            if activate and self.with_activation:
                x = self.activate(x)
    return x
|
RAVE-main/annotator/mmpkg/mmcv/cnn/bricks/depthwise_separable_conv_module.py
ADDED
|
@@ -0,0 +1,96 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
|
| 4 |
+
from .conv_module import ConvModule
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class DepthwiseSeparableConvModule(nn.Module):
    """Depthwise separable convolution module.

    See https://arxiv.org/pdf/1704.04861.pdf for details.

    Replaces a single ConvModule with a depthwise ConvModule (one filter
    per input channel, i.e. ``groups == in_channels``) followed by a 1x1
    pointwise ConvModule that mixes channels. Each of the two blocks gets
    its own norm/activation layers when `norm_cfg` and `act_cfg` are
    specified.

    Args:
        in_channels (int): Number of channels in the input feature map.
            Same as that in ``nn._ConvNd``.
        out_channels (int): Number of channels produced by the convolution.
            Same as that in ``nn._ConvNd``.
        kernel_size (int | tuple[int]): Size of the depthwise kernel.
            Same as that in ``nn._ConvNd``.
        stride (int | tuple[int]): Stride of the depthwise conv.
            Same as that in ``nn._ConvNd``. Default: 1.
        padding (int | tuple[int]): Zero-padding of the depthwise conv.
            Same as that in ``nn._ConvNd``. Default: 0.
        dilation (int | tuple[int]): Dilation of the depthwise conv.
            Same as that in ``nn._ConvNd``. Default: 1.
        norm_cfg (dict): Norm config shared by both ConvModules unless
            overridden below. Default: None.
        act_cfg (dict): Activation config shared by both ConvModules unless
            overridden below. Default: dict(type='ReLU').
        dw_norm_cfg (dict | str): Norm config of the depthwise ConvModule;
            'default' means inherit `norm_cfg`. Default: 'default'.
        dw_act_cfg (dict | str): Activation config of the depthwise
            ConvModule; 'default' means inherit `act_cfg`.
            Default: 'default'.
        pw_norm_cfg (dict | str): Norm config of the pointwise ConvModule;
            'default' means inherit `norm_cfg`. Default: 'default'.
        pw_act_cfg (dict | str): Activation config of the pointwise
            ConvModule; 'default' means inherit `act_cfg`.
            Default: 'default'.
        kwargs (optional): Other shared arguments forwarded to both
            ConvModules. See ConvModule for reference.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 norm_cfg=None,
                 act_cfg=dict(type='ReLU'),
                 dw_norm_cfg='default',
                 dw_act_cfg='default',
                 pw_norm_cfg='default',
                 pw_act_cfg='default',
                 **kwargs):
        super(DepthwiseSeparableConvModule, self).__init__()
        # `groups` is fixed internally (depthwise uses in_channels).
        assert 'groups' not in kwargs, 'groups should not be specified'

        # The 'default' sentinel means "inherit the shared config".
        if dw_norm_cfg == 'default':
            dw_norm_cfg = norm_cfg
        if dw_act_cfg == 'default':
            dw_act_cfg = act_cfg
        if pw_norm_cfg == 'default':
            pw_norm_cfg = norm_cfg
        if pw_act_cfg == 'default':
            pw_act_cfg = act_cfg

        # Depthwise stage: spatial filtering within each channel.
        self.depthwise_conv = ConvModule(
            in_channels,
            in_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=in_channels,
            norm_cfg=dw_norm_cfg,
            act_cfg=dw_act_cfg,
            **kwargs)

        # Pointwise stage: 1x1 conv mixing channels to out_channels.
        self.pointwise_conv = ConvModule(
            in_channels,
            out_channels,
            1,
            norm_cfg=pw_norm_cfg,
            act_cfg=pw_act_cfg,
            **kwargs)

    def forward(self, x):
        """Apply the depthwise block, then the pointwise block."""
        return self.pointwise_conv(self.depthwise_conv(x))
|
RAVE-main/annotator/mmpkg/mmcv/cnn/bricks/drop.py
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import torch
|
| 3 |
+
import torch.nn as nn
|
| 4 |
+
|
| 5 |
+
from annotator.mmpkg.mmcv import build_from_cfg
|
| 6 |
+
from .registry import DROPOUT_LAYERS
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def drop_path(x, drop_prob=0., training=False):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of
    residual blocks).

    We follow the implementation
    https://github.com/rwightman/pytorch-image-models/blob/a2727c1bf78ba0d7b5727f5f95e37fb7f8866b1f/timm/models/layers/drop.py # noqa: E501
    """
    # Identity in eval mode or when dropping is disabled.
    if drop_prob == 0. or not training:
        return x
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample, broadcast across all remaining dims,
    # so the same implementation works for tensors of any rank.
    mask_shape = (x.shape[0], ) + (1, ) * (x.ndim - 1)
    mask = torch.rand(mask_shape, dtype=x.dtype, device=x.device)
    mask = (keep_prob + mask).floor()
    # Scale the surviving paths by 1/keep_prob so the expectation matches.
    return x.div(keep_prob) * mask
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
@DROPOUT_LAYERS.register_module()
class DropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of
    residual blocks).

    We follow the implementation
    https://github.com/rwightman/pytorch-image-models/blob/a2727c1bf78ba0d7b5727f5f95e37fb7f8866b1f/timm/models/layers/drop.py # noqa: E501

    Args:
        drop_prob (float): Probability of the path to be zeroed. Default: 0.1
    """

    def __init__(self, drop_prob=0.1):
        super(DropPath, self).__init__()
        # Stored as a plain attribute; the actual dropping is delegated to
        # the functional `drop_path` and gated on `self.training`.
        self.drop_prob = drop_prob

    def forward(self, x):
        """Apply stochastic depth to `x` (identity in eval mode)."""
        return drop_path(x, self.drop_prob, self.training)
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
@DROPOUT_LAYERS.register_module()
class Dropout(nn.Dropout):
    """A wrapper for ``torch.nn.Dropout``, We rename the ``p`` of
    ``torch.nn.Dropout`` to ``drop_prob`` so as to be consistent with
    ``DropPath``

    Args:
        drop_prob (float): Probability of the elements to be
            zeroed. Default: 0.5.
        inplace (bool): Do the operation inplace or not. Default: False.
    """

    def __init__(self, drop_prob=0.5, inplace=False):
        # Only the argument name differs from nn.Dropout; behavior is
        # inherited unchanged.
        super(Dropout, self).__init__(p=drop_prob, inplace=inplace)
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def build_dropout(cfg, default_args=None):
    """Builder for drop out layers.

    Args:
        cfg (dict): Config dict whose ``type`` key names a layer registered
            in ``DROPOUT_LAYERS`` (e.g. ``DropPath``, ``Dropout``); the
            remaining keys are passed to its constructor.
        default_args (dict, optional): Default arguments merged into ``cfg``
            by ``build_from_cfg``. Default: None.

    Returns:
        The dropout layer instantiated from ``cfg``.
    """
    return build_from_cfg(cfg, DROPOUT_LAYERS, default_args)
|
RAVE-main/annotator/mmpkg/mmcv/cnn/bricks/generalized_attention.py
ADDED
|
@@ -0,0 +1,412 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import math
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
import torch
|
| 6 |
+
import torch.nn as nn
|
| 7 |
+
import torch.nn.functional as F
|
| 8 |
+
|
| 9 |
+
from ..utils import kaiming_init
|
| 10 |
+
from .registry import PLUGIN_LAYERS
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
@PLUGIN_LAYERS.register_module()
|
| 14 |
+
class GeneralizedAttention(nn.Module):
|
| 15 |
+
"""GeneralizedAttention module.
|
| 16 |
+
|
| 17 |
+
See 'An Empirical Study of Spatial Attention Mechanisms in Deep Networks'
|
| 18 |
+
(https://arxiv.org/abs/1711.07971) for details.
|
| 19 |
+
|
| 20 |
+
Args:
|
| 21 |
+
in_channels (int): Channels of the input feature map.
|
| 22 |
+
spatial_range (int): The spatial range. -1 indicates no spatial range
|
| 23 |
+
constraint. Default: -1.
|
| 24 |
+
num_heads (int): The head number of empirical_attention module.
|
| 25 |
+
Default: 9.
|
| 26 |
+
position_embedding_dim (int): The position embedding dimension.
|
| 27 |
+
Default: -1.
|
| 28 |
+
position_magnitude (int): A multiplier acting on coord difference.
|
| 29 |
+
Default: 1.
|
| 30 |
+
kv_stride (int): The feature stride acting on key/value feature map.
|
| 31 |
+
Default: 2.
|
| 32 |
+
q_stride (int): The feature stride acting on query feature map.
|
| 33 |
+
Default: 1.
|
| 34 |
+
attention_type (str): A binary indicator string for indicating which
|
| 35 |
+
items in generalized empirical_attention module are used.
|
| 36 |
+
Default: '1111'.
|
| 37 |
+
|
| 38 |
+
- '1000' indicates 'query and key content' (appr - appr) item,
|
| 39 |
+
- '0100' indicates 'query content and relative position'
|
| 40 |
+
(appr - position) item,
|
| 41 |
+
- '0010' indicates 'key content only' (bias - appr) item,
|
| 42 |
+
- '0001' indicates 'relative position only' (bias - position) item.
|
| 43 |
+
"""
|
| 44 |
+
|
| 45 |
+
_abbr_ = 'gen_attention_block'
|
| 46 |
+
|
| 47 |
+
def __init__(self,
|
| 48 |
+
in_channels,
|
| 49 |
+
spatial_range=-1,
|
| 50 |
+
num_heads=9,
|
| 51 |
+
position_embedding_dim=-1,
|
| 52 |
+
position_magnitude=1,
|
| 53 |
+
kv_stride=2,
|
| 54 |
+
q_stride=1,
|
| 55 |
+
attention_type='1111'):
|
| 56 |
+
|
| 57 |
+
super(GeneralizedAttention, self).__init__()
|
| 58 |
+
|
| 59 |
+
# hard range means local range for non-local operation
|
| 60 |
+
self.position_embedding_dim = (
|
| 61 |
+
position_embedding_dim
|
| 62 |
+
if position_embedding_dim > 0 else in_channels)
|
| 63 |
+
|
| 64 |
+
self.position_magnitude = position_magnitude
|
| 65 |
+
self.num_heads = num_heads
|
| 66 |
+
self.in_channels = in_channels
|
| 67 |
+
self.spatial_range = spatial_range
|
| 68 |
+
self.kv_stride = kv_stride
|
| 69 |
+
self.q_stride = q_stride
|
| 70 |
+
self.attention_type = [bool(int(_)) for _ in attention_type]
|
| 71 |
+
self.qk_embed_dim = in_channels // num_heads
|
| 72 |
+
out_c = self.qk_embed_dim * num_heads
|
| 73 |
+
|
| 74 |
+
if self.attention_type[0] or self.attention_type[1]:
|
| 75 |
+
self.query_conv = nn.Conv2d(
|
| 76 |
+
in_channels=in_channels,
|
| 77 |
+
out_channels=out_c,
|
| 78 |
+
kernel_size=1,
|
| 79 |
+
bias=False)
|
| 80 |
+
self.query_conv.kaiming_init = True
|
| 81 |
+
|
| 82 |
+
if self.attention_type[0] or self.attention_type[2]:
|
| 83 |
+
self.key_conv = nn.Conv2d(
|
| 84 |
+
in_channels=in_channels,
|
| 85 |
+
out_channels=out_c,
|
| 86 |
+
kernel_size=1,
|
| 87 |
+
bias=False)
|
| 88 |
+
self.key_conv.kaiming_init = True
|
| 89 |
+
|
| 90 |
+
self.v_dim = in_channels // num_heads
|
| 91 |
+
self.value_conv = nn.Conv2d(
|
| 92 |
+
in_channels=in_channels,
|
| 93 |
+
out_channels=self.v_dim * num_heads,
|
| 94 |
+
kernel_size=1,
|
| 95 |
+
bias=False)
|
| 96 |
+
self.value_conv.kaiming_init = True
|
| 97 |
+
|
| 98 |
+
if self.attention_type[1] or self.attention_type[3]:
|
| 99 |
+
self.appr_geom_fc_x = nn.Linear(
|
| 100 |
+
self.position_embedding_dim // 2, out_c, bias=False)
|
| 101 |
+
self.appr_geom_fc_x.kaiming_init = True
|
| 102 |
+
|
| 103 |
+
self.appr_geom_fc_y = nn.Linear(
|
| 104 |
+
self.position_embedding_dim // 2, out_c, bias=False)
|
| 105 |
+
self.appr_geom_fc_y.kaiming_init = True
|
| 106 |
+
|
| 107 |
+
if self.attention_type[2]:
|
| 108 |
+
stdv = 1.0 / math.sqrt(self.qk_embed_dim * 2)
|
| 109 |
+
appr_bias_value = -2 * stdv * torch.rand(out_c) + stdv
|
| 110 |
+
self.appr_bias = nn.Parameter(appr_bias_value)
|
| 111 |
+
|
| 112 |
+
if self.attention_type[3]:
|
| 113 |
+
stdv = 1.0 / math.sqrt(self.qk_embed_dim * 2)
|
| 114 |
+
geom_bias_value = -2 * stdv * torch.rand(out_c) + stdv
|
| 115 |
+
self.geom_bias = nn.Parameter(geom_bias_value)
|
| 116 |
+
|
| 117 |
+
self.proj_conv = nn.Conv2d(
|
| 118 |
+
in_channels=self.v_dim * num_heads,
|
| 119 |
+
out_channels=in_channels,
|
| 120 |
+
kernel_size=1,
|
| 121 |
+
bias=True)
|
| 122 |
+
self.proj_conv.kaiming_init = True
|
| 123 |
+
self.gamma = nn.Parameter(torch.zeros(1))
|
| 124 |
+
|
| 125 |
+
if self.spatial_range >= 0:
|
| 126 |
+
# only works when non local is after 3*3 conv
|
| 127 |
+
if in_channels == 256:
|
| 128 |
+
max_len = 84
|
| 129 |
+
elif in_channels == 512:
|
| 130 |
+
max_len = 42
|
| 131 |
+
|
| 132 |
+
max_len_kv = int((max_len - 1.0) / self.kv_stride + 1)
|
| 133 |
+
local_constraint_map = np.ones(
|
| 134 |
+
(max_len, max_len, max_len_kv, max_len_kv), dtype=np.int)
|
| 135 |
+
for iy in range(max_len):
|
| 136 |
+
for ix in range(max_len):
|
| 137 |
+
local_constraint_map[
|
| 138 |
+
iy, ix,
|
| 139 |
+
max((iy - self.spatial_range) //
|
| 140 |
+
self.kv_stride, 0):min((iy + self.spatial_range +
|
| 141 |
+
1) // self.kv_stride +
|
| 142 |
+
1, max_len),
|
| 143 |
+
max((ix - self.spatial_range) //
|
| 144 |
+
self.kv_stride, 0):min((ix + self.spatial_range +
|
| 145 |
+
1) // self.kv_stride +
|
| 146 |
+
1, max_len)] = 0
|
| 147 |
+
|
| 148 |
+
self.local_constraint_map = nn.Parameter(
|
| 149 |
+
torch.from_numpy(local_constraint_map).byte(),
|
| 150 |
+
requires_grad=False)
|
| 151 |
+
|
| 152 |
+
if self.q_stride > 1:
|
| 153 |
+
self.q_downsample = nn.AvgPool2d(
|
| 154 |
+
kernel_size=1, stride=self.q_stride)
|
| 155 |
+
else:
|
| 156 |
+
self.q_downsample = None
|
| 157 |
+
|
| 158 |
+
if self.kv_stride > 1:
|
| 159 |
+
self.kv_downsample = nn.AvgPool2d(
|
| 160 |
+
kernel_size=1, stride=self.kv_stride)
|
| 161 |
+
else:
|
| 162 |
+
self.kv_downsample = None
|
| 163 |
+
|
| 164 |
+
self.init_weights()
|
| 165 |
+
|
| 166 |
+
def get_position_embedding(self,
|
| 167 |
+
h,
|
| 168 |
+
w,
|
| 169 |
+
h_kv,
|
| 170 |
+
w_kv,
|
| 171 |
+
q_stride,
|
| 172 |
+
kv_stride,
|
| 173 |
+
device,
|
| 174 |
+
dtype,
|
| 175 |
+
feat_dim,
|
| 176 |
+
wave_length=1000):
|
| 177 |
+
# the default type of Tensor is float32, leading to type mismatch
|
| 178 |
+
# in fp16 mode. Cast it to support fp16 mode.
|
| 179 |
+
h_idxs = torch.linspace(0, h - 1, h).to(device=device, dtype=dtype)
|
| 180 |
+
h_idxs = h_idxs.view((h, 1)) * q_stride
|
| 181 |
+
|
| 182 |
+
w_idxs = torch.linspace(0, w - 1, w).to(device=device, dtype=dtype)
|
| 183 |
+
w_idxs = w_idxs.view((w, 1)) * q_stride
|
| 184 |
+
|
| 185 |
+
h_kv_idxs = torch.linspace(0, h_kv - 1, h_kv).to(
|
| 186 |
+
device=device, dtype=dtype)
|
| 187 |
+
h_kv_idxs = h_kv_idxs.view((h_kv, 1)) * kv_stride
|
| 188 |
+
|
| 189 |
+
w_kv_idxs = torch.linspace(0, w_kv - 1, w_kv).to(
|
| 190 |
+
device=device, dtype=dtype)
|
| 191 |
+
w_kv_idxs = w_kv_idxs.view((w_kv, 1)) * kv_stride
|
| 192 |
+
|
| 193 |
+
# (h, h_kv, 1)
|
| 194 |
+
h_diff = h_idxs.unsqueeze(1) - h_kv_idxs.unsqueeze(0)
|
| 195 |
+
h_diff *= self.position_magnitude
|
| 196 |
+
|
| 197 |
+
# (w, w_kv, 1)
|
| 198 |
+
w_diff = w_idxs.unsqueeze(1) - w_kv_idxs.unsqueeze(0)
|
| 199 |
+
w_diff *= self.position_magnitude
|
| 200 |
+
|
| 201 |
+
feat_range = torch.arange(0, feat_dim / 4).to(
|
| 202 |
+
device=device, dtype=dtype)
|
| 203 |
+
|
| 204 |
+
dim_mat = torch.Tensor([wave_length]).to(device=device, dtype=dtype)
|
| 205 |
+
dim_mat = dim_mat**((4. / feat_dim) * feat_range)
|
| 206 |
+
dim_mat = dim_mat.view((1, 1, -1))
|
| 207 |
+
|
| 208 |
+
embedding_x = torch.cat(
|
| 209 |
+
((w_diff / dim_mat).sin(), (w_diff / dim_mat).cos()), dim=2)
|
| 210 |
+
|
| 211 |
+
embedding_y = torch.cat(
|
| 212 |
+
((h_diff / dim_mat).sin(), (h_diff / dim_mat).cos()), dim=2)
|
| 213 |
+
|
| 214 |
+
return embedding_x, embedding_y
|
| 215 |
+
|
| 216 |
+
def forward(self, x_input):
|
| 217 |
+
num_heads = self.num_heads
|
| 218 |
+
|
| 219 |
+
# use empirical_attention
|
| 220 |
+
if self.q_downsample is not None:
|
| 221 |
+
x_q = self.q_downsample(x_input)
|
| 222 |
+
else:
|
| 223 |
+
x_q = x_input
|
| 224 |
+
n, _, h, w = x_q.shape
|
| 225 |
+
|
| 226 |
+
if self.kv_downsample is not None:
|
| 227 |
+
x_kv = self.kv_downsample(x_input)
|
| 228 |
+
else:
|
| 229 |
+
x_kv = x_input
|
| 230 |
+
_, _, h_kv, w_kv = x_kv.shape
|
| 231 |
+
|
| 232 |
+
if self.attention_type[0] or self.attention_type[1]:
|
| 233 |
+
proj_query = self.query_conv(x_q).view(
|
| 234 |
+
(n, num_heads, self.qk_embed_dim, h * w))
|
| 235 |
+
proj_query = proj_query.permute(0, 1, 3, 2)
|
| 236 |
+
|
| 237 |
+
if self.attention_type[0] or self.attention_type[2]:
|
| 238 |
+
proj_key = self.key_conv(x_kv).view(
|
| 239 |
+
(n, num_heads, self.qk_embed_dim, h_kv * w_kv))
|
| 240 |
+
|
| 241 |
+
if self.attention_type[1] or self.attention_type[3]:
|
| 242 |
+
position_embed_x, position_embed_y = self.get_position_embedding(
|
| 243 |
+
h, w, h_kv, w_kv, self.q_stride, self.kv_stride,
|
| 244 |
+
x_input.device, x_input.dtype, self.position_embedding_dim)
|
| 245 |
+
# (n, num_heads, w, w_kv, dim)
|
| 246 |
+
position_feat_x = self.appr_geom_fc_x(position_embed_x).\
|
| 247 |
+
view(1, w, w_kv, num_heads, self.qk_embed_dim).\
|
| 248 |
+
permute(0, 3, 1, 2, 4).\
|
| 249 |
+
repeat(n, 1, 1, 1, 1)
|
| 250 |
+
|
| 251 |
+
# (n, num_heads, h, h_kv, dim)
|
| 252 |
+
position_feat_y = self.appr_geom_fc_y(position_embed_y).\
|
| 253 |
+
view(1, h, h_kv, num_heads, self.qk_embed_dim).\
|
| 254 |
+
permute(0, 3, 1, 2, 4).\
|
| 255 |
+
repeat(n, 1, 1, 1, 1)
|
| 256 |
+
|
| 257 |
+
position_feat_x /= math.sqrt(2)
|
| 258 |
+
position_feat_y /= math.sqrt(2)
|
| 259 |
+
|
| 260 |
+
# accelerate for saliency only
|
| 261 |
+
if (np.sum(self.attention_type) == 1) and self.attention_type[2]:
|
| 262 |
+
appr_bias = self.appr_bias.\
|
| 263 |
+
view(1, num_heads, 1, self.qk_embed_dim).\
|
| 264 |
+
repeat(n, 1, 1, 1)
|
| 265 |
+
|
| 266 |
+
energy = torch.matmul(appr_bias, proj_key).\
|
| 267 |
+
view(n, num_heads, 1, h_kv * w_kv)
|
| 268 |
+
|
| 269 |
+
h = 1
|
| 270 |
+
w = 1
|
| 271 |
+
else:
|
| 272 |
+
# (n, num_heads, h*w, h_kv*w_kv), query before key, 540mb for
|
| 273 |
+
if not self.attention_type[0]:
|
| 274 |
+
energy = torch.zeros(
|
| 275 |
+
n,
|
| 276 |
+
num_heads,
|
| 277 |
+
h,
|
| 278 |
+
w,
|
| 279 |
+
h_kv,
|
| 280 |
+
w_kv,
|
| 281 |
+
dtype=x_input.dtype,
|
| 282 |
+
device=x_input.device)
|
| 283 |
+
|
| 284 |
+
# attention_type[0]: appr - appr
|
| 285 |
+
# attention_type[1]: appr - position
|
| 286 |
+
# attention_type[2]: bias - appr
|
| 287 |
+
# attention_type[3]: bias - position
|
| 288 |
+
if self.attention_type[0] or self.attention_type[2]:
|
| 289 |
+
if self.attention_type[0] and self.attention_type[2]:
|
| 290 |
+
appr_bias = self.appr_bias.\
|
| 291 |
+
view(1, num_heads, 1, self.qk_embed_dim)
|
| 292 |
+
energy = torch.matmul(proj_query + appr_bias, proj_key).\
|
| 293 |
+
view(n, num_heads, h, w, h_kv, w_kv)
|
| 294 |
+
|
| 295 |
+
elif self.attention_type[0]:
|
| 296 |
+
energy = torch.matmul(proj_query, proj_key).\
|
| 297 |
+
view(n, num_heads, h, w, h_kv, w_kv)
|
| 298 |
+
|
| 299 |
+
elif self.attention_type[2]:
|
| 300 |
+
appr_bias = self.appr_bias.\
|
| 301 |
+
view(1, num_heads, 1, self.qk_embed_dim).\
|
| 302 |
+
repeat(n, 1, 1, 1)
|
| 303 |
+
|
| 304 |
+
energy += torch.matmul(appr_bias, proj_key).\
|
| 305 |
+
view(n, num_heads, 1, 1, h_kv, w_kv)
|
| 306 |
+
|
| 307 |
+
if self.attention_type[1] or self.attention_type[3]:
|
| 308 |
+
if self.attention_type[1] and self.attention_type[3]:
|
| 309 |
+
geom_bias = self.geom_bias.\
|
| 310 |
+
view(1, num_heads, 1, self.qk_embed_dim)
|
| 311 |
+
|
| 312 |
+
proj_query_reshape = (proj_query + geom_bias).\
|
| 313 |
+
view(n, num_heads, h, w, self.qk_embed_dim)
|
| 314 |
+
|
| 315 |
+
energy_x = torch.matmul(
|
| 316 |
+
proj_query_reshape.permute(0, 1, 3, 2, 4),
|
| 317 |
+
position_feat_x.permute(0, 1, 2, 4, 3))
|
| 318 |
+
energy_x = energy_x.\
|
| 319 |
+
permute(0, 1, 3, 2, 4).unsqueeze(4)
|
| 320 |
+
|
| 321 |
+
energy_y = torch.matmul(
|
| 322 |
+
proj_query_reshape,
|
| 323 |
+
position_feat_y.permute(0, 1, 2, 4, 3))
|
| 324 |
+
energy_y = energy_y.unsqueeze(5)
|
| 325 |
+
|
| 326 |
+
energy += energy_x + energy_y
|
| 327 |
+
|
| 328 |
+
elif self.attention_type[1]:
|
| 329 |
+
proj_query_reshape = proj_query.\
|
| 330 |
+
view(n, num_heads, h, w, self.qk_embed_dim)
|
| 331 |
+
proj_query_reshape = proj_query_reshape.\
|
| 332 |
+
permute(0, 1, 3, 2, 4)
|
| 333 |
+
position_feat_x_reshape = position_feat_x.\
|
| 334 |
+
permute(0, 1, 2, 4, 3)
|
| 335 |
+
position_feat_y_reshape = position_feat_y.\
|
| 336 |
+
permute(0, 1, 2, 4, 3)
|
| 337 |
+
|
| 338 |
+
energy_x = torch.matmul(proj_query_reshape,
|
| 339 |
+
position_feat_x_reshape)
|
| 340 |
+
energy_x = energy_x.permute(0, 1, 3, 2, 4).unsqueeze(4)
|
| 341 |
+
|
| 342 |
+
energy_y = torch.matmul(proj_query_reshape,
|
| 343 |
+
position_feat_y_reshape)
|
| 344 |
+
energy_y = energy_y.unsqueeze(5)
|
| 345 |
+
|
| 346 |
+
energy += energy_x + energy_y
|
| 347 |
+
|
| 348 |
+
elif self.attention_type[3]:
|
| 349 |
+
geom_bias = self.geom_bias.\
|
| 350 |
+
view(1, num_heads, self.qk_embed_dim, 1).\
|
| 351 |
+
repeat(n, 1, 1, 1)
|
| 352 |
+
|
| 353 |
+
position_feat_x_reshape = position_feat_x.\
|
| 354 |
+
view(n, num_heads, w*w_kv, self.qk_embed_dim)
|
| 355 |
+
|
| 356 |
+
position_feat_y_reshape = position_feat_y.\
|
| 357 |
+
view(n, num_heads, h * h_kv, self.qk_embed_dim)
|
| 358 |
+
|
| 359 |
+
energy_x = torch.matmul(position_feat_x_reshape, geom_bias)
|
| 360 |
+
energy_x = energy_x.view(n, num_heads, 1, w, 1, w_kv)
|
| 361 |
+
|
| 362 |
+
energy_y = torch.matmul(position_feat_y_reshape, geom_bias)
|
| 363 |
+
energy_y = energy_y.view(n, num_heads, h, 1, h_kv, 1)
|
| 364 |
+
|
| 365 |
+
energy += energy_x + energy_y
|
| 366 |
+
|
| 367 |
+
energy = energy.view(n, num_heads, h * w, h_kv * w_kv)
|
| 368 |
+
|
| 369 |
+
if self.spatial_range >= 0:
|
| 370 |
+
cur_local_constraint_map = \
|
| 371 |
+
self.local_constraint_map[:h, :w, :h_kv, :w_kv].\
|
| 372 |
+
contiguous().\
|
| 373 |
+
view(1, 1, h*w, h_kv*w_kv)
|
| 374 |
+
|
| 375 |
+
energy = energy.masked_fill_(cur_local_constraint_map,
|
| 376 |
+
float('-inf'))
|
| 377 |
+
|
| 378 |
+
attention = F.softmax(energy, 3)
|
| 379 |
+
|
| 380 |
+
proj_value = self.value_conv(x_kv)
|
| 381 |
+
proj_value_reshape = proj_value.\
|
| 382 |
+
view((n, num_heads, self.v_dim, h_kv * w_kv)).\
|
| 383 |
+
permute(0, 1, 3, 2)
|
| 384 |
+
|
| 385 |
+
out = torch.matmul(attention, proj_value_reshape).\
|
| 386 |
+
permute(0, 1, 3, 2).\
|
| 387 |
+
contiguous().\
|
| 388 |
+
view(n, self.v_dim * self.num_heads, h, w)
|
| 389 |
+
|
| 390 |
+
out = self.proj_conv(out)
|
| 391 |
+
|
| 392 |
+
# output is downsampled, upsample back to input size
|
| 393 |
+
if self.q_downsample is not None:
|
| 394 |
+
out = F.interpolate(
|
| 395 |
+
out,
|
| 396 |
+
size=x_input.shape[2:],
|
| 397 |
+
mode='bilinear',
|
| 398 |
+
align_corners=False)
|
| 399 |
+
|
| 400 |
+
out = self.gamma * out + x_input
|
| 401 |
+
return out
|
| 402 |
+
|
| 403 |
+
    def init_weights(self):
        """Initialize convolution weights that opted in to Kaiming init.

        Walks every submodule and applies Kaiming uniform initialization
        (fan-in mode, leaky_relu nonlinearity with negative slope ``a=1``,
        zero bias) to any module that carries a truthy ``kaiming_init``
        attribute.
        """
        for m in self.modules():
            # Only modules explicitly flagged with `kaiming_init = True`
            # (set when the attention convs were constructed) are touched;
            # everything else keeps its default initialization.
            if hasattr(m, 'kaiming_init') and m.kaiming_init:
                kaiming_init(
                    m,
                    mode='fan_in',
                    nonlinearity='leaky_relu',
                    bias=0,
                    distribution='uniform',
                    a=1)
|
RAVE-main/annotator/mmpkg/mmcv/cnn/bricks/hsigmoid.py
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
|
| 4 |
+
from .registry import ACTIVATION_LAYERS
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@ACTIVATION_LAYERS.register_module()
class HSigmoid(nn.Module):
    """Hard sigmoid activation.

    Computes ``min(max((x + bias) / divisor, min_value), max_value)``,
    which with the default arguments reduces to
    ``min(max((x + 1) / 2, 0), 1)``.

    Args:
        bias (float): Offset added to the input. Default: 1.0.
        divisor (float): Value the shifted input is divided by; must be
            non-zero. Default: 2.0.
        min_value (float): Lower clamp bound. Default: 0.0.
        max_value (float): Upper clamp bound. Default: 1.0.

    Returns:
        Tensor: The output tensor.
    """

    def __init__(self, bias=1.0, divisor=2.0, min_value=0.0, max_value=1.0):
        super(HSigmoid, self).__init__()
        self.bias = bias
        self.divisor = divisor
        assert self.divisor != 0
        self.min_value = min_value
        self.max_value = max_value

    def forward(self, x):
        # Shift and rescale, then clamp in-place on the fresh intermediate.
        shifted = (x + self.bias) / self.divisor
        return shifted.clamp_(self.min_value, self.max_value)
|
RAVE-main/annotator/mmpkg/mmcv/cnn/bricks/hswish.py
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
|
| 4 |
+
from .registry import ACTIVATION_LAYERS
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@ACTIVATION_LAYERS.register_module()
class HSwish(nn.Module):
    """Hard Swish activation.

    Applies the hard swish function:

    .. math::
        Hswish(x) = x * ReLU6(x + 3) / 6

    Args:
        inplace (bool): Whether the internal ReLU6 operates in-place.
            Default: False.

    Returns:
        Tensor: The output tensor.
    """

    def __init__(self, inplace=False):
        super(HSwish, self).__init__()
        # ReLU6(x + 3) acts as the (scaled) hard gate.
        self.act = nn.ReLU6(inplace)

    def forward(self, x):
        gate = self.act(x + 3)
        return x * gate / 6
|
RAVE-main/annotator/mmpkg/mmcv/cnn/bricks/non_local.py
ADDED
|
@@ -0,0 +1,306 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from abc import ABCMeta
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
import torch.nn as nn
|
| 6 |
+
|
| 7 |
+
from ..utils import constant_init, normal_init
|
| 8 |
+
from .conv_module import ConvModule
|
| 9 |
+
from .registry import PLUGIN_LAYERS
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class _NonLocalNd(nn.Module, metaclass=ABCMeta):
    """Basic Non-local module.

    This module is proposed in
    "Non-local Neural Networks"
    Paper reference: https://arxiv.org/abs/1711.07971
    Code reference: https://github.com/AlexHex7/Non-local_pytorch

    Args:
        in_channels (int): Channels of the input feature map.
        reduction (int): Channel reduction ratio. Default: 2.
        use_scale (bool): Whether to scale pairwise_weight by
            `1/sqrt(inter_channels)` when the mode is `embedded_gaussian`.
            Default: True.
        conv_cfg (None | dict): The config dict for convolution layers.
            If not specified, it will use `nn.Conv2d` for convolution layers.
            Default: None.
        norm_cfg (None | dict): The config dict for normalization layers.
            Default: None. (This parameter is only applicable to conv_out.)
        mode (str): Options are `gaussian`, `concatenation`,
            `embedded_gaussian` and `dot_product`. Default: embedded_gaussian.
    """

    def __init__(self,
                 in_channels,
                 reduction=2,
                 use_scale=True,
                 conv_cfg=None,
                 norm_cfg=None,
                 mode='embedded_gaussian',
                 **kwargs):
        super(_NonLocalNd, self).__init__()
        self.in_channels = in_channels
        self.reduction = reduction
        self.use_scale = use_scale
        # Bottleneck width of the embedding convs; clamped to at least 1
        # so tiny `in_channels` with large `reduction` stays valid.
        self.inter_channels = max(in_channels // reduction, 1)
        self.mode = mode

        if mode not in [
                'gaussian', 'embedded_gaussian', 'dot_product', 'concatenation'
        ]:
            raise ValueError("Mode should be in 'gaussian', 'concatenation', "
                             f"'embedded_gaussian' or 'dot_product', but got "
                             f'{mode} instead.')

        # g, theta, phi are defaulted as `nn.ConvNd`.
        # Here we use ConvModule for potential usage.
        self.g = ConvModule(
            self.in_channels,
            self.inter_channels,
            kernel_size=1,
            conv_cfg=conv_cfg,
            act_cfg=None)
        self.conv_out = ConvModule(
            self.inter_channels,
            self.in_channels,
            kernel_size=1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=None)

        # In `gaussian` mode the raw input serves as both query and key,
        # so no theta/phi embedding convs are created.
        if self.mode != 'gaussian':
            self.theta = ConvModule(
                self.in_channels,
                self.inter_channels,
                kernel_size=1,
                conv_cfg=conv_cfg,
                act_cfg=None)
            self.phi = ConvModule(
                self.in_channels,
                self.inter_channels,
                kernel_size=1,
                conv_cfg=conv_cfg,
                act_cfg=None)

        if self.mode == 'concatenation':
            # Projects each concatenated (theta, phi) pair down to a single
            # scalar pairwise weight.
            self.concat_project = ConvModule(
                self.inter_channels * 2,
                1,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=False,
                act_cfg=dict(type='ReLU'))

        self.init_weights(**kwargs)

    def init_weights(self, std=0.01, zeros_init=True):
        """Initialize embedding and output convs.

        Args:
            std (float): Std of the normal initialization. Default: 0.01.
            zeros_init (bool): Whether to zero-initialize the layer right
                before the residual addition (``conv_out``'s conv, or its
                norm if one exists) so the block starts as an identity
                mapping. Default: True.
        """
        if self.mode != 'gaussian':
            for m in [self.g, self.theta, self.phi]:
                normal_init(m.conv, std=std)
        else:
            normal_init(self.g.conv, std=std)
        if zeros_init:
            # Zero the last layer feeding the residual sum -> identity start.
            if self.conv_out.norm_cfg is None:
                constant_init(self.conv_out.conv, 0)
            else:
                constant_init(self.conv_out.norm, 0)
        else:
            if self.conv_out.norm_cfg is None:
                normal_init(self.conv_out.conv, std=std)
            else:
                normal_init(self.conv_out.norm, std=std)

    def gaussian(self, theta_x, phi_x):
        """Pairwise weights: softmax over raw dot products."""
        # NonLocal1d pairwise_weight: [N, H, H]
        # NonLocal2d pairwise_weight: [N, HxW, HxW]
        # NonLocal3d pairwise_weight: [N, TxHxW, TxHxW]
        pairwise_weight = torch.matmul(theta_x, phi_x)
        pairwise_weight = pairwise_weight.softmax(dim=-1)
        return pairwise_weight

    def embedded_gaussian(self, theta_x, phi_x):
        """Pairwise weights: softmax over (optionally scaled) dot products."""
        # NonLocal1d pairwise_weight: [N, H, H]
        # NonLocal2d pairwise_weight: [N, HxW, HxW]
        # NonLocal3d pairwise_weight: [N, TxHxW, TxHxW]
        pairwise_weight = torch.matmul(theta_x, phi_x)
        if self.use_scale:
            # theta_x.shape[-1] is `self.inter_channels`
            pairwise_weight /= theta_x.shape[-1]**0.5
        pairwise_weight = pairwise_weight.softmax(dim=-1)
        return pairwise_weight

    def dot_product(self, theta_x, phi_x):
        """Pairwise weights: dot products normalized by position count."""
        # NonLocal1d pairwise_weight: [N, H, H]
        # NonLocal2d pairwise_weight: [N, HxW, HxW]
        # NonLocal3d pairwise_weight: [N, TxHxW, TxHxW]
        pairwise_weight = torch.matmul(theta_x, phi_x)
        pairwise_weight /= pairwise_weight.shape[-1]
        return pairwise_weight

    def concatenation(self, theta_x, phi_x):
        """Pairwise weights: 1x1 conv over concatenated embeddings."""
        # NonLocal1d pairwise_weight: [N, H, H]
        # NonLocal2d pairwise_weight: [N, HxW, HxW]
        # NonLocal3d pairwise_weight: [N, TxHxW, TxHxW]
        h = theta_x.size(2)
        w = phi_x.size(3)
        # Tile theta along the key axis and phi along the query axis so
        # every (query, key) pair lines up for channel-wise concatenation.
        theta_x = theta_x.repeat(1, 1, 1, w)
        phi_x = phi_x.repeat(1, 1, h, 1)

        concat_feature = torch.cat([theta_x, phi_x], dim=1)
        pairwise_weight = self.concat_project(concat_feature)
        n, _, h, w = pairwise_weight.size()
        pairwise_weight = pairwise_weight.view(n, h, w)
        pairwise_weight /= pairwise_weight.shape[-1]

        return pairwise_weight

    def forward(self, x):
        # Assume `reduction = 1`, then `inter_channels = C`
        # or `inter_channels = C` when `mode="gaussian"`

        # NonLocal1d x: [N, C, H]
        # NonLocal2d x: [N, C, H, W]
        # NonLocal3d x: [N, C, T, H, W]
        n = x.size(0)

        # NonLocal1d g_x: [N, H, C]
        # NonLocal2d g_x: [N, HxW, C]
        # NonLocal3d g_x: [N, TxHxW, C]
        g_x = self.g(x).view(n, self.inter_channels, -1)
        g_x = g_x.permute(0, 2, 1)

        # NonLocal1d theta_x: [N, H, C], phi_x: [N, C, H]
        # NonLocal2d theta_x: [N, HxW, C], phi_x: [N, C, HxW]
        # NonLocal3d theta_x: [N, TxHxW, C], phi_x: [N, C, TxHxW]
        if self.mode == 'gaussian':
            theta_x = x.view(n, self.in_channels, -1)
            theta_x = theta_x.permute(0, 2, 1)
            # NOTE(review): `self.sub_sample` is only set by the
            # NonLocal1d/2d/3d subclasses after super().__init__(), so
            # gaussian-mode forward() assumes it runs on a subclass.
            if self.sub_sample:
                phi_x = self.phi(x).view(n, self.in_channels, -1)
            else:
                phi_x = x.view(n, self.in_channels, -1)
        elif self.mode == 'concatenation':
            theta_x = self.theta(x).view(n, self.inter_channels, -1, 1)
            phi_x = self.phi(x).view(n, self.inter_channels, 1, -1)
        else:
            theta_x = self.theta(x).view(n, self.inter_channels, -1)
            theta_x = theta_x.permute(0, 2, 1)
            phi_x = self.phi(x).view(n, self.inter_channels, -1)

        # Dispatch to the pairwise function named after the current mode.
        pairwise_func = getattr(self, self.mode)
        # NonLocal1d pairwise_weight: [N, H, H]
        # NonLocal2d pairwise_weight: [N, HxW, HxW]
        # NonLocal3d pairwise_weight: [N, TxHxW, TxHxW]
        pairwise_weight = pairwise_func(theta_x, phi_x)

        # NonLocal1d y: [N, H, C]
        # NonLocal2d y: [N, HxW, C]
        # NonLocal3d y: [N, TxHxW, C]
        y = torch.matmul(pairwise_weight, g_x)
        # NonLocal1d y: [N, C, H]
        # NonLocal2d y: [N, C, H, W]
        # NonLocal3d y: [N, C, T, H, W]
        y = y.permute(0, 2, 1).contiguous().reshape(n, self.inter_channels,
                                                    *x.size()[2:])

        # Residual connection around the projected non-local response.
        output = x + self.conv_out(y)

        return output
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
class NonLocal1d(_NonLocalNd):
    """1D Non-local module.

    Args:
        in_channels (int): Same as `NonLocalND`.
        sub_sample (bool): Whether to apply max pooling after pairwise
            function (Note that the `sub_sample` is applied on spatial only).
            Default: False.
        conv_cfg (None | dict): Same as `NonLocalND`.
            Default: dict(type='Conv1d').
    """

    def __init__(self,
                 in_channels,
                 sub_sample=False,
                 conv_cfg=dict(type='Conv1d'),
                 **kwargs):
        super(NonLocal1d, self).__init__(
            in_channels, conv_cfg=conv_cfg, **kwargs)
        self.sub_sample = sub_sample

        if not sub_sample:
            return
        # Halve the temporal length of the key/value branches.
        pool = nn.MaxPool1d(kernel_size=2)
        self.g = nn.Sequential(self.g, pool)
        if self.mode == 'gaussian':
            # Gaussian mode has no phi conv; pooling alone plays phi's role.
            self.phi = pool
        else:
            self.phi = nn.Sequential(self.phi, pool)
|
| 243 |
+
|
| 244 |
+
|
| 245 |
+
@PLUGIN_LAYERS.register_module()
class NonLocal2d(_NonLocalNd):
    """2D Non-local module.

    Args:
        in_channels (int): Same as `NonLocalND`.
        sub_sample (bool): Whether to apply max pooling after pairwise
            function (Note that the `sub_sample` is applied on spatial only).
            Default: False.
        conv_cfg (None | dict): Same as `NonLocalND`.
            Default: dict(type='Conv2d').
    """

    _abbr_ = 'nonlocal_block'

    def __init__(self,
                 in_channels,
                 sub_sample=False,
                 conv_cfg=dict(type='Conv2d'),
                 **kwargs):
        super(NonLocal2d, self).__init__(
            in_channels, conv_cfg=conv_cfg, **kwargs)
        self.sub_sample = sub_sample

        if not sub_sample:
            return
        # Halve the spatial resolution of the key/value branches.
        pool = nn.MaxPool2d(kernel_size=(2, 2))
        self.g = nn.Sequential(self.g, pool)
        if self.mode == 'gaussian':
            # Gaussian mode has no phi conv; pooling alone plays phi's role.
            self.phi = pool
        else:
            self.phi = nn.Sequential(self.phi, pool)
|
| 277 |
+
|
| 278 |
+
|
| 279 |
+
class NonLocal3d(_NonLocalNd):
    """3D Non-local module.

    Args:
        in_channels (int): Same as `NonLocalND`.
        sub_sample (bool): Whether to apply max pooling after pairwise
            function (Note that the `sub_sample` is applied on spatial only).
            Default: False.
        conv_cfg (None | dict): Same as `NonLocalND`.
            Default: dict(type='Conv3d').
    """

    def __init__(self,
                 in_channels,
                 sub_sample=False,
                 conv_cfg=dict(type='Conv3d'),
                 **kwargs):
        super(NonLocal3d, self).__init__(
            in_channels, conv_cfg=conv_cfg, **kwargs)
        self.sub_sample = sub_sample

        if not sub_sample:
            return
        # Halve only the spatial dims; the temporal dim is left untouched.
        pool = nn.MaxPool3d(kernel_size=(1, 2, 2))
        self.g = nn.Sequential(self.g, pool)
        if self.mode == 'gaussian':
            # Gaussian mode has no phi conv; pooling alone plays phi's role.
            self.phi = pool
        else:
            self.phi = nn.Sequential(self.phi, pool)
|
RAVE-main/annotator/mmpkg/mmcv/cnn/bricks/norm.py
ADDED
|
@@ -0,0 +1,144 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import inspect
|
| 3 |
+
|
| 4 |
+
import torch.nn as nn
|
| 5 |
+
|
| 6 |
+
from annotator.mmpkg.mmcv.utils import is_tuple_of
|
| 7 |
+
from annotator.mmpkg.mmcv.utils.parrots_wrapper import SyncBatchNorm, _BatchNorm, _InstanceNorm
|
| 8 |
+
from .registry import NORM_LAYERS
|
| 9 |
+
|
| 10 |
+
NORM_LAYERS.register_module('BN', module=nn.BatchNorm2d)
|
| 11 |
+
NORM_LAYERS.register_module('BN1d', module=nn.BatchNorm1d)
|
| 12 |
+
NORM_LAYERS.register_module('BN2d', module=nn.BatchNorm2d)
|
| 13 |
+
NORM_LAYERS.register_module('BN3d', module=nn.BatchNorm3d)
|
| 14 |
+
NORM_LAYERS.register_module('SyncBN', module=SyncBatchNorm)
|
| 15 |
+
NORM_LAYERS.register_module('GN', module=nn.GroupNorm)
|
| 16 |
+
NORM_LAYERS.register_module('LN', module=nn.LayerNorm)
|
| 17 |
+
NORM_LAYERS.register_module('IN', module=nn.InstanceNorm2d)
|
| 18 |
+
NORM_LAYERS.register_module('IN1d', module=nn.InstanceNorm1d)
|
| 19 |
+
NORM_LAYERS.register_module('IN2d', module=nn.InstanceNorm2d)
|
| 20 |
+
NORM_LAYERS.register_module('IN3d', module=nn.InstanceNorm3d)
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def infer_abbr(class_type):
    """Infer abbreviation from the class name.

    When we build a norm layer with `build_norm_layer()`, we want to preserve
    the norm type in variable names, e.g, self.bn1, self.gn. This method will
    infer the abbreviation to map class types to abbreviations.

    Rule 1: If the class has the property "_abbr_", return the property.
    Rule 2: If the parent class is _BatchNorm, GroupNorm, LayerNorm or
    InstanceNorm, the abbreviation of this layer will be "bn", "gn", "ln" and
    "in" respectively.
    Rule 3: If the class name contains "batch", "group", "layer" or "instance",
    the abbreviation of this layer will be "bn", "gn", "ln" and "in"
    respectively.
    Rule 4: Otherwise, the abbreviation falls back to "norm".

    Args:
        class_type (type): The norm layer type.

    Returns:
        str: The inferred abbreviation.
    """
    if not inspect.isclass(class_type):
        raise TypeError(
            f'class_type must be a type, but got {type(class_type)}')
    if hasattr(class_type, '_abbr_'):
        return class_type._abbr_

    # Base-class lookup; _InstanceNorm must be tested before _BatchNorm
    # because IN subclasses BN.
    for base, abbr in ((_InstanceNorm, 'in'), (_BatchNorm, 'bn'),
                       (nn.GroupNorm, 'gn'), (nn.LayerNorm, 'ln')):
        if issubclass(class_type, base):
            return abbr

    # Keyword matching on the lowercased class name.
    lowered = class_type.__name__.lower()
    for keyword, abbr in (('batch', 'bn'), ('group', 'gn'), ('layer', 'ln'),
                          ('instance', 'in')):
        if keyword in lowered:
            return abbr
    return 'norm_layer'
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def build_norm_layer(cfg, num_features, postfix=''):
    """Build normalization layer.

    Args:
        cfg (dict): The norm layer config, which should contain:

            - type (str): Layer type.
            - layer args: Args needed to instantiate a norm layer.
            - requires_grad (bool, optional): Whether stop gradient updates.
        num_features (int): Number of input channels.
        postfix (int | str): The postfix to be appended into norm abbreviation
            to create named layer.

    Returns:
        (str, nn.Module): The first element is the layer name consisting of
            abbreviation and postfix, e.g., bn1, gn. The second element is the
            created norm layer.
    """
    if not isinstance(cfg, dict):
        raise TypeError('cfg must be a dict')
    if 'type' not in cfg:
        raise KeyError('the cfg dict must contain the key "type"')

    cfg_ = cfg.copy()
    layer_type = cfg_.pop('type')
    if layer_type not in NORM_LAYERS:
        raise KeyError(f'Unrecognized norm type {layer_type}')
    norm_layer = NORM_LAYERS.get(layer_type)

    assert isinstance(postfix, (int, str))
    name = infer_abbr(norm_layer) + str(postfix)

    requires_grad = cfg_.pop('requires_grad', True)
    cfg_.setdefault('eps', 1e-5)
    if layer_type == 'GN':
        # GroupNorm takes `num_channels` (plus a mandatory `num_groups`).
        assert 'num_groups' in cfg_
        layer = norm_layer(num_channels=num_features, **cfg_)
    else:
        layer = norm_layer(num_features, **cfg_)
        if layer_type == 'SyncBN' and hasattr(layer, '_specify_ddp_gpu_num'):
            # Needed by older SyncBN implementations to work under DDP.
            layer._specify_ddp_gpu_num(1)

    for param in layer.parameters():
        param.requires_grad = requires_grad

    return name, layer
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
def is_norm(layer, exclude=None):
    """Check whether a layer is a normalization layer.

    Args:
        layer (nn.Module): The layer to be checked.
        exclude (type | tuple[type]): Types to be excluded.

    Returns:
        bool: Whether the layer is a norm layer.
    """
    if exclude is not None:
        exclude = exclude if isinstance(exclude, tuple) else (exclude, )
        if not is_tuple_of(exclude, type):
            raise TypeError(
                f'"exclude" must be either None or type or a tuple of types, '
                f'but got {type(exclude)}: {exclude}')
        if isinstance(layer, exclude):
            return False

    norm_bases = (_BatchNorm, _InstanceNorm, nn.GroupNorm, nn.LayerNorm)
    return isinstance(layer, norm_bases)
|
RAVE-main/annotator/mmpkg/mmcv/cnn/bricks/padding.py
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
|
| 4 |
+
from .registry import PADDING_LAYERS
|
| 5 |
+
|
| 6 |
+
PADDING_LAYERS.register_module('zero', module=nn.ZeroPad2d)
|
| 7 |
+
PADDING_LAYERS.register_module('reflect', module=nn.ReflectionPad2d)
|
| 8 |
+
PADDING_LAYERS.register_module('replicate', module=nn.ReplicationPad2d)
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def build_padding_layer(cfg, *args, **kwargs):
    """Build padding layer.

    Args:
        cfg (None or dict): The padding layer config, which should contain:
            - type (str): Layer type.
            - layer args: Args needed to instantiate a padding layer.

    Returns:
        nn.Module: Created padding layer.
    """
    if not isinstance(cfg, dict):
        raise TypeError('cfg must be a dict')
    if 'type' not in cfg:
        raise KeyError('the cfg dict must contain the key "type"')

    cfg_ = cfg.copy()
    padding_type = cfg_.pop('type')
    if padding_type not in PADDING_LAYERS:
        raise KeyError(f'Unrecognized padding type {padding_type}.')

    # Remaining cfg entries are forwarded as constructor kwargs.
    padding_layer = PADDING_LAYERS.get(padding_type)
    return padding_layer(*args, **kwargs, **cfg_)
|
RAVE-main/annotator/mmpkg/mmcv/cnn/bricks/plugin.py
ADDED
|
@@ -0,0 +1,88 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import inspect
|
| 2 |
+
import platform
|
| 3 |
+
|
| 4 |
+
from .registry import PLUGIN_LAYERS
|
| 5 |
+
|
| 6 |
+
if platform.system() == 'Windows':
|
| 7 |
+
import regex as re
|
| 8 |
+
else:
|
| 9 |
+
import re
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def infer_abbr(class_type):
    """Infer abbreviation from the class name.

    This method will infer the abbreviation to map class types to
    abbreviations.

    Rule 1: If the class has the property "_abbr_", return the property.
    Rule 2: Otherwise, the abbreviation falls back to snake case of class
    name, e.g. the abbreviation of ``FancyBlock`` will be ``fancy_block``.

    Args:
        class_type (type): The norm layer type.

    Returns:
        str: The inferred abbreviation.
    """
    if not inspect.isclass(class_type):
        raise TypeError(
            f'class_type must be a type, but got {type(class_type)}')
    if hasattr(class_type, '_abbr_'):
        return class_type._abbr_

    # Snake-case conversion, modified from the inflection library's
    # `underscore`: split acronym/word and lower/upper boundaries,
    # normalize dashes, then lowercase.
    snake = class_type.__name__
    snake = re.sub(r'([A-Z]+)([A-Z][a-z])', r'\1_\2', snake)
    snake = re.sub(r'([a-z\d])([A-Z])', r'\1_\2', snake)
    return snake.replace('-', '_').lower()
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def build_plugin_layer(cfg, postfix='', **kwargs):
    """Build plugin layer.

    Args:
        cfg (None or dict): cfg should contain:
            type (str): identify plugin layer type.
            layer args: args needed to instantiate a plugin layer.
        postfix (int, str): appended into norm abbreviation to
            create named layer. Default: ''.

    Returns:
        tuple[str, nn.Module]:
            name (str): abbreviation + postfix
            layer (nn.Module): created plugin layer
    """
    if not isinstance(cfg, dict):
        raise TypeError('cfg must be a dict')
    if 'type' not in cfg:
        raise KeyError('the cfg dict must contain the key "type"')

    cfg_ = cfg.copy()
    layer_type = cfg_.pop('type')
    if layer_type not in PLUGIN_LAYERS:
        raise KeyError(f'Unrecognized plugin type {layer_type}')

    plugin_layer = PLUGIN_LAYERS.get(layer_type)
    assert isinstance(postfix, (int, str))
    # Name combines the class's inferred abbreviation with the postfix.
    name = infer_abbr(plugin_layer) + str(postfix)
    return name, plugin_layer(**kwargs, **cfg_)
|
RAVE-main/annotator/mmpkg/mmcv/cnn/bricks/registry.py
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from annotator.mmpkg.mmcv.utils import Registry
|
| 3 |
+
|
| 4 |
+
# Registries for the basic conv-network building blocks ("bricks").
CONV_LAYERS = Registry('conv layer')
NORM_LAYERS = Registry('norm layer')
ACTIVATION_LAYERS = Registry('activation layer')
PADDING_LAYERS = Registry('padding layer')
UPSAMPLE_LAYERS = Registry('upsample layer')
PLUGIN_LAYERS = Registry('plugin layer')

# Registries for transformer components.
DROPOUT_LAYERS = Registry('drop out layers')
POSITIONAL_ENCODING = Registry('position encoding')
ATTENTION = Registry('attention')
FEEDFORWARD_NETWORK = Registry('feed-forward Network')
TRANSFORMER_LAYER = Registry('transformerLayer')
TRANSFORMER_LAYER_SEQUENCE = Registry('transformer-layers sequence')
|
RAVE-main/annotator/mmpkg/mmcv/cnn/bricks/scale.py
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import torch
|
| 3 |
+
import torch.nn as nn
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class Scale(nn.Module):
    """A learnable scale parameter.

    Multiplies the input (of any shape) by a single learnable scalar
    factor.

    Args:
        scale (float): Initial value of scale factor. Default: 1.0
    """

    def __init__(self, scale=1.0):
        super(Scale, self).__init__()
        # Stored as a float Parameter so the factor is trained with the
        # rest of the model.
        self.scale = nn.Parameter(torch.tensor(scale, dtype=torch.float))

    def forward(self, x):
        scaled = x * self.scale
        return scaled
|
RAVE-main/annotator/mmpkg/mmcv/cnn/bricks/swish.py
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import torch
|
| 3 |
+
import torch.nn as nn
|
| 4 |
+
|
| 5 |
+
from .registry import ACTIVATION_LAYERS
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
@ACTIVATION_LAYERS.register_module()
class Swish(nn.Module):
    """Swish activation.

    Applies the swish function elementwise:

    .. math::
        Swish(x) = x * Sigmoid(x)

    Returns:
        Tensor: The output tensor.
    """

    def __init__(self):
        super(Swish, self).__init__()

    def forward(self, x):
        gate = torch.sigmoid(x)
        return x * gate
|
RAVE-main/annotator/mmpkg/mmcv/cnn/bricks/transformer.py
ADDED
|
@@ -0,0 +1,595 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import copy
|
| 3 |
+
import warnings
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
import torch.nn as nn
|
| 7 |
+
|
| 8 |
+
from annotator.mmpkg.mmcv import ConfigDict, deprecated_api_warning
|
| 9 |
+
from annotator.mmpkg.mmcv.cnn import Linear, build_activation_layer, build_norm_layer
|
| 10 |
+
from annotator.mmpkg.mmcv.runner.base_module import BaseModule, ModuleList, Sequential
|
| 11 |
+
from annotator.mmpkg.mmcv.utils import build_from_cfg
|
| 12 |
+
from .drop import build_dropout
|
| 13 |
+
from .registry import (ATTENTION, FEEDFORWARD_NETWORK, POSITIONAL_ENCODING,
|
| 14 |
+
TRANSFORMER_LAYER, TRANSFORMER_LAYER_SEQUENCE)
|
| 15 |
+
|
| 16 |
+
# Avoid BC-breaking of importing MultiScaleDeformableAttention from this file
|
| 17 |
+
try:
|
| 18 |
+
from annotator.mmpkg.mmcv.ops.multi_scale_deform_attn import MultiScaleDeformableAttention # noqa F401
|
| 19 |
+
warnings.warn(
|
| 20 |
+
ImportWarning(
|
| 21 |
+
'``MultiScaleDeformableAttention`` has been moved to '
|
| 22 |
+
'``mmcv.ops.multi_scale_deform_attn``, please change original path ' # noqa E501
|
| 23 |
+
'``from annotator.mmpkg.mmcv.cnn.bricks.transformer import MultiScaleDeformableAttention`` ' # noqa E501
|
| 24 |
+
'to ``from annotator.mmpkg.mmcv.ops.multi_scale_deform_attn import MultiScaleDeformableAttention`` ' # noqa E501
|
| 25 |
+
))
|
| 26 |
+
|
| 27 |
+
except ImportError:
|
| 28 |
+
warnings.warn('Fail to import ``MultiScaleDeformableAttention`` from '
|
| 29 |
+
'``mmcv.ops.multi_scale_deform_attn``, '
|
| 30 |
+
'You should install ``mmcv-full`` if you need this module. ')
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def build_positional_encoding(cfg, default_args=None):
    """Build a positional encoding module from config.

    Args:
        cfg (dict): Config dict whose ``type`` key is registered in
            ``POSITIONAL_ENCODING``.
        default_args (dict, optional): Default arguments merged into
            ``cfg``. Default: None.

    Returns:
        The constructed positional encoding module.
    """
    module = build_from_cfg(cfg, POSITIONAL_ENCODING, default_args)
    return module
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def build_attention(cfg, default_args=None):
    """Build an attention module from config.

    Args:
        cfg (dict): Config dict whose ``type`` key is registered in
            ``ATTENTION``.
        default_args (dict, optional): Default arguments merged into
            ``cfg``. Default: None.

    Returns:
        The constructed attention module.
    """
    module = build_from_cfg(cfg, ATTENTION, default_args)
    return module
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def build_feedforward_network(cfg, default_args=None):
    """Build a feed-forward network (FFN) from config.

    Args:
        cfg (dict): Config dict whose ``type`` key is registered in
            ``FEEDFORWARD_NETWORK``.
        default_args (dict, optional): Default arguments merged into
            ``cfg``. Default: None.

    Returns:
        The constructed FFN module.
    """
    module = build_from_cfg(cfg, FEEDFORWARD_NETWORK, default_args)
    return module
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def build_transformer_layer(cfg, default_args=None):
    """Build a transformer layer from config.

    Args:
        cfg (dict): Config dict whose ``type`` key is registered in
            ``TRANSFORMER_LAYER``.
        default_args (dict, optional): Default arguments merged into
            ``cfg``. Default: None.

    Returns:
        The constructed transformer layer.
    """
    module = build_from_cfg(cfg, TRANSFORMER_LAYER, default_args)
    return module
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def build_transformer_layer_sequence(cfg, default_args=None):
    """Build a transformer encoder or decoder from config.

    Args:
        cfg (dict): Config dict whose ``type`` key is registered in
            ``TRANSFORMER_LAYER_SEQUENCE``.
        default_args (dict, optional): Default arguments merged into
            ``cfg``. Default: None.

    Returns:
        The constructed transformer layer sequence.
    """
    module = build_from_cfg(cfg, TRANSFORMER_LAYER_SEQUENCE, default_args)
    return module
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
@ATTENTION.register_module()
class MultiheadAttention(BaseModule):
    """A wrapper for ``torch.nn.MultiheadAttention``.

    This module implements MultiheadAttention with identity connection,
    and positional encoding is also passed as input.

    Args:
        embed_dims (int): The embedding dimension.
        num_heads (int): Parallel attention heads.
        attn_drop (float): A Dropout layer on attn_output_weights.
            Default: 0.0.
        proj_drop (float): A Dropout layer after `nn.MultiheadAttention`.
            Default: 0.0.
        dropout_layer (obj:`ConfigDict`): The dropout_layer used
            when adding the shortcut.
        init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
            Default: None.
        batch_first (bool): When it is True, Key, Query and Value are shape of
            (batch, n, embed_dim), otherwise (n, batch, embed_dim).
            Default to False.
    """

    def __init__(self,
                 embed_dims,
                 num_heads,
                 attn_drop=0.,
                 proj_drop=0.,
                 dropout_layer=dict(type='Dropout', drop_prob=0.),
                 init_cfg=None,
                 batch_first=False,
                 **kwargs):
        super(MultiheadAttention, self).__init__(init_cfg)
        # Back-compat shim: the legacy `dropout` kwarg is rerouted to both
        # `attn_drop` and the shortcut `dropout_layer`'s drop_prob.
        if 'dropout' in kwargs:
            warnings.warn('The arguments `dropout` in MultiheadAttention '
                          'has been deprecated, now you can separately '
                          'set `attn_drop`(float), proj_drop(float), '
                          'and `dropout_layer`(dict) ')
            attn_drop = kwargs['dropout']
            # pop() so `dropout` is not forwarded to nn.MultiheadAttention.
            dropout_layer['drop_prob'] = kwargs.pop('dropout')

        self.embed_dims = embed_dims
        self.num_heads = num_heads
        self.batch_first = batch_first

        self.attn = nn.MultiheadAttention(embed_dims, num_heads, attn_drop,
                                          **kwargs)

        self.proj_drop = nn.Dropout(proj_drop)
        # Falsy dropout_layer config (e.g. None) disables the shortcut dropout.
        self.dropout_layer = build_dropout(
            dropout_layer) if dropout_layer else nn.Identity()

    @deprecated_api_warning({'residual': 'identity'},
                            cls_name='MultiheadAttention')
    def forward(self,
                query,
                key=None,
                value=None,
                identity=None,
                query_pos=None,
                key_pos=None,
                attn_mask=None,
                key_padding_mask=None,
                **kwargs):
        """Forward function for `MultiheadAttention`.

        **kwargs allow passing a more general data flow when combining
        with other operations in `transformerlayer`.

        Args:
            query (Tensor): The input query with shape [num_queries, bs,
                embed_dims] if self.batch_first is False, else
                [bs, num_queries, embed_dims].
            key (Tensor): The key tensor with shape [num_keys, bs,
                embed_dims] if self.batch_first is False, else
                [bs, num_keys, embed_dims].
                If None, the ``query`` will be used. Defaults to None.
            value (Tensor): The value tensor with same shape as `key`.
                Same in `nn.MultiheadAttention.forward`. Defaults to None.
                If None, the `key` will be used.
            identity (Tensor): This tensor, with the same shape as x,
                will be used for the identity link.
                If None, `x` will be used. Defaults to None.
            query_pos (Tensor): The positional encoding for query, with
                the same shape as `x`. If not None, it will
                be added to `x` before forward function. Defaults to None.
            key_pos (Tensor): The positional encoding for `key`, with the
                same shape as `key`. Defaults to None. If not None, it will
                be added to `key` before forward function. If None, and
                `query_pos` has the same shape as `key`, then `query_pos`
                will be used for `key_pos`. Defaults to None.
            attn_mask (Tensor): ByteTensor mask with shape [num_queries,
                num_keys]. Same in `nn.MultiheadAttention.forward`.
                Defaults to None.
            key_padding_mask (Tensor): ByteTensor with shape [bs, num_keys].
                Defaults to None.

        Returns:
            Tensor: forwarded results with shape
                [num_queries, bs, embed_dims]
                if self.batch_first is False, else
                [bs, num_queries, embed_dims].
        """

        # Resolve defaults in a chain: key <- query, value <- key, so a single
        # `query` argument gives self-attention.
        if key is None:
            key = query
        if value is None:
            value = key
        if identity is None:
            identity = query
        if key_pos is None:
            if query_pos is not None:
                # use query_pos if key_pos is not available
                if query_pos.shape == key.shape:
                    key_pos = query_pos
                else:
                    warnings.warn(f'position encoding of key is'
                                  f'missing in {self.__class__.__name__}.')
        # Positional encodings are added (not concatenated) before attention.
        if query_pos is not None:
            query = query + query_pos
        if key_pos is not None:
            key = key + key_pos

        # Because the dataflow('key', 'query', 'value') of
        # ``torch.nn.MultiheadAttention`` is (num_query, batch,
        # embed_dims), We should adjust the shape of dataflow from
        # batch_first (batch, num_query, embed_dims) to num_query_first
        # (num_query ,batch, embed_dims), and recover ``attn_output``
        # from num_query_first to batch_first.
        if self.batch_first:
            query = query.transpose(0, 1)
            key = key.transpose(0, 1)
            value = value.transpose(0, 1)

        # [0] keeps only attn_output; attn_output_weights are discarded.
        out = self.attn(
            query=query,
            key=key,
            value=value,
            attn_mask=attn_mask,
            key_padding_mask=key_padding_mask)[0]

        if self.batch_first:
            out = out.transpose(0, 1)

        # Residual connection with optional projection/shortcut dropout.
        return identity + self.dropout_layer(self.proj_drop(out))
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
@FEEDFORWARD_NETWORK.register_module()
class FFN(BaseModule):
    """Implements feed-forward networks (FFNs) with identity connection.

    Args:
        embed_dims (int): The feature dimension. Same as
            `MultiheadAttention`. Defaults: 256.
        feedforward_channels (int): The hidden dimension of FFNs.
            Defaults: 1024.
        num_fcs (int, optional): The number of fully-connected layers in
            FFNs. Default: 2.
        act_cfg (dict, optional): The activation config for FFNs.
            Default: dict(type='ReLU')
        ffn_drop (float, optional): Probability of an element to be
            zeroed in FFN. Default 0.0.
        add_identity (bool, optional): Whether to add the
            identity connection. Default: `True`.
        dropout_layer (obj:`ConfigDict`): The dropout_layer used
            when adding the shortcut.
        init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
            Default: None.
    """

    @deprecated_api_warning(
        {
            'dropout': 'ffn_drop',
            'add_residual': 'add_identity'
        },
        cls_name='FFN')
    def __init__(self,
                 embed_dims=256,
                 feedforward_channels=1024,
                 num_fcs=2,
                 act_cfg=dict(type='ReLU', inplace=True),
                 ffn_drop=0.,
                 dropout_layer=None,
                 add_identity=True,
                 init_cfg=None,
                 **kwargs):
        super(FFN, self).__init__(init_cfg)
        assert num_fcs >= 2, 'num_fcs should be no less ' \
            f'than 2. got {num_fcs}.'
        self.embed_dims = embed_dims
        self.feedforward_channels = feedforward_channels
        self.num_fcs = num_fcs
        self.act_cfg = act_cfg
        self.activate = build_activation_layer(act_cfg)

        # First (num_fcs - 1) blocks expand to the hidden width and apply
        # activation + dropout; the final Linear projects back to embed_dims.
        fc_blocks = []
        width = embed_dims
        for _ in range(num_fcs - 1):
            fc_blocks.append(
                Sequential(
                    Linear(width, feedforward_channels), self.activate,
                    nn.Dropout(ffn_drop)))
            width = feedforward_channels
        fc_blocks.append(Linear(feedforward_channels, embed_dims))
        fc_blocks.append(nn.Dropout(ffn_drop))
        self.layers = Sequential(*fc_blocks)
        # Falsy dropout_layer config disables the shortcut dropout.
        self.dropout_layer = build_dropout(
            dropout_layer) if dropout_layer else nn.Identity()
        self.add_identity = add_identity

    @deprecated_api_warning({'residual': 'identity'}, cls_name='FFN')
    def forward(self, x, identity=None):
        """Forward function for `FFN`.

        The function would add x to the output tensor if residue is None.
        """
        # Shortcut dropout applies in both branches, so compute it once.
        out = self.dropout_layer(self.layers(x))
        if not self.add_identity:
            return out
        shortcut = x if identity is None else identity
        return shortcut + out
|
| 280 |
+
|
| 281 |
+
|
| 282 |
+
@TRANSFORMER_LAYER.register_module()
class BaseTransformerLayer(BaseModule):
    """Base `TransformerLayer` for vision transformer.

    It can be built from `mmcv.ConfigDict` and support more flexible
    customization, for example, using any number of `FFN or LN ` and
    use different kinds of `attention` by specifying a list of `ConfigDict`
    named `attn_cfgs`. It is worth mentioning that it supports `prenorm`
    when you specifying `norm` as the first element of `operation_order`.
    More details about the `prenorm`: `On Layer Normalization in the
    Transformer Architecture <https://arxiv.org/abs/2002.04745>`_ .

    Args:
        attn_cfgs (list[`mmcv.ConfigDict`] | obj:`mmcv.ConfigDict` | None )):
            Configs for `self_attention` or `cross_attention` modules,
            The order of the configs in the list should be consistent with
            corresponding attentions in operation_order.
            If it is a dict, all of the attention modules in operation_order
            will be built with this config. Default: None.
        ffn_cfgs (list[`mmcv.ConfigDict`] | obj:`mmcv.ConfigDict` | None )):
            Configs for FFN, The order of the configs in the list should be
            consistent with corresponding ffn in operation_order.
            If it is a dict, all of the ffn modules in operation_order
            will be built with this config.
        operation_order (tuple[str]): The execution order of operation
            in transformer. Such as ('self_attn', 'norm', 'ffn', 'norm').
            Support `prenorm` when you specifying first element as `norm`.
            Default:None.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='LN').
        init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
            Default: None.
        batch_first (bool): Key, Query and Value are shape
            of (batch, n, embed_dim)
            or (n, batch, embed_dim). Default to False.
    """

    def __init__(self,
                 attn_cfgs=None,
                 ffn_cfgs=dict(
                     type='FFN',
                     embed_dims=256,
                     feedforward_channels=1024,
                     num_fcs=2,
                     ffn_drop=0.,
                     act_cfg=dict(type='ReLU', inplace=True),
                 ),
                 operation_order=None,
                 norm_cfg=dict(type='LN'),
                 init_cfg=None,
                 batch_first=False,
                 **kwargs):

        # Back-compat shim: legacy FFN kwargs are rerouted into `ffn_cfgs`.
        deprecated_args = dict(
            feedforward_channels='feedforward_channels',
            ffn_dropout='ffn_drop',
            ffn_num_fcs='num_fcs')
        for ori_name, new_name in deprecated_args.items():
            if ori_name in kwargs:
                warnings.warn(
                    f'The arguments `{ori_name}` in BaseTransformerLayer '
                    f'has been deprecated, now you should set `{new_name}` '
                    f'and other FFN related arguments '
                    f'to a dict named `ffn_cfgs`. ')
                ffn_cfgs[new_name] = kwargs[ori_name]

        super(BaseTransformerLayer, self).__init__(init_cfg)

        self.batch_first = batch_first

        # Only the four known operation types are allowed.
        assert set(operation_order) & set(
            ['self_attn', 'norm', 'ffn', 'cross_attn']) == \
            set(operation_order), f'The operation_order of' \
            f' {self.__class__.__name__} should ' \
            f'contains all four operation type ' \
            f"{['self_attn', 'norm', 'ffn', 'cross_attn']}"

        num_attn = operation_order.count('self_attn') + operation_order.count(
            'cross_attn')
        if isinstance(attn_cfgs, dict):
            # A single dict is replicated for every attention operation.
            attn_cfgs = [copy.deepcopy(attn_cfgs) for _ in range(num_attn)]
        else:
            assert num_attn == len(attn_cfgs), f'The length ' \
                f'of attn_cfg {num_attn} is ' \
                f'not consistent with the number of attention' \
                f'in operation_order {operation_order}.'

        self.num_attn = num_attn
        self.operation_order = operation_order
        self.norm_cfg = norm_cfg
        # 'norm' first in operation_order means pre-norm style layers.
        self.pre_norm = operation_order[0] == 'norm'
        self.attentions = ModuleList()

        index = 0
        for operation_name in operation_order:
            if operation_name in ['self_attn', 'cross_attn']:
                # Keep per-attention batch_first consistent with the layer.
                if 'batch_first' in attn_cfgs[index]:
                    assert self.batch_first == attn_cfgs[index]['batch_first']
                else:
                    attn_cfgs[index]['batch_first'] = self.batch_first
                attention = build_attention(attn_cfgs[index])
                # Some custom attentions used as `self_attn`
                # or `cross_attn` can have different behavior.
                attention.operation_name = operation_name
                self.attentions.append(attention)
                index += 1

        self.embed_dims = self.attentions[0].embed_dims

        self.ffns = ModuleList()
        num_ffns = operation_order.count('ffn')
        if isinstance(ffn_cfgs, dict):
            ffn_cfgs = ConfigDict(ffn_cfgs)
        if isinstance(ffn_cfgs, dict):
            # A single dict is replicated for every 'ffn' operation.
            ffn_cfgs = [copy.deepcopy(ffn_cfgs) for _ in range(num_ffns)]
        assert len(ffn_cfgs) == num_ffns
        for ffn_index in range(num_ffns):
            if 'embed_dims' not in ffn_cfgs[ffn_index]:
                # Fix: fill in the per-FFN config entry. The previous code
                # did `ffn_cfgs['embed_dims'] = ...`, which indexes the list
                # `ffn_cfgs` with a string and raises TypeError.
                ffn_cfgs[ffn_index]['embed_dims'] = self.embed_dims
            else:
                assert ffn_cfgs[ffn_index]['embed_dims'] == self.embed_dims
            self.ffns.append(
                build_feedforward_network(ffn_cfgs[ffn_index],
                                          dict(type='FFN')))

        self.norms = ModuleList()
        num_norms = operation_order.count('norm')
        for _ in range(num_norms):
            self.norms.append(build_norm_layer(norm_cfg, self.embed_dims)[1])

    def forward(self,
                query,
                key=None,
                value=None,
                query_pos=None,
                key_pos=None,
                attn_masks=None,
                query_key_padding_mask=None,
                key_padding_mask=None,
                **kwargs):
        """Forward function for `TransformerDecoderLayer`.

        **kwargs contains some specific arguments of attentions.

        Args:
            query (Tensor): The input query with shape
                [num_queries, bs, embed_dims] if
                self.batch_first is False, else
                [bs, num_queries, embed_dims].
            key (Tensor): The key tensor with shape [num_keys, bs,
                embed_dims] if self.batch_first is False, else
                [bs, num_keys, embed_dims].
            value (Tensor): The value tensor with same shape as `key`.
            query_pos (Tensor): The positional encoding for `query`.
                Default: None.
            key_pos (Tensor): The positional encoding for `key`.
                Default: None.
            attn_masks (List[Tensor] | None): 2D Tensor used in
                calculation of corresponding attention. The length of
                it should equal to the number of `attention` in
                `operation_order`. Default: None.
            query_key_padding_mask (Tensor): ByteTensor for `query`, with
                shape [bs, num_queries]. Only used in `self_attn` layer.
                Defaults to None.
            key_padding_mask (Tensor): ByteTensor for `key`, with
                shape [bs, num_keys]. Default: None.

        Returns:
            Tensor: forwarded results with shape [num_queries, bs, embed_dims].
        """

        norm_index = 0
        attn_index = 0
        ffn_index = 0
        identity = query
        # Normalize attn_masks to one entry per attention operation.
        if attn_masks is None:
            attn_masks = [None for _ in range(self.num_attn)]
        elif isinstance(attn_masks, torch.Tensor):
            attn_masks = [
                copy.deepcopy(attn_masks) for _ in range(self.num_attn)
            ]
            warnings.warn(f'Use same attn_mask in all attentions in '
                          f'{self.__class__.__name__} ')
        else:
            assert len(attn_masks) == self.num_attn, f'The length of ' \
                f'attn_masks {len(attn_masks)} must be equal ' \
                f'to the number of attention in ' \
                f'operation_order {self.num_attn}'

        for layer in self.operation_order:
            if layer == 'self_attn':
                temp_key = temp_value = query
                # In pre-norm mode the identity link skips the norm, so the
                # pre-norm identity is passed explicitly; in post-norm mode
                # the attention module falls back to its own input.
                query = self.attentions[attn_index](
                    query,
                    temp_key,
                    temp_value,
                    identity if self.pre_norm else None,
                    query_pos=query_pos,
                    key_pos=query_pos,
                    attn_mask=attn_masks[attn_index],
                    key_padding_mask=query_key_padding_mask,
                    **kwargs)
                attn_index += 1
                identity = query

            elif layer == 'norm':
                query = self.norms[norm_index](query)
                norm_index += 1

            elif layer == 'cross_attn':
                query = self.attentions[attn_index](
                    query,
                    key,
                    value,
                    identity if self.pre_norm else None,
                    query_pos=query_pos,
                    key_pos=key_pos,
                    attn_mask=attn_masks[attn_index],
                    key_padding_mask=key_padding_mask,
                    **kwargs)
                attn_index += 1
                identity = query

            elif layer == 'ffn':
                query = self.ffns[ffn_index](
                    query, identity if self.pre_norm else None)
                ffn_index += 1

        return query
|
| 511 |
+
|
| 512 |
+
|
| 513 |
+
@TRANSFORMER_LAYER_SEQUENCE.register_module()
class TransformerLayerSequence(BaseModule):
    """Base class for TransformerEncoder and TransformerDecoder in vision
    transformer.

    As base-class of Encoder and Decoder in vision transformer.
    Support customization such as specifying different kind
    of `transformer_layer` in `transformer_coder`.

    Args:
        transformerlayers (list[obj:`mmcv.ConfigDict`] |
            obj:`mmcv.ConfigDict`): Config of transformerlayer
            in TransformerCoder. If it is obj:`mmcv.ConfigDict`,
            it would be repeated `num_layers` times to a
            list[`mmcv.ConfigDict`]. Default: None.
        num_layers (int): The number of `TransformerLayer`. Default: None.
        init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
            Default: None.
    """

    def __init__(self, transformerlayers=None, num_layers=None, init_cfg=None):
        super(TransformerLayerSequence, self).__init__(init_cfg)
        if isinstance(transformerlayers, dict):
            # A single config is replicated once per layer.
            transformerlayers = [
                copy.deepcopy(transformerlayers) for _ in range(num_layers)
            ]
        else:
            assert isinstance(transformerlayers, list) and \
                len(transformerlayers) == num_layers
        self.num_layers = num_layers
        self.layers = ModuleList()
        for layer_cfg in transformerlayers:
            self.layers.append(build_transformer_layer(layer_cfg))
        # Mirror the first layer's attributes for convenient access.
        self.embed_dims = self.layers[0].embed_dims
        self.pre_norm = self.layers[0].pre_norm

    def forward(self,
                query,
                key,
                value,
                query_pos=None,
                key_pos=None,
                attn_masks=None,
                query_key_padding_mask=None,
                key_padding_mask=None,
                **kwargs):
        """Forward function for `TransformerCoder`.

        Args:
            query (Tensor): Input query with shape
                `(num_queries, bs, embed_dims)`.
            key (Tensor): The key tensor with shape
                `(num_keys, bs, embed_dims)`.
            value (Tensor): The value tensor with shape
                `(num_keys, bs, embed_dims)`.
            query_pos (Tensor): The positional encoding for `query`.
                Default: None.
            key_pos (Tensor): The positional encoding for `key`.
                Default: None.
            attn_masks (List[Tensor], optional): Each element is 2D Tensor
                which is used in calculation of corresponding attention in
                operation_order. Default: None.
            query_key_padding_mask (Tensor): ByteTensor for `query`, with
                shape [bs, num_queries]. Only used in self-attention
                Default: None.
            key_padding_mask (Tensor): ByteTensor for `key`, with
                shape [bs, num_keys]. Default: None.

        Returns:
            Tensor: results with shape [num_queries, bs, embed_dims].
        """
        # The query is threaded through the layers; key/value and masks are
        # shared across all layers.
        for transformer_layer in self.layers:
            query = transformer_layer(
                query,
                key,
                value,
                query_pos=query_pos,
                key_pos=key_pos,
                attn_masks=attn_masks,
                query_key_padding_mask=query_key_padding_mask,
                key_padding_mask=key_padding_mask,
                **kwargs)
        return query
|
RAVE-main/annotator/mmpkg/mmcv/cnn/bricks/upsample.py
ADDED
|
@@ -0,0 +1,84 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
import torch.nn.functional as F
|
| 4 |
+
|
| 5 |
+
from ..utils import xavier_init
|
| 6 |
+
from .registry import UPSAMPLE_LAYERS
|
| 7 |
+
|
| 8 |
+
UPSAMPLE_LAYERS.register_module('nearest', module=nn.Upsample)
|
| 9 |
+
UPSAMPLE_LAYERS.register_module('bilinear', module=nn.Upsample)
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
@UPSAMPLE_LAYERS.register_module(name='pixel_shuffle')
class PixelShufflePack(nn.Module):
    """Pixel Shuffle upsample layer.

    This module packs `F.pixel_shuffle()` and a nn.Conv2d module together to
    achieve a simple upsampling with pixel shuffle.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        scale_factor (int): Upsample ratio.
        upsample_kernel (int): Kernel size of the conv layer to expand the
            channels.
    """

    def __init__(self, in_channels, out_channels, scale_factor,
                 upsample_kernel):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.scale_factor = scale_factor
        self.upsample_kernel = upsample_kernel
        # Conv expands channels by scale_factor**2 so pixel_shuffle can fold
        # them into the spatial dims; the padding keeps the spatial size.
        self.upsample_conv = nn.Conv2d(
            self.in_channels,
            self.out_channels * scale_factor * scale_factor,
            self.upsample_kernel,
            padding=(self.upsample_kernel - 1) // 2)
        self.init_weights()

    def init_weights(self):
        # Xavier-uniform init for the expansion conv.
        xavier_init(self.upsample_conv, distribution='uniform')

    def forward(self, x):
        expanded = self.upsample_conv(x)
        shuffled = F.pixel_shuffle(expanded, self.scale_factor)
        return shuffled
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def build_upsample_layer(cfg, *args, **kwargs):
    """Build upsample layer.

    Args:
        cfg (dict): The upsample layer config, which should contain:

            - type (str): Layer type.
            - scale_factor (int): Upsample ratio, which is not applicable to
              deconv.
            - layer args: Args needed to instantiate a upsample layer.
        args (argument list): Arguments passed to the ``__init__``
            method of the corresponding conv layer.
        kwargs (keyword arguments): Keyword arguments passed to the
            ``__init__`` method of the corresponding conv layer.

    Returns:
        nn.Module: Created upsample layer.
    """
    # Validate the config before touching the registry.
    if not isinstance(cfg, dict):
        raise TypeError(f'cfg must be a dict, but got {type(cfg)}')
    if 'type' not in cfg:
        raise KeyError(
            f'the cfg dict must contain the key "type", but got {cfg}')

    cfg_ = cfg.copy()
    layer_type = cfg_.pop('type')
    if layer_type not in UPSAMPLE_LAYERS:
        raise KeyError(f'Unrecognized upsample type {layer_type}')
    upsample = UPSAMPLE_LAYERS.get(layer_type)

    # ``nn.Upsample`` is registered under several mode names; recover the
    # interpolation mode from the registry key.
    if upsample is nn.Upsample:
        cfg_['mode'] = layer_type
    return upsample(*args, **kwargs, **cfg_)
|
RAVE-main/annotator/mmpkg/mmcv/cnn/bricks/wrappers.py
ADDED
|
@@ -0,0 +1,180 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
r"""Modified from https://github.com/facebookresearch/detectron2/blob/master/detectron2/layers/wrappers.py # noqa: E501
|
| 3 |
+
|
| 4 |
+
Wrap some nn modules to support empty tensor input. Currently, these wrappers
|
| 5 |
+
are mainly used in mask heads like fcn_mask_head and maskiou_heads since mask
|
| 6 |
+
heads are trained on only positive RoIs.
|
| 7 |
+
"""
|
| 8 |
+
import math
|
| 9 |
+
|
| 10 |
+
import torch
|
| 11 |
+
import torch.nn as nn
|
| 12 |
+
from torch.nn.modules.utils import _pair, _triple
|
| 13 |
+
|
| 14 |
+
from .registry import CONV_LAYERS, UPSAMPLE_LAYERS
|
| 15 |
+
|
| 16 |
+
# ``parrots`` is a torch-compatible framework whose version string is not
# numeric, so it is kept verbatim; otherwise the version is parsed into a
# (major, minor) tuple for cheap comparisons.
if torch.__version__ == 'parrots':
    TORCH_VERSION = torch.__version__
else:
    # torch.__version__ could be 1.3.1+cu92, we only need the first two
    # for comparison
    TORCH_VERSION = tuple(int(x) for x in torch.__version__.split('.')[:2])
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def obsolete_torch_version(torch_version, version_threshold):
    """Return True for 'parrots' or any version <= ``version_threshold``."""
    if torch_version == 'parrots':
        return True
    return torch_version <= version_threshold
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class NewEmptyTensorOp(torch.autograd.Function):
    """Autograd op creating an empty tensor of an arbitrary shape.

    The backward pass produces an empty gradient with the input's original
    shape, keeping autograd graphs valid for zero-element inputs.
    """

    @staticmethod
    def forward(ctx, x, new_shape):
        # Remember the input shape so backward can mirror it.
        ctx.shape = x.shape
        return x.new_empty(new_shape)

    @staticmethod
    def backward(ctx, grad):
        # The second return slot is the gradient w.r.t. ``new_shape``,
        # which is a non-tensor argument, hence None.
        return NewEmptyTensorOp.apply(grad, ctx.shape), None
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
@CONV_LAYERS.register_module('Conv', force=True)
class Conv2d(nn.Conv2d):
    """``nn.Conv2d`` that also handles zero-element inputs on old PyTorch.

    PyTorch < 1.5 cannot convolve empty tensors, so the output shape is
    computed by hand and an empty tensor of that shape is returned.
    """

    def forward(self, x):
        if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 4)):
            out_shape = [x.shape[0], self.out_channels]
            for i, k, p, s, d in zip(x.shape[-2:], self.kernel_size,
                                     self.padding, self.stride,
                                     self.dilation):
                # Standard convolution output-size formula.
                out_shape.append((i + 2 * p - (d * (k - 1) + 1)) // s + 1)
            empty = NewEmptyTensorOp.apply(x, out_shape)
            if not self.training:
                return empty
            # Dummy zero term touches every parameter so DDP does not warn
            # about unused parameters.
            dummy = sum(p.view(-1)[0] for p in self.parameters()) * 0.0
            return empty + dummy

        return super().forward(x)
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
@CONV_LAYERS.register_module('Conv3d', force=True)
class Conv3d(nn.Conv3d):
    """``nn.Conv3d`` that also handles zero-element inputs on old PyTorch.

    PyTorch < 1.5 cannot convolve empty tensors, so the output shape is
    computed by hand and an empty tensor of that shape is returned.
    """

    def forward(self, x):
        if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 4)):
            out_shape = [x.shape[0], self.out_channels]
            for i, k, p, s, d in zip(x.shape[-3:], self.kernel_size,
                                     self.padding, self.stride,
                                     self.dilation):
                # Standard convolution output-size formula.
                out_shape.append((i + 2 * p - (d * (k - 1) + 1)) // s + 1)
            empty = NewEmptyTensorOp.apply(x, out_shape)
            if not self.training:
                return empty
            # Dummy zero term touches every parameter so DDP does not warn
            # about unused parameters.
            dummy = sum(p.view(-1)[0] for p in self.parameters()) * 0.0
            return empty + dummy

        return super().forward(x)
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
@CONV_LAYERS.register_module()
@CONV_LAYERS.register_module('deconv')
@UPSAMPLE_LAYERS.register_module('deconv', force=True)
class ConvTranspose2d(nn.ConvTranspose2d):
    """``nn.ConvTranspose2d`` handling zero-element inputs on old PyTorch.

    PyTorch < 1.5 cannot run transposed convolutions on empty tensors, so
    the output shape is computed manually instead.
    """

    def forward(self, x):
        if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 4)):
            out_shape = [x.shape[0], self.out_channels]
            for i, k, p, s, d, op in zip(x.shape[-2:], self.kernel_size,
                                         self.padding, self.stride,
                                         self.dilation, self.output_padding):
                # Transposed-convolution output-size formula.
                out_shape.append((i - 1) * s - 2 * p + (d * (k - 1) + 1) + op)
            empty = NewEmptyTensorOp.apply(x, out_shape)
            if not self.training:
                return empty
            # Dummy zero term touches every parameter so DDP does not warn
            # about unused parameters.
            dummy = sum(p.view(-1)[0] for p in self.parameters()) * 0.0
            return empty + dummy

        return super().forward(x)
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
@CONV_LAYERS.register_module()
@CONV_LAYERS.register_module('deconv3d')
@UPSAMPLE_LAYERS.register_module('deconv3d', force=True)
class ConvTranspose3d(nn.ConvTranspose3d):
    """``nn.ConvTranspose3d`` handling zero-element inputs on old PyTorch.

    PyTorch < 1.5 cannot run transposed convolutions on empty tensors, so
    the output shape is computed manually instead.
    """

    def forward(self, x):
        if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 4)):
            out_shape = [x.shape[0], self.out_channels]
            for i, k, p, s, d, op in zip(x.shape[-3:], self.kernel_size,
                                         self.padding, self.stride,
                                         self.dilation, self.output_padding):
                # Transposed-convolution output-size formula.
                out_shape.append((i - 1) * s - 2 * p + (d * (k - 1) + 1) + op)
            empty = NewEmptyTensorOp.apply(x, out_shape)
            if not self.training:
                return empty
            # Dummy zero term touches every parameter so DDP does not warn
            # about unused parameters.
            dummy = sum(p.view(-1)[0] for p in self.parameters()) * 0.0
            return empty + dummy

        return super().forward(x)
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
class MaxPool2d(nn.MaxPool2d):
    """``nn.MaxPool2d`` supporting empty inputs on PyTorch <= 1.9."""

    def forward(self, x):
        # PyTorch 1.9 does not support empty tensor inference yet
        if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 9)):
            out_shape = list(x.shape[:2])
            dims = zip(x.shape[-2:], _pair(self.kernel_size),
                       _pair(self.padding), _pair(self.stride),
                       _pair(self.dilation))
            for i, k, p, s, d in dims:
                o = (i + 2 * p - (d * (k - 1) + 1)) / s + 1
                out_shape.append(
                    math.ceil(o) if self.ceil_mode else math.floor(o))
            return NewEmptyTensorOp.apply(x, out_shape)

        return super().forward(x)
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
class MaxPool3d(nn.MaxPool3d):
    """``nn.MaxPool3d`` supporting empty inputs on PyTorch <= 1.9."""

    def forward(self, x):
        # PyTorch 1.9 does not support empty tensor inference yet
        if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 9)):
            out_shape = list(x.shape[:2])
            dims = zip(x.shape[-3:], _triple(self.kernel_size),
                       _triple(self.padding), _triple(self.stride),
                       _triple(self.dilation))
            for i, k, p, s, d in dims:
                o = (i + 2 * p - (d * (k - 1) + 1)) / s + 1
                out_shape.append(
                    math.ceil(o) if self.ceil_mode else math.floor(o))
            return NewEmptyTensorOp.apply(x, out_shape)

        return super().forward(x)
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
class Linear(torch.nn.Linear):
    """``nn.Linear`` supporting empty inputs on PyTorch < 1.6."""

    def forward(self, x):
        # empty tensor forward of Linear layer is supported in Pytorch 1.6
        if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 5)):
            empty = NewEmptyTensorOp.apply(
                x, [x.shape[0], self.out_features])
            if not self.training:
                return empty
            # Dummy zero term touches every parameter so DDP does not warn
            # about unused parameters.
            dummy = sum(p.view(-1)[0] for p in self.parameters()) * 0.0
            return empty + dummy

        return super().forward(x)
|
RAVE-main/annotator/mmpkg/mmcv/cnn/builder.py
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
from ..runner import Sequential
from ..utils import Registry, build_from_cfg


def build_model_from_cfg(cfg, registry, default_args=None):
    """Build a PyTorch model from config dict(s). Different from
    ``build_from_cfg``, if cfg is a list, a ``nn.Sequential`` will be built.

    Args:
        cfg (dict, list[dict]): The config of modules; it is either a config
            dict or a list of config dicts. If cfg is a list,
            the built modules will be wrapped with ``nn.Sequential``.
        registry (:obj:`Registry`): A registry the module belongs to.
        default_args (dict, optional): Default arguments to build the module.
            Defaults to None.

    Returns:
        nn.Module: A built nn module.
    """
    if isinstance(cfg, list):
        # Build each config independently, then chain them in order.
        modules = [
            build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg
        ]
        return Sequential(*modules)
    else:
        return build_from_cfg(cfg, registry, default_args)


# Global registry for model classes; uses the list-aware builder above.
MODELS = Registry('model', build_func=build_model_from_cfg)
|
RAVE-main/annotator/mmpkg/mmcv/cnn/resnet.py
ADDED
|
@@ -0,0 +1,316 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import logging
|
| 3 |
+
|
| 4 |
+
import torch.nn as nn
|
| 5 |
+
import torch.utils.checkpoint as cp
|
| 6 |
+
|
| 7 |
+
from .utils import constant_init, kaiming_init
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def conv3x3(in_planes, out_planes, stride=1, dilation=1):
    """3x3 convolution with padding."""
    # padding == dilation keeps the spatial size unchanged at stride 1;
    # bias is omitted because a BatchNorm layer follows.
    conv_kwargs = dict(
        kernel_size=3,
        stride=stride,
        padding=dilation,
        dilation=dilation,
        bias=False)
    return nn.Conv2d(in_planes, out_planes, **conv_kwargs)


class BasicBlock(nn.Module):
    """Basic residual block (two 3x3 convs), used by ResNet-18/34.

    Args:
        inplanes (int): Input channels.
        planes (int): Output channels.
        stride (int): Stride of the first conv. Default: 1.
        dilation (int): Dilation of both convs. Default: 1.
        downsample (nn.Module, optional): Projection applied to the residual
            when input and output shapes differ.
        style (str): 'pytorch' or 'caffe'; accepted for API symmetry with
            ``Bottleneck`` but has no effect in this block.
        with_cp (bool): Checkpointing is not supported here; must be False.
    """

    expansion = 1

    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False):
        super(BasicBlock, self).__init__()
        assert style in ['pytorch', 'caffe']
        self.conv1 = conv3x3(inplanes, planes, stride, dilation)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        assert not with_cp

    def forward(self, x):
        """conv-bn-relu, conv-bn, add residual, relu."""
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))

        residual = x if self.downsample is None else self.downsample(x)
        out += residual
        return self.relu(out)
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
class Bottleneck(nn.Module):
    """Bottleneck residual block (1x1 -> 3x3 -> 1x1), used by ResNet-50+.

    If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
    it is "caffe", the stride-two layer is the first 1x1 conv layer.
    """

    expansion = 4

    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False):
        super(Bottleneck, self).__init__()
        assert style in ['pytorch', 'caffe']
        # Decide which conv carries the stride, per the docstring above.
        if style == 'pytorch':
            conv1_stride, conv2_stride = 1, stride
        else:
            conv1_stride, conv2_stride = stride, 1
        self.conv1 = nn.Conv2d(
            inplanes, planes, kernel_size=1, stride=conv1_stride, bias=False)
        self.conv2 = nn.Conv2d(
            planes,
            planes,
            kernel_size=3,
            stride=conv2_stride,
            padding=dilation,
            dilation=dilation,
            bias=False)

        self.bn1 = nn.BatchNorm2d(planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(
            planes, planes * self.expansion, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        self.with_cp = with_cp

    def forward(self, x):

        def _inner_forward(inp):
            residual = inp

            out = self.relu(self.bn1(self.conv1(inp)))
            out = self.relu(self.bn2(self.conv2(out)))
            out = self.bn3(self.conv3(out))

            if self.downsample is not None:
                residual = self.downsample(inp)

            out += residual
            return out

        # Gradient checkpointing trades compute for memory during training.
        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)

        return self.relu(out)
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
def make_res_layer(block,
                   inplanes,
                   planes,
                   blocks,
                   stride=1,
                   dilation=1,
                   style='pytorch',
                   with_cp=False):
    """Stack ``blocks`` residual blocks into one ResNet stage.

    A 1x1 conv + BN projection is attached to the first block's shortcut
    whenever the stage changes resolution or channel count.
    """
    downsample = None
    if stride != 1 or inplanes != planes * block.expansion:
        downsample = nn.Sequential(
            nn.Conv2d(
                inplanes,
                planes * block.expansion,
                kernel_size=1,
                stride=stride,
                bias=False),
            nn.BatchNorm2d(planes * block.expansion),
        )

    # Only the first block may stride/downsample; the rest keep stride 1.
    layers = [
        block(
            inplanes,
            planes,
            stride,
            dilation,
            downsample,
            style=style,
            with_cp=with_cp)
    ]
    inplanes = planes * block.expansion
    layers.extend(
        block(inplanes, planes, 1, dilation, style=style, with_cp=with_cp)
        for _ in range(1, blocks))

    return nn.Sequential(*layers)
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
class ResNet(nn.Module):
    """ResNet backbone.

    Args:
        depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
        num_stages (int): Resnet stages, normally 4.
        strides (Sequence[int]): Strides of the first block of each stage.
        dilations (Sequence[int]): Dilation of each stage.
        out_indices (Sequence[int]): Output from which stages.
        style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
            layer is the 3x3 conv layer, otherwise the stride-two layer is
            the first 1x1 conv layer.
        frozen_stages (int): Stages to be frozen (all param fixed). -1 means
            not freezing any parameters.
        bn_eval (bool): Whether to set BN layers as eval mode, namely, freeze
            running stats (mean and var).
        bn_frozen (bool): Whether to freeze weight and bias of BN layers.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed.
    """

    # depth -> (block class, number of blocks in each stage)
    arch_settings = {
        18: (BasicBlock, (2, 2, 2, 2)),
        34: (BasicBlock, (3, 4, 6, 3)),
        50: (Bottleneck, (3, 4, 6, 3)),
        101: (Bottleneck, (3, 4, 23, 3)),
        152: (Bottleneck, (3, 8, 36, 3))
    }

    def __init__(self,
                 depth,
                 num_stages=4,
                 strides=(1, 2, 2, 2),
                 dilations=(1, 1, 1, 1),
                 out_indices=(0, 1, 2, 3),
                 style='pytorch',
                 frozen_stages=-1,
                 bn_eval=True,
                 bn_frozen=False,
                 with_cp=False):
        super(ResNet, self).__init__()
        if depth not in self.arch_settings:
            raise KeyError(f'invalid depth {depth} for resnet')
        assert num_stages >= 1 and num_stages <= 4
        block, stage_blocks = self.arch_settings[depth]
        stage_blocks = stage_blocks[:num_stages]
        assert len(strides) == len(dilations) == num_stages
        assert max(out_indices) < num_stages

        self.out_indices = out_indices
        self.style = style
        self.frozen_stages = frozen_stages
        self.bn_eval = bn_eval
        self.bn_frozen = bn_frozen
        self.with_cp = with_cp

        # Stem: 7x7 stride-2 conv + BN + ReLU + 3x3 stride-2 max pool.
        self.inplanes = 64
        self.conv1 = nn.Conv2d(
            3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        self.res_layers = []
        for i, num_blocks in enumerate(stage_blocks):
            stride = strides[i]
            dilation = dilations[i]
            # Channel width doubles each stage: 64, 128, 256, 512.
            planes = 64 * 2**i
            res_layer = make_res_layer(
                block,
                self.inplanes,
                planes,
                num_blocks,
                stride=stride,
                dilation=dilation,
                style=self.style,
                with_cp=with_cp)
            self.inplanes = planes * block.expansion
            layer_name = f'layer{i + 1}'
            self.add_module(layer_name, res_layer)
            self.res_layers.append(layer_name)

        # Channel count of the last stage's output feature map.
        self.feat_dim = block.expansion * 64 * 2**(len(stage_blocks) - 1)

    def init_weights(self, pretrained=None):
        """Initialize weights, optionally loading them from ``pretrained``."""
        if isinstance(pretrained, str):
            logger = logging.getLogger()
            from ..runner import load_checkpoint
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, nn.BatchNorm2d):
                    constant_init(m, 1)
        else:
            raise TypeError('pretrained must be a str or None')

    def forward(self, x):
        """Run the stem and all stages; return the selected stage outputs."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        outs = []
        for i, layer_name in enumerate(self.res_layers):
            res_layer = getattr(self, layer_name)
            x = res_layer(x)
            if i in self.out_indices:
                outs.append(x)
        # Single requested stage -> bare tensor; otherwise a tuple.
        if len(outs) == 1:
            return outs[0]
        else:
            return tuple(outs)

    def train(self, mode=True):
        """Switch train mode while honoring the BN/stage freezing options."""
        super(ResNet, self).train(mode)
        if self.bn_eval:
            for m in self.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eval()
                    if self.bn_frozen:
                        for params in m.parameters():
                            params.requires_grad = False
        if mode and self.frozen_stages >= 0:
            # Freeze the stem plus stages 1..frozen_stages.
            for param in self.conv1.parameters():
                param.requires_grad = False
            for param in self.bn1.parameters():
                param.requires_grad = False
            self.bn1.eval()
            self.bn1.weight.requires_grad = False
            self.bn1.bias.requires_grad = False
            for i in range(1, self.frozen_stages + 1):
                mod = getattr(self, f'layer{i}')
                mod.eval()
                for param in mod.parameters():
                    param.requires_grad = False
|
RAVE-main/annotator/mmpkg/mmcv/cnn/vgg.py
ADDED
|
@@ -0,0 +1,175 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import logging
|
| 3 |
+
|
| 4 |
+
import torch.nn as nn
|
| 5 |
+
|
| 6 |
+
from .utils import constant_init, kaiming_init, normal_init
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def conv3x3(in_planes, out_planes, dilation=1):
    """3x3 convolution with padding."""
    # padding == dilation keeps the spatial resolution unchanged.
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        padding=dilation,
        dilation=dilation)


def make_vgg_layer(inplanes,
                   planes,
                   num_blocks,
                   dilation=1,
                   with_bn=False,
                   ceil_mode=False):
    """Build one VGG stage as a flat module list.

    The stage is ``num_blocks`` repetitions of conv(+BN)+ReLU followed by
    a single 2x2 max pooling layer.
    """
    modules = []
    in_ch = inplanes
    for _ in range(num_blocks):
        modules.append(conv3x3(in_ch, planes, dilation))
        if with_bn:
            modules.append(nn.BatchNorm2d(planes))
        modules.append(nn.ReLU(inplace=True))
        in_ch = planes
    modules.append(
        nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=ceil_mode))

    return modules
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
class VGG(nn.Module):
    """VGG backbone.

    Args:
        depth (int): Depth of vgg, from {11, 13, 16, 19}.
        with_bn (bool): Use BatchNorm or not.
        num_classes (int): number of classes for classification.
        num_stages (int): VGG stages, normally 5.
        dilations (Sequence[int]): Dilation of each stage.
        out_indices (Sequence[int]): Output from which stages.
        frozen_stages (int): Stages to be frozen (all param fixed). -1 means
            not freezing any parameters.
        bn_eval (bool): Whether to set BN layers as eval mode, namely, freeze
            running stats (mean and var).
        bn_frozen (bool): Whether to freeze weight and bias of BN layers.
    """

    # depth -> number of conv blocks in each of the 5 stages
    arch_settings = {
        11: (1, 1, 2, 2, 2),
        13: (2, 2, 2, 2, 2),
        16: (2, 2, 3, 3, 3),
        19: (2, 2, 4, 4, 4)
    }

    def __init__(self,
                 depth,
                 with_bn=False,
                 num_classes=-1,
                 num_stages=5,
                 dilations=(1, 1, 1, 1, 1),
                 out_indices=(0, 1, 2, 3, 4),
                 frozen_stages=-1,
                 bn_eval=True,
                 bn_frozen=False,
                 ceil_mode=False,
                 with_last_pool=True):
        super(VGG, self).__init__()
        if depth not in self.arch_settings:
            raise KeyError(f'invalid depth {depth} for vgg')
        assert num_stages >= 1 and num_stages <= 5
        stage_blocks = self.arch_settings[depth]
        self.stage_blocks = stage_blocks[:num_stages]
        assert len(dilations) == num_stages
        # NOTE(review): forward only iterates stage indices
        # 0..num_stages-1, so an out_index equal to num_stages is never
        # emitted — confirm whether this bound should be strict (<).
        assert max(out_indices) <= num_stages

        self.num_classes = num_classes
        self.out_indices = out_indices
        self.frozen_stages = frozen_stages
        self.bn_eval = bn_eval
        self.bn_frozen = bn_frozen

        self.inplanes = 3
        start_idx = 0
        vgg_layers = []
        self.range_sub_modules = []
        for i, num_blocks in enumerate(self.stage_blocks):
            # Per block: conv + ReLU (+ BN), plus one pool closing the stage.
            num_modules = num_blocks * (2 + with_bn) + 1
            end_idx = start_idx + num_modules
            dilation = dilations[i]
            planes = 64 * 2**i if i < 4 else 512
            vgg_layer = make_vgg_layer(
                self.inplanes,
                planes,
                num_blocks,
                dilation=dilation,
                with_bn=with_bn,
                ceil_mode=ceil_mode)
            vgg_layers.extend(vgg_layer)
            self.inplanes = planes
            # [start, end) index range of this stage inside ``features``.
            self.range_sub_modules.append([start_idx, end_idx])
            start_idx = end_idx
        if not with_last_pool:
            vgg_layers.pop(-1)
            self.range_sub_modules[-1][1] -= 1
        self.module_name = 'features'
        self.add_module(self.module_name, nn.Sequential(*vgg_layers))

        if self.num_classes > 0:
            self.classifier = nn.Sequential(
                nn.Linear(512 * 7 * 7, 4096),
                nn.ReLU(True),
                nn.Dropout(),
                nn.Linear(4096, 4096),
                nn.ReLU(True),
                nn.Dropout(),
                nn.Linear(4096, num_classes),
            )

    def init_weights(self, pretrained=None):
        """Initialize weights, optionally loading them from ``pretrained``."""
        if isinstance(pretrained, str):
            logger = logging.getLogger()
            from ..runner import load_checkpoint
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, nn.BatchNorm2d):
                    constant_init(m, 1)
                elif isinstance(m, nn.Linear):
                    normal_init(m, std=0.01)
        else:
            raise TypeError('pretrained must be a str or None')

    def forward(self, x):
        """Forward through each stage; return selected stage outputs."""
        outs = []
        vgg_layers = getattr(self, self.module_name)
        for i in range(len(self.stage_blocks)):
            for j in range(*self.range_sub_modules[i]):
                vgg_layer = vgg_layers[j]
                x = vgg_layer(x)
            if i in self.out_indices:
                outs.append(x)
        if self.num_classes > 0:
            x = x.view(x.size(0), -1)
            x = self.classifier(x)
            outs.append(x)
        # Single output -> bare tensor; otherwise a tuple.
        if len(outs) == 1:
            return outs[0]
        else:
            return tuple(outs)

    def train(self, mode=True):
        """Switch train mode while honoring the BN/stage freezing options."""
        super(VGG, self).train(mode)
        if self.bn_eval:
            for m in self.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eval()
                    if self.bn_frozen:
                        for params in m.parameters():
                            params.requires_grad = False
        vgg_layers = getattr(self, self.module_name)
        if mode and self.frozen_stages >= 0:
            # Freeze every module belonging to stages 0..frozen_stages-1.
            for i in range(self.frozen_stages):
                for j in range(*self.range_sub_modules[i]):
                    mod = vgg_layers[j]
                    mod.eval()
                    for param in mod.parameters():
                        param.requires_grad = False
|
RAVE-main/annotator/mmpkg/mmcv/engine/test.py
ADDED
|
@@ -0,0 +1,202 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import os.path as osp
|
| 3 |
+
import pickle
|
| 4 |
+
import shutil
|
| 5 |
+
import tempfile
|
| 6 |
+
import time
|
| 7 |
+
|
| 8 |
+
import torch
|
| 9 |
+
import torch.distributed as dist
|
| 10 |
+
|
| 11 |
+
import annotator.mmpkg.mmcv as mmcv
|
| 12 |
+
from annotator.mmpkg.mmcv.runner import get_dist_info
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def single_gpu_test(model, data_loader):
    """Test model with a single gpu.

    This method tests model with a single gpu and displays test progress bar.

    Args:
        model (nn.Module): Model to be tested.
        data_loader (nn.Dataloader): Pytorch data loader.

    Returns:
        list: The prediction results.
    """
    model.eval()
    results = []
    prog_bar = mmcv.ProgressBar(len(data_loader.dataset))
    for data in data_loader:
        with torch.no_grad():
            batch_results = model(return_loss=False, **data)
        results.extend(batch_results)

        # One progress tick per sample: the number of returned results is
        # assumed to equal the batch size
        # (see https://github.com/open-mmlab/mmcv/issues/985).
        for _ in range(len(batch_results)):
            prog_bar.update()
    return results
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):
    """Test model with multiple gpus.

    This method tests model with multiple gpus and collects the results
    under two different modes: gpu and cpu modes. By setting
    ``gpu_collect=True``, it encodes results to gpu tensors and use gpu
    communication for results collection. On cpu mode it saves the results on
    different gpus to ``tmpdir`` and collects them by the rank 0 worker.

    Args:
        model (nn.Module): Model to be tested.
        data_loader (nn.Dataloader): Pytorch data loader.
        tmpdir (str): Path of directory to save the temporary results from
            different gpus under cpu mode.
        gpu_collect (bool): Option to use either gpu or cpu to collect results.

    Returns:
        list: The prediction results.
    """
    model.eval()
    rank, world_size = get_dist_info()
    dataset = data_loader.dataset
    if rank == 0:
        prog_bar = mmcv.ProgressBar(len(dataset))
    time.sleep(2)  # This line can prevent deadlock problem in some cases.
    results = []
    for data in data_loader:
        with torch.no_grad():
            batch_results = model(return_loss=False, **data)
        results.extend(batch_results)

        if rank == 0:
            # Progress is tracked on rank 0 only; every rank is assumed to
            # process the same batch size this step. Clamp so the bar never
            # runs past the dataset length (the loader may pad samples).
            done = min(len(batch_results) * world_size,
                       len(dataset) - prog_bar.completed)
            for _ in range(done):
                prog_bar.update()

    # collect results from all ranks
    if gpu_collect:
        return collect_results_gpu(results, len(dataset))
    return collect_results_cpu(results, len(dataset), tmpdir)
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
def collect_results_cpu(result_part, size, tmpdir=None):
    """Collect results under cpu mode.

    On cpu mode, this function will save the results on different gpus to
    ``tmpdir`` and collect them by the rank 0 worker.

    Args:
        result_part (list): Result list containing result parts
            to be collected.
        size (int): Size of the results, commonly equal to length of
            the results.
        tmpdir (str | None): temporal directory for collected results to
            store. If set to None, it will create a random temporal directory
            for it.

    Returns:
        list: The collected results on rank 0; ``None`` on all other ranks.
    """
    rank, world_size = get_dist_info()
    # create a tmp dir if it is not specified
    if tmpdir is None:
        # Fixed-size buffer so every rank broadcasts/receives the same shape.
        MAX_LEN = 512
        # 32 is whitespace
        dir_tensor = torch.full((MAX_LEN, ),
                                32,
                                dtype=torch.uint8,
                                device='cuda')
        if rank == 0:
            mmcv.mkdir_or_exist('.dist_test')
            tmpdir = tempfile.mkdtemp(dir='.dist_test')
            # Encode the path as bytes so it can travel through a CUDA tensor.
            tmpdir = torch.tensor(
                bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
            dir_tensor[:len(tmpdir)] = tmpdir
        # Collective call: executed by every rank in the same order so the
        # non-zero ranks learn the directory chosen by rank 0.
        dist.broadcast(dir_tensor, 0)
        # Strip the whitespace padding to recover the path string.
        tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
    else:
        mmcv.mkdir_or_exist(tmpdir)
    # dump the part result to the dir
    mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl'))
    # Make sure every rank has finished writing before rank 0 reads.
    dist.barrier()
    # collect all parts
    if rank != 0:
        return None
    else:
        # load results of all parts from tmp dir
        part_list = []
        for i in range(world_size):
            part_file = osp.join(tmpdir, f'part_{i}.pkl')
            part_result = mmcv.load(part_file)
            # When data is severely insufficient, an empty part_result
            # on a certain gpu can make the overall outputs empty.
            if part_result:
                part_list.append(part_result)
        # sort the results: interleave the per-rank parts back into the
        # original sample order (the distributed sampler deals samples
        # round-robin across ranks).
        ordered_results = []
        for res in zip(*part_list):
            ordered_results.extend(list(res))
        # the dataloader may pad some samples
        ordered_results = ordered_results[:size]
        # remove tmp dir
        shutil.rmtree(tmpdir)
        return ordered_results
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
def collect_results_gpu(result_part, size):
    """Collect results under gpu mode.

    On gpu mode, this function will encode results to gpu tensors and use gpu
    communication for results collection.

    Args:
        result_part (list): Result list containing result parts
            to be collected.
        size (int): Size of the results, commonly equal to length of
            the results.

    Returns:
        list: The collected results on rank 0; ``None`` on all other ranks.
    """
    rank, world_size = get_dist_info()
    # dump result part to tensor with pickle
    part_tensor = torch.tensor(
        bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda')
    # gather all result part tensor shape (parts may differ in length)
    shape_tensor = torch.tensor(part_tensor.shape, device='cuda')
    shape_list = [shape_tensor.clone() for _ in range(world_size)]
    dist.all_gather(shape_list, shape_tensor)
    # padding result part tensor to max length so all_gather sees equal shapes
    shape_max = torch.tensor(shape_list).max()
    part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')
    part_send[:shape_tensor[0]] = part_tensor
    part_recv_list = [
        part_tensor.new_zeros(shape_max) for _ in range(world_size)
    ]
    # gather all result part
    dist.all_gather(part_recv_list, part_send)

    # Only rank 0 decodes and merges; other ranks fall through and
    # implicitly return None.
    if rank == 0:
        part_list = []
        for recv, shape in zip(part_recv_list, shape_list):
            # Trim the padding before unpickling each rank's payload.
            part_result = pickle.loads(recv[:shape[0]].cpu().numpy().tobytes())
            # When data is severely insufficient, an empty part_result
            # on a certain gpu can make the overall outputs empty.
            if part_result:
                part_list.append(part_result)
        # sort the results: interleave the per-rank parts back into the
        # original sample order.
        ordered_results = []
        for res in zip(*part_list):
            ordered_results.extend(list(res))
        # the dataloader may pad some samples
        ordered_results = ordered_results[:size]
        return ordered_results
|
RAVE-main/annotator/mmpkg/mmcv/model_zoo/deprecated.json
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"resnet50_caffe": "detectron/resnet50_caffe",
|
| 3 |
+
"resnet50_caffe_bgr": "detectron2/resnet50_caffe_bgr",
|
| 4 |
+
"resnet101_caffe": "detectron/resnet101_caffe",
|
| 5 |
+
"resnet101_caffe_bgr": "detectron2/resnet101_caffe_bgr"
|
| 6 |
+
}
|
RAVE-main/annotator/mmpkg/mmcv/model_zoo/mmcls.json
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"vgg11": "https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_batch256_imagenet_20210208-4271cd6c.pth",
|
| 3 |
+
"vgg13": "https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_batch256_imagenet_20210208-4d1d6080.pth",
|
| 4 |
+
"vgg16": "https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_batch256_imagenet_20210208-db26f1a5.pth",
|
| 5 |
+
"vgg19": "https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_batch256_imagenet_20210208-e6920e4a.pth",
|
| 6 |
+
"vgg11_bn": "https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_bn_batch256_imagenet_20210207-f244902c.pth",
|
| 7 |
+
"vgg13_bn": "https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_bn_batch256_imagenet_20210207-1a8b7864.pth",
|
| 8 |
+
"vgg16_bn": "https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_bn_batch256_imagenet_20210208-7e55cd29.pth",
|
| 9 |
+
"vgg19_bn": "https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_bn_batch256_imagenet_20210208-da620c4f.pth",
|
| 10 |
+
"resnet18": "https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_batch256_imagenet_20200708-34ab8f90.pth",
|
| 11 |
+
"resnet34": "https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_batch256_imagenet_20200708-32ffb4f7.pth",
|
| 12 |
+
"resnet50": "https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_batch256_imagenet_20200708-cfb998bf.pth",
|
| 13 |
+
"resnet101": "https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_batch256_imagenet_20200708-753f3608.pth",
|
| 14 |
+
"resnet152": "https://download.openmmlab.com/mmclassification/v0/resnet/resnet152_batch256_imagenet_20200708-ec25b1f9.pth",
|
| 15 |
+
"resnet50_v1d": "https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d50_batch256_imagenet_20200708-1ad0ce94.pth",
|
| 16 |
+
"resnet101_v1d": "https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d101_batch256_imagenet_20200708-9cb302ef.pth",
|
| 17 |
+
"resnet152_v1d": "https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d152_batch256_imagenet_20200708-e79cb6a2.pth",
|
| 18 |
+
"resnext50_32x4d": "https://download.openmmlab.com/mmclassification/v0/resnext/resnext50_32x4d_b32x8_imagenet_20210429-56066e27.pth",
|
| 19 |
+
"resnext101_32x4d": "https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x4d_b32x8_imagenet_20210506-e0fa3dd5.pth",
|
| 20 |
+
"resnext101_32x8d": "https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x8d_b32x8_imagenet_20210506-23a247d5.pth",
|
| 21 |
+
"resnext152_32x4d": "https://download.openmmlab.com/mmclassification/v0/resnext/resnext152_32x4d_b32x8_imagenet_20210524-927787be.pth",
|
| 22 |
+
"se-resnet50": "https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet50_batch256_imagenet_20200804-ae206104.pth",
|
| 23 |
+
"se-resnet101": "https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet101_batch256_imagenet_20200804-ba5b51d4.pth",
|
| 24 |
+
"resnest50": "https://download.openmmlab.com/mmclassification/v0/resnest/resnest50_imagenet_converted-1ebf0afe.pth",
|
| 25 |
+
"resnest101": "https://download.openmmlab.com/mmclassification/v0/resnest/resnest101_imagenet_converted-032caa52.pth",
|
| 26 |
+
"resnest200": "https://download.openmmlab.com/mmclassification/v0/resnest/resnest200_imagenet_converted-581a60f2.pth",
|
| 27 |
+
"resnest269": "https://download.openmmlab.com/mmclassification/v0/resnest/resnest269_imagenet_converted-59930960.pth",
|
| 28 |
+
"shufflenet_v1": "https://download.openmmlab.com/mmclassification/v0/shufflenet_v1/shufflenet_v1_batch1024_imagenet_20200804-5d6cec73.pth",
|
| 29 |
+
"shufflenet_v2": "https://download.openmmlab.com/mmclassification/v0/shufflenet_v2/shufflenet_v2_batch1024_imagenet_20200812-5bf4721e.pth",
|
| 30 |
+
"mobilenet_v2": "https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth"
|
| 31 |
+
}
|
RAVE-main/annotator/mmpkg/mmcv/model_zoo/open_mmlab.json
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"vgg16_caffe": "https://download.openmmlab.com/pretrain/third_party/vgg16_caffe-292e1171.pth",
|
| 3 |
+
"detectron/resnet50_caffe": "https://download.openmmlab.com/pretrain/third_party/resnet50_caffe-788b5fa3.pth",
|
| 4 |
+
"detectron2/resnet50_caffe": "https://download.openmmlab.com/pretrain/third_party/resnet50_msra-5891d200.pth",
|
| 5 |
+
"detectron/resnet101_caffe": "https://download.openmmlab.com/pretrain/third_party/resnet101_caffe-3ad79236.pth",
|
| 6 |
+
"detectron2/resnet101_caffe": "https://download.openmmlab.com/pretrain/third_party/resnet101_msra-6cc46731.pth",
|
| 7 |
+
"detectron2/resnext101_32x8d": "https://download.openmmlab.com/pretrain/third_party/resnext101_32x8d-1516f1aa.pth",
|
| 8 |
+
"resnext50_32x4d": "https://download.openmmlab.com/pretrain/third_party/resnext50-32x4d-0ab1a123.pth",
|
| 9 |
+
"resnext101_32x4d": "https://download.openmmlab.com/pretrain/third_party/resnext101_32x4d-a5af3160.pth",
|
| 10 |
+
"resnext101_64x4d": "https://download.openmmlab.com/pretrain/third_party/resnext101_64x4d-ee2c6f71.pth",
|
| 11 |
+
"contrib/resnet50_gn": "https://download.openmmlab.com/pretrain/third_party/resnet50_gn_thangvubk-ad1730dd.pth",
|
| 12 |
+
"detectron/resnet50_gn": "https://download.openmmlab.com/pretrain/third_party/resnet50_gn-9186a21c.pth",
|
| 13 |
+
"detectron/resnet101_gn": "https://download.openmmlab.com/pretrain/third_party/resnet101_gn-cac0ab98.pth",
|
| 14 |
+
"jhu/resnet50_gn_ws": "https://download.openmmlab.com/pretrain/third_party/resnet50_gn_ws-15beedd8.pth",
|
| 15 |
+
"jhu/resnet101_gn_ws": "https://download.openmmlab.com/pretrain/third_party/resnet101_gn_ws-3e3c308c.pth",
|
| 16 |
+
"jhu/resnext50_32x4d_gn_ws": "https://download.openmmlab.com/pretrain/third_party/resnext50_32x4d_gn_ws-0d87ac85.pth",
|
| 17 |
+
"jhu/resnext101_32x4d_gn_ws": "https://download.openmmlab.com/pretrain/third_party/resnext101_32x4d_gn_ws-34ac1a9e.pth",
|
| 18 |
+
"jhu/resnext50_32x4d_gn": "https://download.openmmlab.com/pretrain/third_party/resnext50_32x4d_gn-c7e8b754.pth",
|
| 19 |
+
"jhu/resnext101_32x4d_gn": "https://download.openmmlab.com/pretrain/third_party/resnext101_32x4d_gn-ac3bb84e.pth",
|
| 20 |
+
"msra/hrnetv2_w18_small": "https://download.openmmlab.com/pretrain/third_party/hrnetv2_w18_small-b5a04e21.pth",
|
| 21 |
+
"msra/hrnetv2_w18": "https://download.openmmlab.com/pretrain/third_party/hrnetv2_w18-00eb2006.pth",
|
| 22 |
+
"msra/hrnetv2_w32": "https://download.openmmlab.com/pretrain/third_party/hrnetv2_w32-dc9eeb4f.pth",
|
| 23 |
+
"msra/hrnetv2_w40": "https://download.openmmlab.com/pretrain/third_party/hrnetv2_w40-ed0b031c.pth",
|
| 24 |
+
"msra/hrnetv2_w48": "https://download.openmmlab.com/pretrain/third_party/hrnetv2_w48-d2186c55.pth",
|
| 25 |
+
"bninception_caffe": "https://download.openmmlab.com/pretrain/third_party/bn_inception_caffe-ed2e8665.pth",
|
| 26 |
+
"kin400/i3d_r50_f32s2_k400": "https://download.openmmlab.com/pretrain/third_party/i3d_r50_f32s2_k400-2c57e077.pth",
|
| 27 |
+
"kin400/nl3d_r50_f32s2_k400": "https://download.openmmlab.com/pretrain/third_party/nl3d_r50_f32s2_k400-fa7e7caa.pth",
|
| 28 |
+
"res2net101_v1d_26w_4s": "https://download.openmmlab.com/pretrain/third_party/res2net101_v1d_26w_4s_mmdetv2-f0a600f9.pth",
|
| 29 |
+
"regnetx_400mf": "https://download.openmmlab.com/pretrain/third_party/regnetx_400mf-a5b10d96.pth",
|
| 30 |
+
"regnetx_800mf": "https://download.openmmlab.com/pretrain/third_party/regnetx_800mf-1f4be4c7.pth",
|
| 31 |
+
"regnetx_1.6gf": "https://download.openmmlab.com/pretrain/third_party/regnetx_1.6gf-5791c176.pth",
|
| 32 |
+
"regnetx_3.2gf": "https://download.openmmlab.com/pretrain/third_party/regnetx_3.2gf-c2599b0f.pth",
|
| 33 |
+
"regnetx_4.0gf": "https://download.openmmlab.com/pretrain/third_party/regnetx_4.0gf-a88f671e.pth",
|
| 34 |
+
"regnetx_6.4gf": "https://download.openmmlab.com/pretrain/third_party/regnetx_6.4gf-006af45d.pth",
|
| 35 |
+
"regnetx_8.0gf": "https://download.openmmlab.com/pretrain/third_party/regnetx_8.0gf-3c68abe7.pth",
|
| 36 |
+
"regnetx_12gf": "https://download.openmmlab.com/pretrain/third_party/regnetx_12gf-4c2a3350.pth",
|
| 37 |
+
"resnet18_v1c": "https://download.openmmlab.com/pretrain/third_party/resnet18_v1c-b5776b93.pth",
|
| 38 |
+
"resnet50_v1c": "https://download.openmmlab.com/pretrain/third_party/resnet50_v1c-2cccc1ad.pth",
|
| 39 |
+
"resnet101_v1c": "https://download.openmmlab.com/pretrain/third_party/resnet101_v1c-e67eebb6.pth",
|
| 40 |
+
"mmedit/vgg16": "https://download.openmmlab.com/mmediting/third_party/vgg_state_dict.pth",
|
| 41 |
+
"mmedit/res34_en_nomixup": "https://download.openmmlab.com/mmediting/third_party/model_best_resnet34_En_nomixup.pth",
|
| 42 |
+
"mmedit/mobilenet_v2": "https://download.openmmlab.com/mmediting/third_party/mobilenet_v2.pth",
|
| 43 |
+
"contrib/mobilenet_v3_large": "https://download.openmmlab.com/pretrain/third_party/mobilenet_v3_large-bc2c3fd3.pth",
|
| 44 |
+
"contrib/mobilenet_v3_small": "https://download.openmmlab.com/pretrain/third_party/mobilenet_v3_small-47085aa1.pth",
|
| 45 |
+
"resnest50": "https://download.openmmlab.com/pretrain/third_party/resnest50_d2-7497a55b.pth",
|
| 46 |
+
"resnest101": "https://download.openmmlab.com/pretrain/third_party/resnest101_d2-f3b931b2.pth",
|
| 47 |
+
"resnest200": "https://download.openmmlab.com/pretrain/third_party/resnest200_d2-ca88e41f.pth",
|
| 48 |
+
"darknet53": "https://download.openmmlab.com/pretrain/third_party/darknet53-a628ea1b.pth",
|
| 49 |
+
"mmdet/mobilenet_v2": "https://download.openmmlab.com/mmdetection/v2.0/third_party/mobilenet_v2_batch256_imagenet-ff34753d.pth"
|
| 50 |
+
}
|
RAVE-main/annotator/mmpkg/mmcv/ops/__init__.py
ADDED
|
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from .assign_score_withk import assign_score_withk
|
| 3 |
+
from .ball_query import ball_query
|
| 4 |
+
from .bbox import bbox_overlaps
|
| 5 |
+
from .border_align import BorderAlign, border_align
|
| 6 |
+
from .box_iou_rotated import box_iou_rotated
|
| 7 |
+
from .carafe import CARAFE, CARAFENaive, CARAFEPack, carafe, carafe_naive
|
| 8 |
+
from .cc_attention import CrissCrossAttention
|
| 9 |
+
from .contour_expand import contour_expand
|
| 10 |
+
from .corner_pool import CornerPool
|
| 11 |
+
from .correlation import Correlation
|
| 12 |
+
from .deform_conv import DeformConv2d, DeformConv2dPack, deform_conv2d
|
| 13 |
+
from .deform_roi_pool import (DeformRoIPool, DeformRoIPoolPack,
|
| 14 |
+
ModulatedDeformRoIPoolPack, deform_roi_pool)
|
| 15 |
+
from .deprecated_wrappers import Conv2d_deprecated as Conv2d
|
| 16 |
+
from .deprecated_wrappers import ConvTranspose2d_deprecated as ConvTranspose2d
|
| 17 |
+
from .deprecated_wrappers import Linear_deprecated as Linear
|
| 18 |
+
from .deprecated_wrappers import MaxPool2d_deprecated as MaxPool2d
|
| 19 |
+
from .focal_loss import (SigmoidFocalLoss, SoftmaxFocalLoss,
|
| 20 |
+
sigmoid_focal_loss, softmax_focal_loss)
|
| 21 |
+
from .furthest_point_sample import (furthest_point_sample,
|
| 22 |
+
furthest_point_sample_with_dist)
|
| 23 |
+
from .fused_bias_leakyrelu import FusedBiasLeakyReLU, fused_bias_leakyrelu
|
| 24 |
+
from .gather_points import gather_points
|
| 25 |
+
from .group_points import GroupAll, QueryAndGroup, grouping_operation
|
| 26 |
+
from .info import (get_compiler_version, get_compiling_cuda_version,
|
| 27 |
+
get_onnxruntime_op_path)
|
| 28 |
+
from .iou3d import boxes_iou_bev, nms_bev, nms_normal_bev
|
| 29 |
+
from .knn import knn
|
| 30 |
+
from .masked_conv import MaskedConv2d, masked_conv2d
|
| 31 |
+
from .modulated_deform_conv import (ModulatedDeformConv2d,
|
| 32 |
+
ModulatedDeformConv2dPack,
|
| 33 |
+
modulated_deform_conv2d)
|
| 34 |
+
from .multi_scale_deform_attn import MultiScaleDeformableAttention
|
| 35 |
+
from .nms import batched_nms, nms, nms_match, nms_rotated, soft_nms
|
| 36 |
+
from .pixel_group import pixel_group
|
| 37 |
+
from .point_sample import (SimpleRoIAlign, point_sample,
|
| 38 |
+
rel_roi_point_to_rel_img_point)
|
| 39 |
+
from .points_in_boxes import (points_in_boxes_all, points_in_boxes_cpu,
|
| 40 |
+
points_in_boxes_part)
|
| 41 |
+
from .points_sampler import PointsSampler
|
| 42 |
+
from .psa_mask import PSAMask
|
| 43 |
+
from .roi_align import RoIAlign, roi_align
|
| 44 |
+
from .roi_align_rotated import RoIAlignRotated, roi_align_rotated
|
| 45 |
+
from .roi_pool import RoIPool, roi_pool
|
| 46 |
+
from .roiaware_pool3d import RoIAwarePool3d
|
| 47 |
+
from .roipoint_pool3d import RoIPointPool3d
|
| 48 |
+
from .saconv import SAConv2d
|
| 49 |
+
from .scatter_points import DynamicScatter, dynamic_scatter
|
| 50 |
+
from .sync_bn import SyncBatchNorm
|
| 51 |
+
from .three_interpolate import three_interpolate
|
| 52 |
+
from .three_nn import three_nn
|
| 53 |
+
from .tin_shift import TINShift, tin_shift
|
| 54 |
+
from .upfirdn2d import upfirdn2d
|
| 55 |
+
from .voxelize import Voxelization, voxelization
|
| 56 |
+
|
| 57 |
+
# Explicit public API of the ops package: bounds `from ... import *` to the
# names re-exported above.
__all__ = [
    'bbox_overlaps', 'CARAFE', 'CARAFENaive', 'CARAFEPack', 'carafe',
    'carafe_naive', 'CornerPool', 'DeformConv2d', 'DeformConv2dPack',
    'deform_conv2d', 'DeformRoIPool', 'DeformRoIPoolPack',
    'ModulatedDeformRoIPoolPack', 'deform_roi_pool', 'SigmoidFocalLoss',
    'SoftmaxFocalLoss', 'sigmoid_focal_loss', 'softmax_focal_loss',
    'get_compiler_version', 'get_compiling_cuda_version',
    'get_onnxruntime_op_path', 'MaskedConv2d', 'masked_conv2d',
    'ModulatedDeformConv2d', 'ModulatedDeformConv2dPack',
    'modulated_deform_conv2d', 'batched_nms', 'nms', 'soft_nms', 'nms_match',
    'RoIAlign', 'roi_align', 'RoIPool', 'roi_pool', 'SyncBatchNorm', 'Conv2d',
    'ConvTranspose2d', 'Linear', 'MaxPool2d', 'CrissCrossAttention', 'PSAMask',
    'point_sample', 'rel_roi_point_to_rel_img_point', 'SimpleRoIAlign',
    'SAConv2d', 'TINShift', 'tin_shift', 'assign_score_withk',
    'box_iou_rotated', 'RoIPointPool3d', 'nms_rotated', 'knn', 'ball_query',
    'upfirdn2d', 'FusedBiasLeakyReLU', 'fused_bias_leakyrelu',
    'RoIAlignRotated', 'roi_align_rotated', 'pixel_group', 'QueryAndGroup',
    'GroupAll', 'grouping_operation', 'contour_expand', 'three_nn',
    'three_interpolate', 'MultiScaleDeformableAttention', 'BorderAlign',
    'border_align', 'gather_points', 'furthest_point_sample',
    'furthest_point_sample_with_dist', 'PointsSampler', 'Correlation',
    'boxes_iou_bev', 'nms_bev', 'nms_normal_bev', 'Voxelization',
    'voxelization', 'dynamic_scatter', 'DynamicScatter', 'RoIAwarePool3d',
    'points_in_boxes_part', 'points_in_boxes_cpu', 'points_in_boxes_all'
]
|
RAVE-main/annotator/mmpkg/mmcv/ops/assign_score_withk.py
ADDED
|
@@ -0,0 +1,123 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from torch.autograd import Function
|
| 2 |
+
|
| 3 |
+
from ..utils import ext_loader
|
| 4 |
+
|
| 5 |
+
ext_module = ext_loader.load_ext(
|
| 6 |
+
'_ext', ['assign_score_withk_forward', 'assign_score_withk_backward'])
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class AssignScoreWithK(Function):
    r"""Perform weighted sum to generate output features according to scores.
    Modified from `PAConv <https://github.com/CVMI-Lab/PAConv/tree/main/
    scene_seg/lib/paconv_lib/src/gpu>`_.

    This is a memory-efficient CUDA implementation of assign_scores operation,
    which first transform all point features with weight bank, then assemble
    neighbor features with ``knn_idx`` and perform weighted sum of ``scores``.

    See the `paper <https://arxiv.org/pdf/2103.14635.pdf>`_ appendix Sec. D for
    more detailed descriptions.

    Note:
        This implementation assumes using ``neighbor`` kernel input, which is
        (point_features - center_features, point_features).
        See https://github.com/CVMI-Lab/PAConv/blob/main/scene_seg/model/
        pointnet2/paconv.py#L128 for more details.
    """

    @staticmethod
    def forward(ctx,
                scores,
                point_features,
                center_features,
                knn_idx,
                aggregate='sum'):
        """
        Args:
            scores (torch.Tensor): (B, npoint, K, M), predicted scores to
                aggregate weight matrices in the weight bank.
                ``npoint`` is the number of sampled centers.
                ``K`` is the number of queried neighbors.
                ``M`` is the number of weight matrices in the weight bank.
            point_features (torch.Tensor): (B, N, M, out_dim)
                Pre-computed point features to be aggregated.
            center_features (torch.Tensor): (B, N, M, out_dim)
                Pre-computed center features to be aggregated.
            knn_idx (torch.Tensor): (B, npoint, K), index of sampled kNN.
                We assume the first idx in each row is the idx of the center.
            aggregate (str, optional): Aggregation method.
                Can be 'sum', 'avg' or 'max'. Defaults: 'sum'.

        Returns:
            torch.Tensor: (B, out_dim, npoint, K), the aggregated features.
        """
        # Map the aggregation mode name to the integer code expected by the
        # CUDA extension. Raises KeyError on an unknown mode.
        agg = {'sum': 0, 'avg': 1, 'max': 2}

        B, N, M, out_dim = point_features.size()
        _, npoint, K, _ = scores.size()

        # The extension kernel writes its result into this pre-allocated
        # buffer; inputs are made contiguous as the kernel indexes raw memory.
        output = point_features.new_zeros((B, out_dim, npoint, K))
        ext_module.assign_score_withk_forward(
            point_features.contiguous(),
            center_features.contiguous(),
            scores.contiguous(),
            knn_idx.contiguous(),
            output,
            B=B,
            N0=N,
            N1=npoint,
            M=M,
            K=K,
            O=out_dim,
            aggregate=agg[aggregate])

        # Saved for the backward pass; the aggregation code is stashed on ctx
        # because only tensors may go through save_for_backward.
        ctx.save_for_backward(output, point_features, center_features, scores,
                              knn_idx)
        ctx.agg = agg[aggregate]

        return output

    @staticmethod
    def backward(ctx, grad_out):
        """
        Args:
            grad_out (torch.Tensor): (B, out_dim, npoint, K)

        Returns:
            grad_scores (torch.Tensor): (B, npoint, K, M)
            grad_point_features (torch.Tensor): (B, N, M, out_dim)
            grad_center_features (torch.Tensor): (B, N, M, out_dim)
        """
        # First saved tensor (forward output) is not needed for gradients.
        _, point_features, center_features, scores, knn_idx = ctx.saved_tensors

        agg = ctx.agg

        B, N, M, out_dim = point_features.size()
        _, npoint, K, _ = scores.size()

        # Gradient buffers the extension kernel accumulates into.
        grad_point_features = point_features.new_zeros(point_features.shape)
        grad_center_features = center_features.new_zeros(center_features.shape)
        grad_scores = scores.new_zeros(scores.shape)

        ext_module.assign_score_withk_backward(
            grad_out.contiguous(),
            point_features.contiguous(),
            center_features.contiguous(),
            scores.contiguous(),
            knn_idx.contiguous(),
            grad_point_features,
            grad_center_features,
            grad_scores,
            B=B,
            N0=N,
            N1=npoint,
            M=M,
            K=K,
            O=out_dim,
            aggregate=agg)

        # None gradients for the non-differentiable inputs
        # (knn_idx and the `aggregate` string).
        return grad_scores, grad_point_features, \
            grad_center_features, None, None


# Functional alias: call as assign_score_withk(scores, point_features, ...).
assign_score_withk = AssignScoreWithK.apply
|
RAVE-main/annotator/mmpkg/mmcv/ops/border_align.py
ADDED
|
@@ -0,0 +1,109 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
# modified from
|
| 3 |
+
# https://github.com/Megvii-BaseDetection/cvpods/blob/master/cvpods/layers/border_align.py
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
import torch.nn as nn
|
| 7 |
+
from torch.autograd import Function
|
| 8 |
+
from torch.autograd.function import once_differentiable
|
| 9 |
+
|
| 10 |
+
from ..utils import ext_loader
|
| 11 |
+
|
| 12 |
+
ext_module = ext_loader.load_ext(
|
| 13 |
+
'_ext', ['border_align_forward', 'border_align_backward'])
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class BorderAlignFunction(Function):
    """Autograd wrapper around the ``border_align`` extension kernels.

    ``forward`` pools each box border through the C/CUDA op and stashes the
    argmax indices so that ``backward`` can route gradients to the input.
    """

    @staticmethod
    def symbolic(g, input, boxes, pool_size):
        # ONNX export hook: emit the custom MMCV node.
        return g.op(
            'mmcv::MMCVBorderAlign', input, boxes, pool_size_i=pool_size)

    @staticmethod
    def forward(ctx, input, boxes, pool_size):
        ctx.pool_size = pool_size
        ctx.input_shape = input.size()

        assert boxes.ndim == 3, 'boxes must be with shape [B, H*W, 4]'
        assert boxes.size(2) == 4, \
            'the last dimension of boxes must be (x1, y1, x2, y2)'
        assert input.size(1) % 4 == 0, \
            'the channel for input feature must be divisible by factor 4'

        # Output layout is [B, C//4, H*W, 4]: one slot per border side.
        out_shape = (input.size(0), input.size(1) // 4, boxes.size(1), 4)
        pooled = input.new_zeros(out_shape)
        # Argmax positions are only needed to route gradients in backward.
        argmax = input.new_zeros(out_shape).to(torch.int)

        ext_module.border_align_forward(
            input, boxes, pooled, argmax, pool_size=ctx.pool_size)

        ctx.save_for_backward(boxes, argmax)
        return pooled

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        boxes, argmax = ctx.saved_tensors
        grad_input = grad_output.new_zeros(ctx.input_shape)
        # Complex head architectures may hand us a non-contiguous gradient.
        grad_output = grad_output.contiguous()
        ext_module.border_align_backward(
            grad_output,
            boxes,
            argmax,
            grad_input,
            pool_size=ctx.pool_size)
        # Gradients only flow to `input`; `boxes` and `pool_size` get None.
        return grad_input, None, None


border_align = BorderAlignFunction.apply
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
class BorderAlign(nn.Module):
    r"""Border align pooling layer.

    Applies border_align over the input feature based on predicted bboxes.
    Described in the paper `BorderDet: Border Feature for Dense Object
    Detection <https://arxiv.org/abs/2007.11056>`_.

    For each border line (top, left, bottom or right) of each box the layer:

    1. uniformly samples ``pool_size + 1`` positions on the line, including
       the start and end points;
    2. computes the feature at each sampled position with bilinear
       interpolation;
    3. max-pools over all sampled positions to obtain the pooled feature.

    Args:
        pool_size (int): number of positions sampled over the boxes' borders
            (e.g. top, bottom, left, right).
    """

    def __init__(self, pool_size):
        super(BorderAlign, self).__init__()
        self.pool_size = pool_size

    def forward(self, input, boxes):
        """
        Args:
            input: Features with shape [N,4C,H,W]. Channels ranged in [0,C),
                [C,2C), [2C,3C), [3C,4C) represent the top, left, bottom,
                right features respectively.
            boxes: Boxes with shape [N,H*W,4]. Coordinate format (x1,y1,x2,y2).

        Returns:
            Tensor: Pooled features with shape [N,C,H*W,4]. The order is
            (top,left,bottom,right) for the last dimension.
        """
        return border_align(input, boxes, self.pool_size)

    def __repr__(self):
        return f'{self.__class__.__name__}(pool_size={self.pool_size})'
|
RAVE-main/annotator/mmpkg/mmcv/ops/box_iou_rotated.py
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from ..utils import ext_loader
|
| 3 |
+
|
| 4 |
+
ext_module = ext_loader.load_ext('_ext', ['box_iou_rotated'])
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def box_iou_rotated(bboxes1, bboxes2, mode='iou', aligned=False):
    """Return intersection-over-union (Jaccard index) of rotated boxes.

    Both sets of boxes are expected to be in
    (x_center, y_center, width, height, angle) format.

    If ``aligned`` is ``False``, calculate the ious between each bbox of
    bboxes1 and bboxes2; otherwise calculate the ious between each aligned
    pair of bboxes1 and bboxes2.

    Arguments:
        bboxes1 (Tensor): rotated bboxes 1 with shape (N, 5), each row being
            (x, y, w, h, theta). Note that theta is in radian.
        bboxes2 (Tensor): rotated bboxes 2 with shape (M, 5), each row being
            (x, y, w, h, theta). Note that theta is in radian.
        mode (str): "iou" (intersection over union) or "iof" (intersection
            over foreground).

    Returns:
        ious(Tensor): shape (N, M) if aligned == False else shape (N,)
    """
    assert mode in ['iou', 'iof']
    # The extension op takes the mode as an integer flag.
    mode_flag = {'iou': 0, 'iof': 1}[mode]
    rows = bboxes1.size(0)
    cols = bboxes2.size(0)
    # The kernel writes into a flat buffer; it is reshaped afterwards for
    # the pairwise (non-aligned) case.
    ious = bboxes1.new_zeros(rows if aligned else rows * cols)
    bboxes1 = bboxes1.contiguous()
    bboxes2 = bboxes2.contiguous()
    ext_module.box_iou_rotated(
        bboxes1, bboxes2, ious, mode_flag=mode_flag, aligned=aligned)
    if not aligned:
        ious = ious.view(rows, cols)
    return ious
|
RAVE-main/annotator/mmpkg/mmcv/ops/cc_attention.py
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import torch
|
| 3 |
+
import torch.nn as nn
|
| 4 |
+
import torch.nn.functional as F
|
| 5 |
+
|
| 6 |
+
from annotator.mmpkg.mmcv.cnn import PLUGIN_LAYERS, Scale
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def NEG_INF_DIAG(n, device):
    """Return an [n, n] matrix with "-inf" on the diagonal, zeros elsewhere.

    Adding this mask avoids counting the overlapped (center) element of the
    Criss-Cross attention twice.
    """
    return torch.diag(torch.full((n, ), float('-inf'), device=device))
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
@PLUGIN_LAYERS.register_module()
class CrissCrossAttention(nn.Module):
    """Criss-Cross Attention Module.

    .. note::
        Before v1.3.13, we use a CUDA op. Since v1.3.13, we switch
        to a pure PyTorch and equivalent implementation. For more
        details, please refer to https://github.com/open-mmlab/mmcv/pull/1201.

        Speed comparison for one forward pass

        - Input size: [2,512,97,97]
        - Device: 1 NVIDIA GeForce RTX 2080 Ti

        +-----------------------+---------------+------------+---------------+
        |                       |PyTorch version|CUDA version|Relative speed |
        +=======================+===============+============+===============+
        |with torch.no_grad()   |0.00554402 s   |0.0299619 s |5.4x           |
        +-----------------------+---------------+------------+---------------+
        |no with torch.no_grad()|0.00562803 s   |0.0301349 s |5.4x           |
        +-----------------------+---------------+------------+---------------+

    Args:
        in_channels (int): Channels of the input feature map.
    """

    def __init__(self, in_channels):
        super().__init__()
        self.query_conv = nn.Conv2d(in_channels, in_channels // 8, 1)
        self.key_conv = nn.Conv2d(in_channels, in_channels // 8, 1)
        self.value_conv = nn.Conv2d(in_channels, in_channels, 1)
        # Learnable residual weight, starting at zero so the module begins
        # as an identity mapping.
        self.gamma = Scale(0.)
        self.in_channels = in_channels

    def forward(self, x):
        """forward function of Criss-Cross Attention.

        Args:
            x (Tensor): Input feature with shape
                (batch_size, in_channels, height, width).

        Returns:
            Tensor: Output of the layer with shape
            (batch_size, in_channels, height, width).
        """
        B, C, H, W = x.size()
        query = self.query_conv(x)
        key = self.key_conv(x)
        value = self.value_conv(x)
        # Vertical (same-column) affinities; the -inf diagonal mask keeps
        # the center pixel from being counted in both directions.
        energy_H = torch.einsum('bchw,bciw->bwhi', query, key) + NEG_INF_DIAG(
            H, query.device)
        energy_H = energy_H.transpose(1, 2)
        # Horizontal (same-row) affinities.
        energy_W = torch.einsum('bchw,bchj->bhwj', query, key)
        # Joint softmax over both directions: [B,H,W,(H+W)].
        attn = F.softmax(torch.cat([energy_H, energy_W], dim=-1), dim=-1)
        # Aggregate values along the column, then add the row aggregation.
        out = torch.einsum('bciw,bhwi->bchw', value, attn[..., :H])
        out += torch.einsum('bchj,bhwj->bchw', value, attn[..., H:])

        out = self.gamma(out) + x
        out = out.contiguous()

        return out

    def __repr__(self):
        return f'{self.__class__.__name__}(in_channels={self.in_channels})'
|
RAVE-main/annotator/mmpkg/mmcv/ops/contour_expand.py
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import numpy as np
|
| 3 |
+
import torch
|
| 4 |
+
|
| 5 |
+
from ..utils import ext_loader
|
| 6 |
+
|
| 7 |
+
ext_module = ext_loader.load_ext('_ext', ['contour_expand'])
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def contour_expand(kernel_mask, internal_kernel_label, min_kernel_area,
                   kernel_num):
    """Expand kernel contours so that foreground pixels are assigned into
    instances.

    Arguments:
        kernel_mask (np.array or Tensor): The instance kernel mask with
            size hxw.
        internal_kernel_label (np.array or Tensor): The instance internal
            kernel label with size hxw.
        min_kernel_area (int): The minimum kernel area.
        kernel_num (int): The instance kernel number.

    Returns:
        label (list): The instance index map with size hxw.
    """
    assert isinstance(kernel_mask, (torch.Tensor, np.ndarray))
    assert isinstance(internal_kernel_label, (torch.Tensor, np.ndarray))
    assert isinstance(min_kernel_area, int)
    assert isinstance(kernel_num, int)

    # The extension op works on tensors; numpy inputs are converted here
    # for the caller's convenience.
    if isinstance(kernel_mask, np.ndarray):
        kernel_mask = torch.from_numpy(kernel_mask)
    if isinstance(internal_kernel_label, np.ndarray):
        internal_kernel_label = torch.from_numpy(internal_kernel_label)

    if torch.__version__ == 'parrots':
        # The parrots build needs keyword arguments and cannot handle
        # empty inputs, so guard that case explicitly.
        if kernel_mask.shape[0] == 0 or internal_kernel_label.shape[0] == 0:
            label = []
        else:
            label = ext_module.contour_expand(
                kernel_mask,
                internal_kernel_label,
                min_kernel_area=min_kernel_area,
                kernel_num=kernel_num)
            label = label.tolist()
    else:
        label = ext_module.contour_expand(kernel_mask, internal_kernel_label,
                                          min_kernel_area, kernel_num)
    return label
|
RAVE-main/annotator/mmpkg/mmcv/ops/correlation.py
ADDED
|
@@ -0,0 +1,196 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import torch
|
| 3 |
+
from torch import Tensor, nn
|
| 4 |
+
from torch.autograd import Function
|
| 5 |
+
from torch.autograd.function import once_differentiable
|
| 6 |
+
from torch.nn.modules.utils import _pair
|
| 7 |
+
|
| 8 |
+
from ..utils import ext_loader
|
| 9 |
+
|
| 10 |
+
ext_module = ext_loader.load_ext(
|
| 11 |
+
'_ext', ['correlation_forward', 'correlation_backward'])
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class CorrelationFunction(Function):
    """Autograd function computing a correlation (cost) volume through the
    ``correlation_forward``/``correlation_backward`` extension kernels."""

    @staticmethod
    def forward(ctx,
                input1,
                input2,
                kernel_size=1,
                max_displacement=1,
                stride=1,
                padding=1,
                dilation=1,
                dilation_patch=1):

        ctx.save_for_backward(input1, input2)

        kH, kW = ctx.kernel_size = _pair(kernel_size)
        # Displacements are searched over [-max_displacement, +max_displacement]
        # on each axis, hence a (2d+1) x (2d+1) patch.
        patch_size = max_displacement * 2 + 1
        ctx.patch_size = patch_size
        dH, dW = ctx.stride = _pair(stride)
        padH, padW = ctx.padding = _pair(padding)
        dilationH, dilationW = ctx.dilation = _pair(dilation)
        dilation_patchH, dilation_patchW = ctx.dilation_patch = _pair(
            dilation_patch)

        output_size = CorrelationFunction._output_size(ctx, input1)
        output = input1.new_zeros(output_size)

        ext_module.correlation_forward(
            input1,
            input2,
            output,
            kH=kH,
            kW=kW,
            patchH=patch_size,
            patchW=patch_size,
            padH=padH,
            padW=padW,
            dilationH=dilationH,
            dilationW=dilationW,
            dilation_patchH=dilation_patchH,
            dilation_patchW=dilation_patchW,
            dH=dH,
            dW=dW)

        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        input1, input2 = ctx.saved_tensors

        kH, kW = ctx.kernel_size
        patch_size = ctx.patch_size
        padH, padW = ctx.padding
        dilationH, dilationW = ctx.dilation
        dilation_patchH, dilation_patchW = ctx.dilation_patch
        dH, dW = ctx.stride
        grad_input1 = torch.zeros_like(input1)
        grad_input2 = torch.zeros_like(input2)

        ext_module.correlation_backward(
            grad_output,
            input1,
            input2,
            grad_input1,
            grad_input2,
            kH=kH,
            kW=kW,
            patchH=patch_size,
            patchW=patch_size,
            padH=padH,
            padW=padW,
            dilationH=dilationH,
            dilationW=dilationW,
            dilation_patchH=dilation_patchH,
            dilation_patchW=dilation_patchW,
            dH=dH,
            dW=dW)
        # One gradient per differentiable input; the six hyper-parameters
        # receive None.
        return grad_input1, grad_input2, None, None, None, None, None, None

    @staticmethod
    def _output_size(ctx, input1):
        # Standard convolution output-size arithmetic using the dilated
        # kernel extent, yielding (B, patch, patch, oH, oW).
        iH, iW = input1.size(2), input1.size(3)
        batch_size = input1.size(0)
        kH, kW = ctx.kernel_size
        patch_size = ctx.patch_size
        dH, dW = ctx.stride
        padH, padW = ctx.padding
        dilationH, dilationW = ctx.dilation
        dilatedKH = (kH - 1) * dilationH + 1
        dilatedKW = (kW - 1) * dilationW + 1

        oH = int((iH + 2 * padH - dilatedKH) / dH + 1)
        oW = int((iW + 2 * padW - dilatedKW) / dW + 1)

        return (batch_size, patch_size, patch_size, oH, oW)
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
class Correlation(nn.Module):
    r"""Correlation operator

    This correlation operator works for optical flow correlation computation.

    There are two batched tensors with shape :math:`(N, C, H, W)`,
    and the correlation output's shape is :math:`(N, max\_displacement \times
    2 + 1, max\_displacement * 2 + 1, H_{out}, W_{out})`

    where

    .. math::
        H_{out} = \left\lfloor\frac{H_{in} + 2 \times padding -
            dilation \times (kernel\_size - 1) - 1}
            {stride} + 1\right\rfloor

    .. math::
        W_{out} = \left\lfloor\frac{W_{in} + 2 \times padding - dilation
            \times (kernel\_size - 1) - 1}
            {stride} + 1\right\rfloor

    the correlation item :math:`(N_i, dy, dx)` is formed by taking the sliding
    window convolution between input1 and shifted input2,

    .. math::
        Corr(N_i, dx, dy) =
        \sum_{c=0}^{C-1}
        input1(N_i, c) \star
        \mathcal{S}(input2(N_i, c), dy, dx)

    where :math:`\star` is the valid 2d sliding window convolution operator,
    and :math:`\mathcal{S}` means shifting the input features (auto-complete
    zero marginal), and :math:`dx, dy` are shifting distance, :math:`dx, dy \in
    [-max\_displacement \times dilation\_patch, max\_displacement \times
    dilation\_patch]`.

    Args:
        kernel_size (int): The size of sliding window i.e. local neighborhood
            representing the center points and involved in correlation
            computation. Defaults to 1.
        max_displacement (int): The radius for computing correlation volume,
            but the actual working space can be dilated by dilation_patch.
            Defaults to 1.
        stride (int): The stride of the sliding blocks in the input spatial
            dimensions. Defaults to 1.
        padding (int): Zero padding added to all four sides of the input1.
            Defaults to 0.
        dilation (int): The spacing of local neighborhood that will involved
            in correlation. Defaults to 1.
        dilation_patch (int): The spacing between position need to compute
            correlation. Defaults to 1.
    """

    def __init__(self,
                 kernel_size: int = 1,
                 max_displacement: int = 1,
                 stride: int = 1,
                 padding: int = 0,
                 dilation: int = 1,
                 dilation_patch: int = 1) -> None:
        super().__init__()
        self.kernel_size = kernel_size
        self.max_displacement = max_displacement
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.dilation_patch = dilation_patch

    def forward(self, input1: Tensor, input2: Tensor) -> Tensor:
        # Delegate to the autograd Function with the stored hyper-parameters.
        return CorrelationFunction.apply(input1, input2, self.kernel_size,
                                         self.max_displacement, self.stride,
                                         self.padding, self.dilation,
                                         self.dilation_patch)

    def __repr__(self) -> str:
        params = (f'kernel_size={self.kernel_size}, '
                  f'max_displacement={self.max_displacement}, '
                  f'stride={self.stride}, '
                  f'padding={self.padding}, '
                  f'dilation={self.dilation}, '
                  f'dilation_patch={self.dilation_patch}')
        return f'{self.__class__.__name__}({params})'
|
RAVE-main/annotator/mmpkg/mmcv/ops/focal_loss.py
ADDED
|
@@ -0,0 +1,212 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import torch
|
| 3 |
+
import torch.nn as nn
|
| 4 |
+
from torch.autograd import Function
|
| 5 |
+
from torch.autograd.function import once_differentiable
|
| 6 |
+
|
| 7 |
+
from ..utils import ext_loader
|
| 8 |
+
|
| 9 |
+
ext_module = ext_loader.load_ext('_ext', [
|
| 10 |
+
'sigmoid_focal_loss_forward', 'sigmoid_focal_loss_backward',
|
| 11 |
+
'softmax_focal_loss_forward', 'softmax_focal_loss_backward'
|
| 12 |
+
])
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class SigmoidFocalLossFunction(Function):
    """Autograd function for the extension-backed sigmoid focal loss."""

    @staticmethod
    def symbolic(g, input, target, gamma, alpha, weight, reduction):
        # ONNX export hook: emit the custom MMCV node.
        return g.op(
            'mmcv::MMCVSigmoidFocalLoss',
            input,
            target,
            gamma_f=gamma,
            alpha_f=alpha,
            weight_f=weight,
            reduction_s=reduction)

    @staticmethod
    def forward(ctx,
                input,
                target,
                gamma=2.0,
                alpha=0.25,
                weight=None,
                reduction='mean'):

        assert isinstance(target, (torch.LongTensor, torch.cuda.LongTensor))
        assert input.dim() == 2
        assert target.dim() == 1
        assert input.size(0) == target.size(0)
        if weight is None:
            # An empty tensor signals "no class weights" to the kernel.
            weight = input.new_empty(0)
        else:
            assert weight.dim() == 1
            assert input.size(1) == weight.size(0)
        ctx.reduction_dict = {'none': 0, 'mean': 1, 'sum': 2}
        assert reduction in ctx.reduction_dict.keys()

        ctx.gamma = float(gamma)
        ctx.alpha = float(alpha)
        ctx.reduction = ctx.reduction_dict[reduction]

        # Per-element losses; reduction happens below on the Python side.
        output = input.new_zeros(input.size())

        ext_module.sigmoid_focal_loss_forward(
            input, target, weight, output, gamma=ctx.gamma, alpha=ctx.alpha)
        if ctx.reduction == ctx.reduction_dict['mean']:
            output = output.sum() / input.size(0)
        elif ctx.reduction == ctx.reduction_dict['sum']:
            output = output.sum()
        ctx.save_for_backward(input, target, weight)
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        input, target, weight = ctx.saved_tensors

        grad_input = input.new_zeros(input.size())

        ext_module.sigmoid_focal_loss_backward(
            input,
            target,
            weight,
            grad_input,
            gamma=ctx.gamma,
            alpha=ctx.alpha)

        # Chain rule through the (possibly reduced) scalar output.
        grad_input *= grad_output
        if ctx.reduction == ctx.reduction_dict['mean']:
            grad_input /= input.size(0)
        return grad_input, None, None, None, None, None


sigmoid_focal_loss = SigmoidFocalLossFunction.apply
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
class SigmoidFocalLoss(nn.Module):
    """Module wrapper around :func:`sigmoid_focal_loss`."""

    def __init__(self, gamma, alpha, weight=None, reduction='mean'):
        super(SigmoidFocalLoss, self).__init__()
        self.gamma = gamma
        self.alpha = alpha
        # Registered as a buffer so a class-weight tensor follows the module
        # across .to()/.cuda() calls.
        self.register_buffer('weight', weight)
        self.reduction = reduction

    def forward(self, input, target):
        return sigmoid_focal_loss(input, target, self.gamma, self.alpha,
                                  self.weight, self.reduction)

    def __repr__(self):
        return (f'{self.__class__.__name__}(gamma={self.gamma}, '
                f'alpha={self.alpha}, reduction={self.reduction})')
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
class SoftmaxFocalLossFunction(Function):
    """Autograd function for the extension-backed softmax focal loss."""

    @staticmethod
    def symbolic(g, input, target, gamma, alpha, weight, reduction):
        # ONNX export hook: emit the custom MMCV node.
        return g.op(
            'mmcv::MMCVSoftmaxFocalLoss',
            input,
            target,
            gamma_f=gamma,
            alpha_f=alpha,
            weight_f=weight,
            reduction_s=reduction)

    @staticmethod
    def forward(ctx,
                input,
                target,
                gamma=2.0,
                alpha=0.25,
                weight=None,
                reduction='mean'):

        assert isinstance(target, (torch.LongTensor, torch.cuda.LongTensor))
        assert input.dim() == 2
        assert target.dim() == 1
        assert input.size(0) == target.size(0)
        if weight is None:
            # An empty tensor signals "no class weights" to the kernel.
            weight = input.new_empty(0)
        else:
            assert weight.dim() == 1
            assert input.size(1) == weight.size(0)
        ctx.reduction_dict = {'none': 0, 'mean': 1, 'sum': 2}
        assert reduction in ctx.reduction_dict.keys()

        ctx.gamma = float(gamma)
        ctx.alpha = float(alpha)
        ctx.reduction = ctx.reduction_dict[reduction]

        # Numerically stable softmax: subtract the per-row max before exp.
        channel_stats, _ = torch.max(input, dim=1)
        input_softmax = input - channel_stats.unsqueeze(1).expand_as(input)
        input_softmax.exp_()

        channel_stats = input_softmax.sum(dim=1)
        input_softmax /= channel_stats.unsqueeze(1).expand_as(input)

        # One loss value per sample.
        output = input.new_zeros(input.size(0))
        ext_module.softmax_focal_loss_forward(
            input_softmax,
            target,
            weight,
            output,
            gamma=ctx.gamma,
            alpha=ctx.alpha)

        if ctx.reduction == ctx.reduction_dict['mean']:
            output = output.sum() / input.size(0)
        elif ctx.reduction == ctx.reduction_dict['sum']:
            output = output.sum()
        # The backward kernel consumes the softmax probabilities, not the
        # raw logits, so save those instead.
        ctx.save_for_backward(input_softmax, target, weight)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        input_softmax, target, weight = ctx.saved_tensors
        # Scratch buffer required by the backward kernel.
        buff = input_softmax.new_zeros(input_softmax.size(0))
        grad_input = input_softmax.new_zeros(input_softmax.size())

        ext_module.softmax_focal_loss_backward(
            input_softmax,
            target,
            weight,
            buff,
            grad_input,
            gamma=ctx.gamma,
            alpha=ctx.alpha)

        # Chain rule through the (possibly reduced) scalar output.
        grad_input *= grad_output
        if ctx.reduction == ctx.reduction_dict['mean']:
            grad_input /= input_softmax.size(0)
        return grad_input, None, None, None, None, None


softmax_focal_loss = SoftmaxFocalLossFunction.apply
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
class SoftmaxFocalLoss(nn.Module):
    """Module wrapper around :func:`softmax_focal_loss`."""

    def __init__(self, gamma, alpha, weight=None, reduction='mean'):
        super(SoftmaxFocalLoss, self).__init__()
        self.gamma = gamma
        self.alpha = alpha
        # Registered as a buffer so a class-weight tensor follows the module
        # across .to()/.cuda() calls.
        self.register_buffer('weight', weight)
        self.reduction = reduction

    def forward(self, input, target):
        return softmax_focal_loss(input, target, self.gamma, self.alpha,
                                  self.weight, self.reduction)

    def __repr__(self):
        return (f'{self.__class__.__name__}(gamma={self.gamma}, '
                f'alpha={self.alpha}, reduction={self.reduction})')
|
RAVE-main/annotator/mmpkg/mmcv/ops/gather_points.py
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
from torch.autograd import Function
|
| 3 |
+
|
| 4 |
+
from ..utils import ext_loader
|
| 5 |
+
|
| 6 |
+
ext_module = ext_loader.load_ext(
|
| 7 |
+
'_ext', ['gather_points_forward', 'gather_points_backward'])
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class GatherPoints(Function):
    """Gather points with given index.

    Thin autograd wrapper over the ``gather_points_forward`` /
    ``gather_points_backward`` extension kernels.
    """

    @staticmethod
    def forward(ctx, features: torch.Tensor,
                indices: torch.Tensor) -> torch.Tensor:
        """
        Args:
            features (Tensor): (B, C, N) features to gather.
            indices (Tensor): (B, M) where M is the number of points.

        Returns:
            Tensor: (B, C, M) where M is the number of points.
        """
        assert features.is_contiguous()
        assert indices.is_contiguous()

        B, npoint = indices.size()
        _, C, N = features.size()
        # Allocate on the same device/dtype as the input instead of the
        # deprecated `torch.cuda.FloatTensor`, which always targets the
        # current default CUDA device (wrong for inputs on e.g. cuda:1).
        output = features.new_zeros((B, C, npoint))

        ext_module.gather_points_forward(
            features, indices, output, b=B, c=C, n=N, npoints=npoint)

        ctx.for_backwards = (indices, C, N)
        if torch.__version__ != 'parrots':
            # Index gradients are meaningless; tell autograd not to build them.
            ctx.mark_non_differentiable(indices)
        return output

    @staticmethod
    def backward(ctx, grad_out):
        idx, C, N = ctx.for_backwards
        B, npoint = idx.size()

        # Zero-initialised so the kernel can scatter-accumulate into it;
        # device/dtype follow the incoming gradient.
        grad_features = grad_out.new_zeros((B, C, N))
        grad_out_data = grad_out.data.contiguous()
        ext_module.gather_points_backward(
            grad_out_data,
            idx,
            grad_features.data,
            b=B,
            c=C,
            n=N,
            npoints=npoint)
        return grad_features, None


gather_points = GatherPoints.apply
|
RAVE-main/annotator/mmpkg/mmcv/ops/iou3d.py
ADDED
|
@@ -0,0 +1,85 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import torch
|
| 3 |
+
|
| 4 |
+
from ..utils import ext_loader
|
| 5 |
+
|
| 6 |
+
ext_module = ext_loader.load_ext('_ext', [
|
| 7 |
+
'iou3d_boxes_iou_bev_forward', 'iou3d_nms_forward',
|
| 8 |
+
'iou3d_nms_normal_forward'
|
| 9 |
+
])
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def boxes_iou_bev(boxes_a, boxes_b):
    """Calculate boxes IoU in the Bird's Eye View.

    Args:
        boxes_a (torch.Tensor): Input boxes a with shape (M, 5).
        boxes_b (torch.Tensor): Input boxes b with shape (N, 5).

    Returns:
        ans_iou (torch.Tensor): IoU result with shape (M, N).
    """
    num_a = boxes_a.shape[0]
    num_b = boxes_b.shape[0]
    # Output buffer on the same device/dtype as the inputs; the extension
    # kernel fills it in place.
    ans_iou = boxes_a.new_zeros(torch.Size((num_a, num_b)))

    ext_module.iou3d_boxes_iou_bev_forward(boxes_a.contiguous(),
                                           boxes_b.contiguous(), ans_iou)
    return ans_iou
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def nms_bev(boxes, scores, thresh, pre_max_size=None, post_max_size=None):
    """NMS function GPU implementation (for BEV boxes). The overlap of two
    boxes for IoU calculation is defined as the exact overlapping area of the
    two boxes. In this function, one can also set ``pre_max_size`` and
    ``post_max_size``.

    Args:
        boxes (torch.Tensor): Input boxes with the shape of [N, 5]
            ([x1, y1, x2, y2, ry]).
        scores (torch.Tensor): Scores of boxes with the shape of [N].
        thresh (float): Overlap threshold of NMS.
        pre_max_size (int, optional): Max size of boxes before NMS.
            Default: None.
        post_max_size (int, optional): Max size of boxes after NMS.
            Default: None.

    Returns:
        torch.Tensor: Indexes after NMS.
    """
    assert boxes.size(1) == 5, 'Input boxes shape should be [N, 5]'
    # Process boxes in descending score order so suppression keeps the
    # highest-scoring box of each overlapping group.
    order = scores.sort(0, descending=True)[1]

    if pre_max_size is not None:
        order = order[:pre_max_size]
    boxes = boxes[order].contiguous()

    # `keep` receives positions into the *sorted* boxes; it is allocated on
    # CPU and the extension returns how many leading entries are valid.
    keep = torch.zeros(boxes.size(0), dtype=torch.long)
    num_out = ext_module.iou3d_nms_forward(boxes, keep, thresh)
    # Map kept sorted positions back to indices into the caller's ordering.
    # NOTE(review): `.cuda(boxes.device)` assumes `boxes` is a CUDA tensor —
    # confirm callers never pass CPU tensors.
    keep = order[keep[:num_out].cuda(boxes.device)].contiguous()
    if post_max_size is not None:
        keep = keep[:post_max_size]
    return keep
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def nms_normal_bev(boxes, scores, thresh):
    """Normal NMS function GPU implementation (for BEV boxes). The overlap of
    two boxes for IoU calculation is defined as the exact overlapping area of
    the two boxes WITH their yaw angle set to 0.

    Args:
        boxes (torch.Tensor): Input boxes with shape (N, 5).
        scores (torch.Tensor): Scores of predicted boxes with shape (N).
        thresh (float): Overlap threshold of NMS.

    Returns:
        torch.Tensor: Remaining indices with scores in descending order.
    """
    assert boxes.shape[1] == 5, 'Input boxes shape should be [N, 5]'
    # Visit boxes from highest to lowest score.
    order = scores.sort(0, descending=True)[1]
    sorted_boxes = boxes[order].contiguous()

    # Positions (into the sorted boxes) selected by the kernel; the extension
    # reports how many leading entries are valid.
    keep = torch.zeros(sorted_boxes.size(0), dtype=torch.long)
    num_out = ext_module.iou3d_nms_normal_forward(sorted_boxes, keep, thresh)
    # Translate kept sorted positions back to original indices.
    return order[keep[:num_out].cuda(boxes.device)].contiguous()
|
RAVE-main/annotator/mmpkg/mmcv/ops/knn.py
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
from torch.autograd import Function
|
| 3 |
+
|
| 4 |
+
from ..utils import ext_loader
|
| 5 |
+
|
| 6 |
+
ext_module = ext_loader.load_ext('_ext', ['knn_forward'])
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class KNN(Function):
    r"""KNN (CUDA) based on heap data structure.
    Modified from `PAConv <https://github.com/CVMI-Lab/PAConv/tree/main/
    scene_seg/lib/pointops/src/knnquery_heap>`_.

    Find k-nearest points.
    """

    @staticmethod
    def forward(ctx,
                k: int,
                xyz: torch.Tensor,
                center_xyz: torch.Tensor = None,
                transposed: bool = False) -> torch.Tensor:
        """
        Args:
            k (int): number of nearest neighbors.
            xyz (Tensor): (B, N, 3) if transposed == False, else (B, 3, N).
                xyz coordinates of the features.
            center_xyz (Tensor, optional): (B, npoint, 3) if transposed ==
                False, else (B, 3, npoint). centers of the knn query.
                Default: None.
            transposed (bool, optional): whether the input tensors are
                transposed. Should not explicitly use this keyword when
                calling knn (=KNN.apply), just add the fourth param.
                Default: False.

        Returns:
            Tensor: (B, k, npoint) tensor with the indices of
                the features that form k-nearest neighbours.
        """
        # Upper bound presumably matches a fixed-size structure in the CUDA
        # kernel — TODO confirm against the extension source.
        assert (k > 0) & (k < 100), 'k should be in range(0, 100)'

        if center_xyz is None:
            # Query every point against the full point set.
            center_xyz = xyz

        if transposed:
            # Normalise both tensors to channel-last (B, N, 3) layout.
            xyz = xyz.transpose(2, 1).contiguous()
            center_xyz = center_xyz.transpose(2, 1).contiguous()

        assert xyz.is_contiguous()  # [B, N, 3]
        assert center_xyz.is_contiguous()  # [B, npoint, 3]

        center_xyz_device = center_xyz.get_device()
        assert center_xyz_device == xyz.get_device(), \
            'center_xyz and xyz should be put on the same device'
        # The kernel launches on the current device; switch if needed.
        if torch.cuda.current_device() != center_xyz_device:
            torch.cuda.set_device(center_xyz_device)

        B, npoint, _ = center_xyz.shape
        N = xyz.shape[1]

        # Output buffers filled by the extension: neighbour indices and
        # their squared distances (dist2 is discarded here).
        idx = center_xyz.new_zeros((B, npoint, k)).int()
        dist2 = center_xyz.new_zeros((B, npoint, k)).float()

        ext_module.knn_forward(
            xyz, center_xyz, idx, dist2, b=B, n=N, m=npoint, nsample=k)
        # idx shape to [B, k, npoint]
        idx = idx.transpose(2, 1).contiguous()
        if torch.__version__ != 'parrots':
            ctx.mark_non_differentiable(idx)
        return idx

    @staticmethod
    def backward(ctx, a=None):
        # Integer indices are not differentiable w.r.t. any input.
        return None, None, None
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
knn = KNN.apply
|
RAVE-main/annotator/mmpkg/mmcv/ops/multi_scale_deform_attn.py
ADDED
|
@@ -0,0 +1,358 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import math
|
| 3 |
+
import warnings
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
import torch.nn as nn
|
| 7 |
+
import torch.nn.functional as F
|
| 8 |
+
from torch.autograd.function import Function, once_differentiable
|
| 9 |
+
|
| 10 |
+
from annotator.mmpkg.mmcv import deprecated_api_warning
|
| 11 |
+
from annotator.mmpkg.mmcv.cnn import constant_init, xavier_init
|
| 12 |
+
from annotator.mmpkg.mmcv.cnn.bricks.registry import ATTENTION
|
| 13 |
+
from annotator.mmpkg.mmcv.runner import BaseModule
|
| 14 |
+
from ..utils import ext_loader
|
| 15 |
+
|
| 16 |
+
ext_module = ext_loader.load_ext(
|
| 17 |
+
'_ext', ['ms_deform_attn_backward', 'ms_deform_attn_forward'])
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class MultiScaleDeformableAttnFunction(Function):
    """Autograd wrapper for the multi-scale deformable attention CUDA op."""

    @staticmethod
    def forward(ctx, value, value_spatial_shapes, value_level_start_index,
                sampling_locations, attention_weights, im2col_step):
        """GPU version of multi-scale deformable attention.

        Args:
            value (Tensor): The value has shape
                (bs, num_keys, mum_heads, embed_dims//num_heads)
            value_spatial_shapes (Tensor): Spatial shape of
                each feature map, has shape (num_levels, 2),
                last dimension 2 represent (h, w)
            sampling_locations (Tensor): The location of sampling points,
                has shape
                (bs ,num_queries, num_heads, num_levels, num_points, 2),
                the last dimension 2 represent (x, y).
            attention_weights (Tensor): The weight of sampling points used
                when calculate the attention, has shape
                (bs ,num_queries, num_heads, num_levels, num_points),
            im2col_step (Tensor): The step used in image to column.

        Returns:
            Tensor: has shape (bs, num_queries, embed_dims)
        """

        # Keep im2col_step on ctx so backward uses the same batching step.
        ctx.im2col_step = im2col_step
        output = ext_module.ms_deform_attn_forward(
            value,
            value_spatial_shapes,
            value_level_start_index,
            sampling_locations,
            attention_weights,
            im2col_step=ctx.im2col_step)
        ctx.save_for_backward(value, value_spatial_shapes,
                              value_level_start_index, sampling_locations,
                              attention_weights)
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        """GPU version of backward function.

        Args:
            grad_output (Tensor): Gradient
                of output tensor of forward.

        Returns:
            Tuple[Tensor]: Gradient
                of input tensors in forward.
        """
        value, value_spatial_shapes, value_level_start_index,\
            sampling_locations, attention_weights = ctx.saved_tensors
        # Zero-filled buffers the extension accumulates gradients into.
        grad_value = torch.zeros_like(value)
        grad_sampling_loc = torch.zeros_like(sampling_locations)
        grad_attn_weight = torch.zeros_like(attention_weights)

        ext_module.ms_deform_attn_backward(
            value,
            value_spatial_shapes,
            value_level_start_index,
            sampling_locations,
            attention_weights,
            grad_output.contiguous(),
            grad_value,
            grad_sampling_loc,
            grad_attn_weight,
            im2col_step=ctx.im2col_step)

        # Spatial shapes, level indices and im2col_step get no gradient.
        return grad_value, None, None, \
            grad_sampling_loc, grad_attn_weight, None
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def multi_scale_deformable_attn_pytorch(value, value_spatial_shapes,
                                        sampling_locations, attention_weights):
    """CPU version of multi-scale deformable attention.

    Args:
        value (Tensor): The value has shape
            (bs, num_keys, mum_heads, embed_dims//num_heads)
        value_spatial_shapes (Tensor): Spatial shape of
            each feature map, has shape (num_levels, 2),
            last dimension 2 represent (h, w)
        sampling_locations (Tensor): The location of sampling points,
            has shape
            (bs ,num_queries, num_heads, num_levels, num_points, 2),
            the last dimension 2 represent (x, y).
        attention_weights (Tensor): The weight of sampling points used
            when calculate the attention, has shape
            (bs ,num_queries, num_heads, num_levels, num_points),

    Returns:
        Tensor: has shape (bs, num_queries, embed_dims)
    """

    bs, _, n_heads, head_dims = value.shape
    _, n_queries, n_heads, n_levels, n_points, _ = sampling_locations.shape
    # Undo the level-wise flattening of the value tensor.
    per_level_values = value.split(
        [h * w for h, w in value_spatial_shapes], dim=1)
    # grid_sample expects normalized coordinates in [-1, 1].
    grids = 2 * sampling_locations - 1
    sampled = []
    for lvl, (h, w) in enumerate(value_spatial_shapes):
        # (bs, H*W, heads, dims) -> (bs*heads, dims, H, W)
        level_value = per_level_values[lvl].flatten(2).transpose(
            1, 2).reshape(bs * n_heads, head_dims, h, w)
        # (bs, queries, heads, points, 2) -> (bs*heads, queries, points, 2)
        level_grid = grids[:, :, :, lvl].transpose(1, 2).flatten(0, 1)
        # (bs*heads, dims, queries, points)
        sampled.append(
            F.grid_sample(
                level_value,
                level_grid,
                mode='bilinear',
                padding_mode='zeros',
                align_corners=False))
    # (bs, queries, heads, levels, points) ->
    # (bs*heads, 1, queries, levels*points)
    weights = attention_weights.transpose(1, 2).reshape(
        bs * n_heads, 1, n_queries, n_levels * n_points)
    weighted = torch.stack(sampled, dim=-2).flatten(-2) * weights
    out = weighted.sum(-1).view(bs, n_heads * head_dims, n_queries)
    return out.transpose(1, 2).contiguous()
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
@ATTENTION.register_module()
class MultiScaleDeformableAttention(BaseModule):
    """An attention module used in Deformable-Detr.

    `Deformable DETR: Deformable Transformers for End-to-End Object Detection.
    <https://arxiv.org/pdf/2010.04159.pdf>`_.

    Args:
        embed_dims (int): The embedding dimension of Attention.
            Default: 256.
        num_heads (int): Parallel attention heads. Default: 8.
        num_levels (int): The number of feature map used in
            Attention. Default: 4.
        num_points (int): The number of sampling points for
            each query in each head. Default: 4.
        im2col_step (int): The step used in image_to_column.
            Default: 64.
        dropout (float): A Dropout layer on `inp_identity`.
            Default: 0.1.
        batch_first (bool): Key, Query and Value are shape of
            (batch, n, embed_dim)
            or (n, batch, embed_dim). Default to False.
        norm_cfg (dict): Config dict for normalization layer.
            Default: None.
        init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
            Default: None.
    """

    def __init__(self,
                 embed_dims=256,
                 num_heads=8,
                 num_levels=4,
                 num_points=4,
                 im2col_step=64,
                 dropout=0.1,
                 batch_first=False,
                 norm_cfg=None,
                 init_cfg=None):
        super().__init__(init_cfg)
        if embed_dims % num_heads != 0:
            raise ValueError(f'embed_dims must be divisible by num_heads, '
                             f'but got {embed_dims} and {num_heads}')
        dim_per_head = embed_dims // num_heads
        self.norm_cfg = norm_cfg
        self.dropout = nn.Dropout(dropout)
        self.batch_first = batch_first

        # you'd better set dim_per_head to a power of 2
        # which is more efficient in the CUDA implementation
        def _is_power_of_2(n):
            if (not isinstance(n, int)) or (n < 0):
                raise ValueError(
                    'invalid input for _is_power_of_2: {} (type: {})'.format(
                        n, type(n)))
            return (n & (n - 1) == 0) and n != 0

        if not _is_power_of_2(dim_per_head):
            warnings.warn(
                "You'd better set embed_dims in "
                'MultiScaleDeformAttention to make '
                'the dimension of each attention head a power of 2 '
                'which is more efficient in our CUDA implementation.')

        self.im2col_step = im2col_step
        self.embed_dims = embed_dims
        self.num_levels = num_levels
        self.num_heads = num_heads
        self.num_points = num_points
        # Linear layers predicting per-query sampling offsets (x, y per
        # head/level/point) and unnormalized attention weights.
        self.sampling_offsets = nn.Linear(
            embed_dims, num_heads * num_levels * num_points * 2)
        self.attention_weights = nn.Linear(embed_dims,
                                           num_heads * num_levels * num_points)
        self.value_proj = nn.Linear(embed_dims, embed_dims)
        self.output_proj = nn.Linear(embed_dims, embed_dims)
        self.init_weights()

    def init_weights(self):
        """Default initialization for Parameters of Module."""
        constant_init(self.sampling_offsets, 0.)
        # Spread initial sampling offsets on a circle, one direction per
        # head, growing with the point index (via the offset layer's bias).
        thetas = torch.arange(
            self.num_heads,
            dtype=torch.float32) * (2.0 * math.pi / self.num_heads)
        grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
        grid_init = (grid_init /
                     grid_init.abs().max(-1, keepdim=True)[0]).view(
                         self.num_heads, 1, 1,
                         2).repeat(1, self.num_levels, self.num_points, 1)
        for i in range(self.num_points):
            grid_init[:, :, i, :] *= i + 1

        self.sampling_offsets.bias.data = grid_init.view(-1)
        constant_init(self.attention_weights, val=0., bias=0.)
        xavier_init(self.value_proj, distribution='uniform', bias=0.)
        xavier_init(self.output_proj, distribution='uniform', bias=0.)
        self._is_init = True

    @deprecated_api_warning({'residual': 'identity'},
                            cls_name='MultiScaleDeformableAttention')
    def forward(self,
                query,
                key=None,
                value=None,
                identity=None,
                query_pos=None,
                key_padding_mask=None,
                reference_points=None,
                spatial_shapes=None,
                level_start_index=None,
                **kwargs):
        """Forward Function of MultiScaleDeformAttention.

        Args:
            query (Tensor): Query of Transformer with shape
                (num_query, bs, embed_dims).
            key (Tensor): The key tensor with shape
                `(num_key, bs, embed_dims)`.
            value (Tensor): The value tensor with shape
                `(num_key, bs, embed_dims)`.
            identity (Tensor): The tensor used for addition, with the
                same shape as `query`. Default None. If None,
                `query` will be used.
            query_pos (Tensor): The positional encoding for `query`.
                Default: None.
            key_pos (Tensor): The positional encoding for `key`. Default
                None.
            reference_points (Tensor): The normalized reference
                points with shape (bs, num_query, num_levels, 2),
                all elements is range in [0, 1], top-left (0,0),
                bottom-right (1, 1), including padding area.
                or (N, Length_{query}, num_levels, 4), add
                additional two dimensions is (w, h) to
                form reference boxes.
            key_padding_mask (Tensor): ByteTensor for `query`, with
                shape [bs, num_key].
            spatial_shapes (Tensor): Spatial shape of features in
                different levels. With shape (num_levels, 2),
                last dimension represents (h, w).
            level_start_index (Tensor): The start index of each level.
                A tensor has shape ``(num_levels, )`` and can be represented
                as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...].

        Returns:
            Tensor: forwarded results with shape [num_query, bs, embed_dims].
        """

        if value is None:
            value = query

        if identity is None:
            identity = query
        if query_pos is not None:
            query = query + query_pos
        if not self.batch_first:
            # change to (bs, num_query ,embed_dims)
            query = query.permute(1, 0, 2)
            value = value.permute(1, 0, 2)

        bs, num_query, _ = query.shape
        bs, num_value, _ = value.shape
        # The flattened value length must match the sum of level areas.
        assert (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() == num_value

        value = self.value_proj(value)
        if key_padding_mask is not None:
            # Zero out padded keys so they contribute nothing to sampling.
            value = value.masked_fill(key_padding_mask[..., None], 0.0)
        value = value.view(bs, num_value, self.num_heads, -1)
        sampling_offsets = self.sampling_offsets(query).view(
            bs, num_query, self.num_heads, self.num_levels, self.num_points, 2)
        attention_weights = self.attention_weights(query).view(
            bs, num_query, self.num_heads, self.num_levels * self.num_points)
        # Softmax jointly over all levels*points of a head, then reshape.
        attention_weights = attention_weights.softmax(-1)

        attention_weights = attention_weights.view(bs, num_query,
                                                   self.num_heads,
                                                   self.num_levels,
                                                   self.num_points)
        if reference_points.shape[-1] == 2:
            # Offsets are predicted in pixels; normalise by (w, h) per level.
            offset_normalizer = torch.stack(
                [spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)
            sampling_locations = reference_points[:, :, None, :, None, :] \
                + sampling_offsets \
                / offset_normalizer[None, None, None, :, None, :]
        elif reference_points.shape[-1] == 4:
            # Boxes: scale offsets by half the box size (w, h).
            sampling_locations = reference_points[:, :, None, :, None, :2] \
                + sampling_offsets / self.num_points \
                * reference_points[:, :, None, :, None, 2:] \
                * 0.5
        else:
            raise ValueError(
                f'Last dim of reference_points must be'
                f' 2 or 4, but get {reference_points.shape[-1]} instead.')
        if torch.cuda.is_available() and value.is_cuda:
            output = MultiScaleDeformableAttnFunction.apply(
                value, spatial_shapes, level_start_index, sampling_locations,
                attention_weights, self.im2col_step)
        else:
            output = multi_scale_deformable_attn_pytorch(
                value, spatial_shapes, sampling_locations, attention_weights)

        output = self.output_proj(output)

        if not self.batch_first:
            # (num_query, bs ,embed_dims)
            output = output.permute(1, 0, 2)

        return self.dropout(output) + identity
|
RAVE-main/annotator/mmpkg/mmcv/ops/pixel_group.py
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import numpy as np
|
| 3 |
+
import torch
|
| 4 |
+
|
| 5 |
+
from ..utils import ext_loader
|
| 6 |
+
|
| 7 |
+
ext_module = ext_loader.load_ext('_ext', ['pixel_group'])
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def pixel_group(score, mask, embedding, kernel_label, kernel_contour,
                kernel_region_num, distance_threshold):
    """Group pixels into text instances, which is widely used text detection
    methods.

    Arguments:
        score (np.array or Tensor): The foreground score with size hxw.
        mask (np.array or Tensor): The foreground mask with size hxw.
        embedding (np.array or Tensor): The embedding with size hxwxc to
            distinguish instances.
        kernel_label (np.array or Tensor): The instance kernel index with
            size hxw.
        kernel_contour (np.array or Tensor): The kernel contour with size hxw.
        kernel_region_num (int): The instance kernel region number.
        distance_threshold (float): The embedding distance threshold between
            kernel and pixel in one instance.

    Returns:
        pixel_assignment (List[List[float]]): The instance coordinate list.
            Each element consists of averaged confidence, pixel number, and
            coordinates (x_i, y_i for all pixels) in order.
    """
    assert isinstance(score, (torch.Tensor, np.ndarray))
    assert isinstance(mask, (torch.Tensor, np.ndarray))
    assert isinstance(embedding, (torch.Tensor, np.ndarray))
    assert isinstance(kernel_label, (torch.Tensor, np.ndarray))
    assert isinstance(kernel_contour, (torch.Tensor, np.ndarray))
    assert isinstance(kernel_region_num, int)
    assert isinstance(distance_threshold, float)

    # The extension expects torch tensors; convert numpy inputs in place.
    if isinstance(score, np.ndarray):
        score = torch.from_numpy(score)
    if isinstance(mask, np.ndarray):
        mask = torch.from_numpy(mask)
    if isinstance(embedding, np.ndarray):
        embedding = torch.from_numpy(embedding)
    if isinstance(kernel_label, np.ndarray):
        kernel_label = torch.from_numpy(kernel_label)
    if isinstance(kernel_contour, np.ndarray):
        kernel_contour = torch.from_numpy(kernel_contour)

    if torch.__version__ == 'parrots':
        # parrots returns one flat list: the first `kernel_region_num`
        # entries are per-region pixel counts, followed by the concatenated
        # per-region data; slice it back into per-region arrays.
        label = ext_module.pixel_group(
            score,
            mask,
            embedding,
            kernel_label,
            kernel_contour,
            kernel_region_num=kernel_region_num,
            distance_threshold=distance_threshold)
        label = label.tolist()
        label = label[0]
        list_index = kernel_region_num
        pixel_assignment = []
        for x in range(kernel_region_num):
            pixel_assignment.append(
                np.array(
                    label[list_index:list_index + int(label[x])],
                    # np.float was removed in NumPy 1.24; np.float64 is the
                    # type the deprecated alias resolved to.
                    dtype=np.float64))
            list_index = list_index + int(label[x])
    else:
        pixel_assignment = ext_module.pixel_group(score, mask, embedding,
                                                  kernel_label, kernel_contour,
                                                  kernel_region_num,
                                                  distance_threshold)
    return pixel_assignment
|
RAVE-main/annotator/mmpkg/mmcv/ops/points_sampler.py
ADDED
|
@@ -0,0 +1,177 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import List
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
from torch import nn as nn
|
| 5 |
+
|
| 6 |
+
from annotator.mmpkg.mmcv.runner import force_fp32
|
| 7 |
+
from .furthest_point_sample import (furthest_point_sample,
|
| 8 |
+
furthest_point_sample_with_dist)
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def calc_square_dist(point_feat_a, point_feat_b, norm=True):
    """Calculating square distance between a and b.

    Args:
        point_feat_a (Tensor): (B, N, C) Feature vector of each point.
        point_feat_b (Tensor): (B, M, C) Feature vector of each point.
        norm (Bool, optional): Whether to normalize the distance.
            Default: True.

    Returns:
        Tensor: (B, N, M) Distance between each pair points.
    """
    n_channels = point_feat_a.shape[-1]
    # Expand ||a - b||^2 = ||a||^2 + ||b||^2 - 2 * a.b so the whole
    # pairwise matrix comes from one matmul plus broadcasting.
    sq_a = point_feat_a.unsqueeze(dim=2).pow(2).sum(dim=-1)  # (B, N, 1)
    sq_b = point_feat_b.unsqueeze(dim=1).pow(2).sum(dim=-1)  # (B, 1, M)
    inner = torch.matmul(point_feat_a, point_feat_b.transpose(1, 2))

    dist = sq_a + sq_b - 2 * inner
    if norm:
        dist = torch.sqrt(dist) / n_channels
    return dist
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def get_sampler_cls(sampler_type):
    """Get the type and mode of points sampler.

    Args:
        sampler_type (str): The type of points sampler.
            The valid values are "D-FPS", "F-FPS", or "FS".

    Returns:
        class: Points sampler class.

    Raises:
        KeyError: If ``sampler_type`` is not one of the supported names.
    """
    sampler_mappings = {
        'D-FPS': DFPSSampler,
        'F-FPS': FFPSSampler,
        'FS': FSSampler,
    }
    try:
        return sampler_mappings[sampler_type]
    except KeyError:
        # Build the message without a backslash continuation: the original
        # continuation embedded a long run of indentation spaces inside the
        # f-string, garbling the error text.
        raise KeyError(
            f'Supported `sampler_type` are {list(sampler_mappings.keys())}, '
            f'but got {sampler_type}')
| 58 |
+
|
| 59 |
+
|
| 60 |
+
class PointsSampler(nn.Module):
    """Points sampling.

    Args:
        num_point (list[int]): Number of sample points.
        fps_mod_list (list[str], optional): Type of FPS method, valid mod
            ['F-FPS', 'D-FPS', 'FS'], Default: ['D-FPS'].
            F-FPS: using feature distances for FPS.
            D-FPS: using Euclidean distances of points for FPS.
            FS: using F-FPS and D-FPS simultaneously.
        fps_sample_range_list (list[int], optional):
            Range of points to apply FPS. Default: [-1].
    """

    def __init__(self,
                 num_point: List[int],
                 fps_mod_list: List[str] = ['D-FPS'],
                 fps_sample_range_list: List[int] = [-1]):
        super().__init__()
        # FPS would be applied to different fps_mod in the list,
        # so the length of the num_point should be equal to
        # fps_mod_list and fps_sample_range_list.
        assert len(num_point) == len(fps_mod_list) == len(
            fps_sample_range_list)
        self.num_point = num_point
        self.fps_sample_range_list = fps_sample_range_list
        self.samplers = nn.ModuleList()
        for fps_mod in fps_mod_list:
            # One sampler instance per FPS mode, applied to its own range.
            self.samplers.append(get_sampler_cls(fps_mod)())
        # Flag read by mmcv's fp16 utilities; sampling itself is forced to
        # fp32 by the @force_fp32 decorator on forward().
        self.fp16_enabled = False

    @force_fp32()
    def forward(self, points_xyz, features):
        """
        Args:
            points_xyz (Tensor): (B, N, 3) xyz coordinates of the features.
            features (Tensor): (B, C, N) Descriptors of the features.

        Returns:
            Tensor: (B, npoint, sample_num) Indices of sampled points.
        """
        indices = []
        last_fps_end_index = 0

        for fps_sample_range, sampler, npoint in zip(
                self.fps_sample_range_list, self.samplers, self.num_point):
            assert fps_sample_range < points_xyz.shape[1]

            if fps_sample_range == -1:
                # -1 means "from the previous end to the last point".
                sample_points_xyz = points_xyz[:, last_fps_end_index:]
                if features is not None:
                    sample_features = features[:, :, last_fps_end_index:]
                else:
                    sample_features = None
            else:
                sample_points_xyz = \
                    points_xyz[:, last_fps_end_index:fps_sample_range]
                if features is not None:
                    sample_features = features[:, :, last_fps_end_index:
                                               fps_sample_range]
                else:
                    sample_features = None

            fps_idx = sampler(sample_points_xyz.contiguous(), sample_features,
                              npoint)

            # Sampler indices are relative to the slice; shift them back to
            # the full point set.
            indices.append(fps_idx + last_fps_end_index)
            # NOTE(review): when fps_sample_range == -1 this decrements the
            # running offset, so -1 is only safe as the last entry of
            # fps_sample_range_list -- confirm against callers.
            last_fps_end_index += fps_sample_range
        indices = torch.cat(indices, dim=1)

        return indices
| 131 |
+
|
| 132 |
+
|
| 133 |
+
class DFPSSampler(nn.Module):
    """Farthest point sampling driven by Euclidean distances of points."""

    def __init__(self):
        super().__init__()

    def forward(self, points, features, npoint):
        """Pick ``npoint`` indices via D-FPS; ``features`` is ignored."""
        return furthest_point_sample(points.contiguous(), npoint)
| 143 |
+
|
| 144 |
+
|
| 145 |
+
class FFPSSampler(nn.Module):
    """Farthest point sampling driven by feature-space distances."""

    def __init__(self):
        super().__init__()

    def forward(self, points, features, npoint):
        """Pick ``npoint`` indices via F-FPS over fused [xyz, features]."""
        assert features is not None, \
            'feature input to FFPS_Sampler should not be None'
        fused = torch.cat([points, features.transpose(1, 2)], dim=2)
        pairwise_dist = calc_square_dist(fused, fused, norm=False)
        return furthest_point_sample_with_dist(pairwise_dist, npoint)
| 160 |
+
|
| 161 |
+
|
| 162 |
+
class FSSampler(nn.Module):
    """Sampler combining F-FPS and D-FPS results."""

    def __init__(self):
        super().__init__()

    def forward(self, points, features, npoint):
        """Sample ``npoint`` indices with each of F-FPS and D-FPS, then
        concatenate the two index sets (2 * npoint indices total)."""
        assert features is not None, \
            'feature input to FS_Sampler should not be None'
        idx_ffps = FFPSSampler()(points, features, npoint)
        idx_dfps = DFPSSampler()(points, features, npoint)
        return torch.cat([idx_ffps, idx_dfps], dim=1)
|
RAVE-main/annotator/mmpkg/mmcv/ops/scatter_points.py
ADDED
|
@@ -0,0 +1,135 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import torch
|
| 3 |
+
from torch import nn
|
| 4 |
+
from torch.autograd import Function
|
| 5 |
+
|
| 6 |
+
from ..utils import ext_loader
|
| 7 |
+
|
| 8 |
+
ext_module = ext_loader.load_ext(
|
| 9 |
+
'_ext',
|
| 10 |
+
['dynamic_point_to_voxel_forward', 'dynamic_point_to_voxel_backward'])
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class _DynamicScatter(Function):
    """Autograd Function reducing point features into voxels via the
    ``_ext`` CUDA ops (dynamic voxelization)."""

    @staticmethod
    def forward(ctx, feats, coors, reduce_type='max'):
        """convert kitti points(N, >=3) to voxels.

        Args:
            feats (torch.Tensor): [N, C]. Points features to be reduced
                into voxels.
            coors (torch.Tensor): [N, ndim]. Corresponding voxel coordinates
                (specifically multi-dim voxel index) of each points.
            reduce_type (str, optional): Reduce op. support 'max', 'sum' and
                'mean'. Default: 'max'.

        Returns:
            voxel_feats (torch.Tensor): [M, C]. Reduced features, input
                features that shares the same voxel coordinates are reduced to
                one row.
            voxel_coors (torch.Tensor): [M, ndim]. Voxel coordinates.
        """
        results = ext_module.dynamic_point_to_voxel_forward(
            feats, coors, reduce_type)
        (voxel_feats, voxel_coors, point2voxel_map,
         voxel_points_count) = results
        # Stash everything backward() needs to route voxel gradients back
        # to the contributing points.
        ctx.reduce_type = reduce_type
        ctx.save_for_backward(feats, voxel_feats, point2voxel_map,
                              voxel_points_count)
        # Integer voxel coordinates carry no gradient.
        ctx.mark_non_differentiable(voxel_coors)
        return voxel_feats, voxel_coors

    @staticmethod
    def backward(ctx, grad_voxel_feats, grad_voxel_coors=None):
        (feats, voxel_feats, point2voxel_map,
         voxel_points_count) = ctx.saved_tensors
        grad_feats = torch.zeros_like(feats)
        # TODO: whether to use index put or use cuda_backward
        # To use index put, need point to voxel index
        # The extension fills grad_feats in place.
        ext_module.dynamic_point_to_voxel_backward(
            grad_feats, grad_voxel_feats.contiguous(), feats, voxel_feats,
            point2voxel_map, voxel_points_count, ctx.reduce_type)
        # One gradient slot per forward() input; coors and reduce_type are
        # non-differentiable.
        return grad_feats, None, None
| 54 |
+
|
| 55 |
+
|
| 56 |
+
# Functional alias: dynamic_scatter(feats, coors, reduce_type).
dynamic_scatter = _DynamicScatter.apply
| 57 |
+
|
| 58 |
+
|
| 59 |
+
class DynamicScatter(nn.Module):
    """Scatters points into voxels, used in the voxel encoder with dynamic
    voxelization.

    Note:
        The CPU and GPU implementation get the same output, but have numerical
        difference after summation and division (e.g., 5e-7).

    Args:
        voxel_size (list): list [x, y, z] size of three dimension.
        point_cloud_range (list): The coordinate range of points, [x_min,
            y_min, z_min, x_max, y_max, z_max].
        average_points (bool): whether to use avg pooling to scatter points
            into voxel.
    """

    def __init__(self, voxel_size, point_cloud_range, average_points: bool):
        super().__init__()

        # voxel_size and point_cloud_range are stored for introspection and
        # __repr__; the reduction here only depends on average_points.
        self.voxel_size = voxel_size
        self.point_cloud_range = point_cloud_range
        self.average_points = average_points

    def forward_single(self, points, coors):
        """Scatters points into voxels.

        Args:
            points (torch.Tensor): Points to be reduced into voxels.
            coors (torch.Tensor): Corresponding voxel coordinates (specifically
                multi-dim voxel index) of each points.

        Returns:
            voxel_feats (torch.Tensor): Reduced features, input features that
                shares the same voxel coordinates are reduced to one row.
            voxel_coors (torch.Tensor): Voxel coordinates.
        """
        reduce = 'mean' if self.average_points else 'max'
        return dynamic_scatter(points.contiguous(), coors.contiguous(), reduce)

    def forward(self, points, coors):
        """Scatters points/features into voxels.

        Args:
            points (torch.Tensor): Points to be reduced into voxels.
            coors (torch.Tensor): Corresponding voxel coordinates (specifically
                multi-dim voxel index) of each points.

        Returns:
            voxel_feats (torch.Tensor): Reduced features, input features that
                shares the same voxel coordinates are reduced to one row.
            voxel_coors (torch.Tensor): Voxel coordinates.
        """
        if coors.size(-1) == 3:
            # No batch column: a single sample.
            return self.forward_single(points, coors)
        else:
            # Batched input: column 0 of coors is the batch index.
            # NOTE(review): assumes coors is ordered so the last row holds
            # the largest batch index -- confirm against callers.
            batch_size = coors[-1, 0] + 1
            voxels, voxel_coors = [], []
            for i in range(batch_size):
                inds = torch.where(coors[:, 0] == i)
                voxel, voxel_coor = self.forward_single(
                    points[inds], coors[inds][:, 1:])
                # Re-prepend the batch index to the voxel coordinates.
                coor_pad = nn.functional.pad(
                    voxel_coor, (1, 0), mode='constant', value=i)
                voxel_coors.append(coor_pad)
                voxels.append(voxel)
            features = torch.cat(voxels, dim=0)
            feature_coors = torch.cat(voxel_coors, dim=0)

            return features, feature_coors

    def __repr__(self):
        s = self.__class__.__name__ + '('
        s += 'voxel_size=' + str(self.voxel_size)
        s += ', point_cloud_range=' + str(self.point_cloud_range)
        s += ', average_points=' + str(self.average_points)
        s += ')'
        return s
|
RAVE-main/annotator/mmpkg/mmcv/ops/upfirdn2d.py
ADDED
|
@@ -0,0 +1,330 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# modified from https://github.com/rosinality/stylegan2-pytorch/blob/master/op/upfirdn2d.py # noqa:E501
|
| 2 |
+
|
| 3 |
+
# Copyright (c) 2021, NVIDIA Corporation. All rights reserved.
|
| 4 |
+
# NVIDIA Source Code License for StyleGAN2 with Adaptive Discriminator
|
| 5 |
+
# Augmentation (ADA)
|
| 6 |
+
# =======================================================================
|
| 7 |
+
|
| 8 |
+
# 1. Definitions
|
| 9 |
+
|
| 10 |
+
# "Licensor" means any person or entity that distributes its Work.
|
| 11 |
+
|
| 12 |
+
# "Software" means the original work of authorship made available under
|
| 13 |
+
# this License.
|
| 14 |
+
|
| 15 |
+
# "Work" means the Software and any additions to or derivative works of
|
| 16 |
+
# the Software that are made available under this License.
|
| 17 |
+
|
| 18 |
+
# The terms "reproduce," "reproduction," "derivative works," and
|
| 19 |
+
# "distribution" have the meaning as provided under U.S. copyright law;
|
| 20 |
+
# provided, however, that for the purposes of this License, derivative
|
| 21 |
+
# works shall not include works that remain separable from, or merely
|
| 22 |
+
# link (or bind by name) to the interfaces of, the Work.
|
| 23 |
+
|
| 24 |
+
# Works, including the Software, are "made available" under this License
|
| 25 |
+
# by including in or with the Work either (a) a copyright notice
|
| 26 |
+
# referencing the applicability of this License to the Work, or (b) a
|
| 27 |
+
# copy of this License.
|
| 28 |
+
|
| 29 |
+
# 2. License Grants
|
| 30 |
+
|
| 31 |
+
# 2.1 Copyright Grant. Subject to the terms and conditions of this
|
| 32 |
+
# License, each Licensor grants to you a perpetual, worldwide,
|
| 33 |
+
# non-exclusive, royalty-free, copyright license to reproduce,
|
| 34 |
+
# prepare derivative works of, publicly display, publicly perform,
|
| 35 |
+
# sublicense and distribute its Work and any resulting derivative
|
| 36 |
+
# works in any form.
|
| 37 |
+
|
| 38 |
+
# 3. Limitations
|
| 39 |
+
|
| 40 |
+
# 3.1 Redistribution. You may reproduce or distribute the Work only
|
| 41 |
+
# if (a) you do so under this License, (b) you include a complete
|
| 42 |
+
# copy of this License with your distribution, and (c) you retain
|
| 43 |
+
# without modification any copyright, patent, trademark, or
|
| 44 |
+
# attribution notices that are present in the Work.
|
| 45 |
+
|
| 46 |
+
# 3.2 Derivative Works. You may specify that additional or different
|
| 47 |
+
# terms apply to the use, reproduction, and distribution of your
|
| 48 |
+
# derivative works of the Work ("Your Terms") only if (a) Your Terms
|
| 49 |
+
# provide that the use limitation in Section 3.3 applies to your
|
| 50 |
+
# derivative works, and (b) you identify the specific derivative
|
| 51 |
+
# works that are subject to Your Terms. Notwithstanding Your Terms,
|
| 52 |
+
# this License (including the redistribution requirements in Section
|
| 53 |
+
# 3.1) will continue to apply to the Work itself.
|
| 54 |
+
|
| 55 |
+
# 3.3 Use Limitation. The Work and any derivative works thereof only
|
| 56 |
+
# may be used or intended for use non-commercially. Notwithstanding
|
| 57 |
+
# the foregoing, NVIDIA and its affiliates may use the Work and any
|
| 58 |
+
# derivative works commercially. As used herein, "non-commercially"
|
| 59 |
+
# means for research or evaluation purposes only.
|
| 60 |
+
|
| 61 |
+
# 3.4 Patent Claims. If you bring or threaten to bring a patent claim
|
| 62 |
+
# against any Licensor (including any claim, cross-claim or
|
| 63 |
+
# counterclaim in a lawsuit) to enforce any patents that you allege
|
| 64 |
+
# are infringed by any Work, then your rights under this License from
|
| 65 |
+
# such Licensor (including the grant in Section 2.1) will terminate
|
| 66 |
+
# immediately.
|
| 67 |
+
|
| 68 |
+
# 3.5 Trademarks. This License does not grant any rights to use any
|
| 69 |
+
# Licensor’s or its affiliates’ names, logos, or trademarks, except
|
| 70 |
+
# as necessary to reproduce the notices described in this License.
|
| 71 |
+
|
| 72 |
+
# 3.6 Termination. If you violate any term of this License, then your
|
| 73 |
+
# rights under this License (including the grant in Section 2.1) will
|
| 74 |
+
# terminate immediately.
|
| 75 |
+
|
| 76 |
+
# 4. Disclaimer of Warranty.
|
| 77 |
+
|
| 78 |
+
# THE WORK IS PROVIDED "AS IS" WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
| 79 |
+
# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WARRANTIES OR CONDITIONS OF
|
| 80 |
+
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE OR
|
| 81 |
+
# NON-INFRINGEMENT. YOU BEAR THE RISK OF UNDERTAKING ANY ACTIVITIES UNDER
|
| 82 |
+
# THIS LICENSE.
|
| 83 |
+
|
| 84 |
+
# 5. Limitation of Liability.
|
| 85 |
+
|
| 86 |
+
# EXCEPT AS PROHIBITED BY APPLICABLE LAW, IN NO EVENT AND UNDER NO LEGAL
|
| 87 |
+
# THEORY, WHETHER IN TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE
|
| 88 |
+
# SHALL ANY LICENSOR BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY DIRECT,
|
| 89 |
+
# INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF
|
| 90 |
+
# OR RELATED TO THIS LICENSE, THE USE OR INABILITY TO USE THE WORK
|
| 91 |
+
# (INCLUDING BUT NOT LIMITED TO LOSS OF GOODWILL, BUSINESS INTERRUPTION,
|
| 92 |
+
# LOST PROFITS OR DATA, COMPUTER FAILURE OR MALFUNCTION, OR ANY OTHER
|
| 93 |
+
# COMMERCIAL DAMAGES OR LOSSES), EVEN IF THE LICENSOR HAS BEEN ADVISED OF
|
| 94 |
+
# THE POSSIBILITY OF SUCH DAMAGES.
|
| 95 |
+
|
| 96 |
+
# =======================================================================
|
| 97 |
+
|
| 98 |
+
import torch
|
| 99 |
+
from torch.autograd import Function
|
| 100 |
+
from torch.nn import functional as F
|
| 101 |
+
|
| 102 |
+
from annotator.mmpkg.mmcv.utils import to_2tuple
|
| 103 |
+
from ..utils import ext_loader
|
| 104 |
+
|
| 105 |
+
upfirdn2d_ext = ext_loader.load_ext('_ext', ['upfirdn2d'])
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
class UpFirDn2dBackward(Function):
    """Gradient of :class:`UpFirDn2d`.

    The backward pass of upfirdn is itself an upfirdn with the up/down
    factors swapped and a flipped kernel; implementing it as a separate
    autograd Function makes double backward work.
    """

    @staticmethod
    def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad,
                in_size, out_size):
        # grad_kernel is the spatially flipped kernel precomputed in
        # UpFirDn2d.forward; g_pad is the padding of the gradient op.
        up_x, up_y = up
        down_x, down_y = down
        g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad

        # The extension works on (N*C, H, W, 1) layout.
        grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1)

        # Gradient op: swap the roles of up and down.
        grad_input = upfirdn2d_ext.upfirdn2d(
            grad_output,
            grad_kernel,
            up_x=down_x,
            up_y=down_y,
            down_x=up_x,
            down_y=up_y,
            pad_x0=g_pad_x0,
            pad_x1=g_pad_x1,
            pad_y0=g_pad_y0,
            pad_y1=g_pad_y1)
        grad_input = grad_input.view(in_size[0], in_size[1], in_size[2],
                                     in_size[3])

        # Save what the double-backward pass (below) needs.
        ctx.save_for_backward(kernel)

        pad_x0, pad_x1, pad_y0, pad_y1 = pad

        ctx.up_x = up_x
        ctx.up_y = up_y
        ctx.down_x = down_x
        ctx.down_y = down_y
        ctx.pad_x0 = pad_x0
        ctx.pad_x1 = pad_x1
        ctx.pad_y0 = pad_y0
        ctx.pad_y1 = pad_y1
        ctx.in_size = in_size
        ctx.out_size = out_size

        return grad_input

    @staticmethod
    def backward(ctx, gradgrad_input):
        # Double backward: re-apply the original forward upfirdn to the
        # incoming second-order gradient.
        kernel, = ctx.saved_tensors

        gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2],
                                                ctx.in_size[3], 1)

        gradgrad_out = upfirdn2d_ext.upfirdn2d(
            gradgrad_input,
            kernel,
            up_x=ctx.up_x,
            up_y=ctx.up_y,
            down_x=ctx.down_x,
            down_y=ctx.down_y,
            pad_x0=ctx.pad_x0,
            pad_x1=ctx.pad_x1,
            pad_y0=ctx.pad_y0,
            pad_y1=ctx.pad_y1)
        # gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.out_size[0],
        #                                  ctx.out_size[1], ctx.in_size[3])
        gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1],
                                         ctx.out_size[0], ctx.out_size[1])

        # One slot per forward() argument; only grad_output is
        # differentiable.
        return gradgrad_out, None, None, None, None, None, None, None, None
| 175 |
+
|
| 176 |
+
|
| 177 |
+
class UpFirDn2d(Function):
    """Autograd wrapper around the ``upfirdn2d`` CUDA extension
    (upsample by zero insertion, FIR filter, downsample)."""

    @staticmethod
    def forward(ctx, input, kernel, up, down, pad):
        # up/down are (x, y) tuples; pad is (x0, x1, y0, y1).
        up_x, up_y = up
        down_x, down_y = down
        pad_x0, pad_x1, pad_y0, pad_y1 = pad

        kernel_h, kernel_w = kernel.shape
        batch, channel, in_h, in_w = input.shape
        ctx.in_size = input.shape

        # The extension works on (N*C, H, W, 1) layout.
        input = input.reshape(-1, in_h, in_w, 1)

        # Save the kernel and its spatial flip for the backward pass.
        ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1]))

        # Output spatial size of the upfirdn operation.
        out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
        out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
        ctx.out_size = (out_h, out_w)

        ctx.up = (up_x, up_y)
        ctx.down = (down_x, down_y)
        ctx.pad = (pad_x0, pad_x1, pad_y0, pad_y1)

        # Padding of the transposed (gradient) operation.
        g_pad_x0 = kernel_w - pad_x0 - 1
        g_pad_y0 = kernel_h - pad_y0 - 1
        g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1
        g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1

        ctx.g_pad = (g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1)

        out = upfirdn2d_ext.upfirdn2d(
            input,
            kernel,
            up_x=up_x,
            up_y=up_y,
            down_x=down_x,
            down_y=down_y,
            pad_x0=pad_x0,
            pad_x1=pad_x1,
            pad_y0=pad_y0,
            pad_y1=pad_y1)
        # out = out.view(major, out_h, out_w, minor)
        out = out.view(-1, channel, out_h, out_w)

        return out

    @staticmethod
    def backward(ctx, grad_output):
        kernel, grad_kernel = ctx.saved_tensors

        grad_input = UpFirDn2dBackward.apply(
            grad_output,
            kernel,
            grad_kernel,
            ctx.up,
            ctx.down,
            ctx.pad,
            ctx.g_pad,
            ctx.in_size,
            ctx.out_size,
        )

        # One slot per forward() input; only ``input`` is differentiable.
        return grad_input, None, None, None, None
| 241 |
+
|
| 242 |
+
|
| 243 |
+
def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
    """UpFRIDn for 2d features.

    UpFIRDn is short for upsample, apply FIR filter and downsample. More
    details can be found in:
    https://www.mathworks.com/help/signal/ref/upfirdn.html

    Args:
        input (Tensor): Tensor with shape of (n, c, h, w).
        kernel (Tensor): Filter kernel.
        up (int | tuple[int], optional): Upsampling factor. If given a number,
            we will use this factor for the both height and width side.
            Defaults to 1.
        down (int | tuple[int], optional): Downsampling factor. If given a
            number, we will use this factor for the both height and width side.
            Defaults to 1.
        pad (tuple[int], optional): Padding for tensors, (x_pad, y_pad) or
            (x_pad_0, x_pad_1, y_pad_0, y_pad_1). Defaults to (0, 0).

    Returns:
        Tensor: Tensor after UpFIRDn.

    Raises:
        ValueError: If ``pad`` does not have 2 or 4 elements.
    """
    # Normalize the arguments once for both branches. The original code left
    # the GPU-path padding variable unbound (NameError) when ``pad`` had
    # neither 2 nor 4 elements; fail explicitly instead.
    if len(pad) == 2:
        pad = (pad[0], pad[1], pad[0], pad[1])
    elif len(pad) != 4:
        raise ValueError(
            f'pad must have 2 or 4 elements, but got {len(pad)}')

    up = to_2tuple(up)
    down = to_2tuple(down)

    if input.device.type == 'cpu':
        # CPU fallback: pure PyTorch reference implementation.
        out = upfirdn2d_native(input, kernel, up[0], up[1], down[0], down[1],
                               pad[0], pad[1], pad[2], pad[3])
    else:
        # GPU: custom CUDA op with full autograd support.
        out = UpFirDn2d.apply(input, kernel, up, down, pad)

    return out
| 288 |
+
|
| 289 |
+
|
| 290 |
+
def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1,
                     pad_y0, pad_y1):
    """Pure PyTorch reference implementation of UpFIRDn.

    Zero-insert upsample by (up_x, up_y), pad/crop, correlate with the FIR
    ``kernel`` and downsample by striding with (down_x, down_y).
    """
    _, channel, in_h, in_w = input.shape
    # Work in (N*C, H, W, 1) layout.
    flat = input.reshape(-1, in_h, in_w, 1)

    _, in_h, in_w, minor = flat.shape
    kernel_h, kernel_w = kernel.shape

    # Upsample by inserting zeros between samples.
    x = flat.view(-1, in_h, 1, in_w, 1, minor)
    x = F.pad(x, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
    x = x.view(-1, in_h * up_y, in_w * up_x, minor)

    # Apply non-negative padding, then crop away any negative padding.
    x = F.pad(
        x,
        [0, 0,
         max(pad_x0, 0),
         max(pad_x1, 0),
         max(pad_y0, 0),
         max(pad_y1, 0)])
    x = x[:,
          max(-pad_y0, 0):x.shape[1] - max(-pad_y1, 0),
          max(-pad_x0, 0):x.shape[2] - max(-pad_x1, 0), :, ]

    # FIR filtering: conv2d with the flipped kernel == correlation.
    x = x.permute(0, 3, 1, 2)
    x = x.reshape(
        [-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1])
    flipped = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
    x = F.conv2d(x, flipped)
    x = x.reshape(
        -1,
        minor,
        in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1,
        in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1,
    )
    x = x.permute(0, 2, 3, 1)

    # Downsample by striding.
    x = x[:, ::down_y, ::down_x, :]

    out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
    out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1

    return x.view(-1, channel, out_h, out_w)
|
RAVE-main/annotator/mmpkg/mmcv/parallel/__init__.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from .collate import collate
|
| 3 |
+
from .data_container import DataContainer
|
| 4 |
+
from .data_parallel import MMDataParallel
|
| 5 |
+
from .distributed import MMDistributedDataParallel
|
| 6 |
+
from .registry import MODULE_WRAPPERS
|
| 7 |
+
from .scatter_gather import scatter, scatter_kwargs
|
| 8 |
+
from .utils import is_module_wrapper
|
| 9 |
+
|
| 10 |
+
__all__ = [
|
| 11 |
+
'collate', 'DataContainer', 'MMDataParallel', 'MMDistributedDataParallel',
|
| 12 |
+
'scatter', 'scatter_kwargs', 'is_module_wrapper', 'MODULE_WRAPPERS'
|
| 13 |
+
]
|
RAVE-main/annotator/mmpkg/mmcv/parallel/_functions.py
ADDED
|
@@ -0,0 +1,79 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import torch
|
| 3 |
+
from torch.nn.parallel._functions import _get_stream
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def scatter(input, devices, streams=None):
    """Scatters tensor across multiple GPUs.

    Args:
        input (list | torch.Tensor): Data to scatter; lists are scattered
            recursively, chunked evenly over ``devices``.
        devices (list[int]): Target device ids; ``[-1]`` means CPU, in which
            case the tensor gains an extra leading dimension instead.
        streams (list, optional): CUDA copy stream per device.
            Default: None (current stream).

    Returns:
        list | torch.Tensor: Scattered counterpart of ``input``.

    Raises:
        TypeError: If ``input`` is neither a list nor a ``torch.Tensor``.
    """
    if streams is None:
        streams = [None] * len(devices)

    if isinstance(input, list):
        # Ceil-divide so every element gets a device/stream.
        chunk_size = (len(input) - 1) // len(devices) + 1
        outputs = [
            scatter(input[i], [devices[i // chunk_size]],
                    [streams[i // chunk_size]]) for i in range(len(input))
        ]
        return outputs
    elif isinstance(input, torch.Tensor):
        output = input.contiguous()
        # TODO: copy to a pinned buffer first (if copying from CPU)
        stream = streams[0] if output.numel() > 0 else None
        if devices != [-1]:
            with torch.cuda.device(devices[0]), torch.cuda.stream(stream):
                output = output.cuda(devices[0], non_blocking=True)
        else:
            # unsqueeze the first dimension thus the tensor's shape is the
            # same as those scattered with GPU.
            output = output.unsqueeze(0)
        return output
    else:
        # Narrow, conventional exception type instead of bare Exception.
        raise TypeError(f'Unknown type {type(input)}.')
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def synchronize_stream(output, devices, streams):
    """Make each device's current stream wait on its copy stream.

    Args:
        output (list | torch.Tensor): Scattered outputs; lists are handled
            recursively, chunked evenly over ``devices``.
        devices (list[int]): Device id per chunk.
        streams (list): Copy stream per chunk.

    Raises:
        TypeError: If ``output`` is neither a list nor a ``torch.Tensor``.
    """
    if isinstance(output, list):
        chunk_size = len(output) // len(devices)
        for i in range(len(devices)):
            for j in range(chunk_size):
                synchronize_stream(output[i * chunk_size + j], [devices[i]],
                                   [streams[i]])
    elif isinstance(output, torch.Tensor):
        if output.numel() != 0:
            with torch.cuda.device(devices[0]):
                main_stream = torch.cuda.current_stream()
                main_stream.wait_stream(streams[0])
                # Keep the tensor's memory alive until the main stream has
                # consumed the copy.
                output.record_stream(main_stream)
    else:
        # Narrow, conventional exception type instead of bare Exception.
        raise TypeError(f'Unknown type {type(output)}.')
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def get_input_device(input):
    """Return the CUDA device id holding ``input``, or -1 for CPU.

    Args:
        input (list | torch.Tensor): Tensor or (nested) list of tensors.

    Returns:
        int: Device id of the first CUDA tensor found, else -1.

    Raises:
        TypeError: If ``input`` is neither a list nor a ``torch.Tensor``.
    """
    if isinstance(input, list):
        for item in input:
            input_device = get_input_device(item)
            if input_device != -1:
                return input_device
        return -1
    elif isinstance(input, torch.Tensor):
        return input.get_device() if input.is_cuda else -1
    else:
        # Narrow, conventional exception type instead of bare Exception.
        raise TypeError(f'Unknown type {type(input)}.')
| 62 |
+
|
| 63 |
+
|
| 64 |
+
class Scatter:
    """Scatter helper mirroring ``torch.nn.parallel._functions.Scatter``
    that additionally supports CPU targets (``target_gpus == [-1]``)."""

    @staticmethod
    def forward(target_gpus, input):
        # Returns a tuple of scattered chunks, one per target device.
        input_device = get_input_device(input)
        streams = None
        if input_device == -1 and target_gpus != [-1]:
            # Perform CPU to GPU copies in a background stream
            streams = [_get_stream(device) for device in target_gpus]

        outputs = scatter(input, target_gpus, streams)
        # Synchronize with the copy stream
        if streams is not None:
            synchronize_stream(outputs, target_gpus, streams)

        return tuple(outputs)
|
RAVE-main/annotator/mmpkg/mmcv/parallel/collate.py
ADDED
|
@@ -0,0 +1,84 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from collections.abc import Mapping, Sequence
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
import torch.nn.functional as F
|
| 6 |
+
from torch.utils.data.dataloader import default_collate
|
| 7 |
+
|
| 8 |
+
from .data_container import DataContainer
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def collate(batch, samples_per_gpu=1):
    """Puts each data field into a tensor/DataContainer with outer dimension
    batch size.

    Extend default_collate to add support for
    :type:`~mmcv.parallel.DataContainer`. There are 3 cases.

    1. cpu_only = True, e.g., meta data
    2. cpu_only = False, stack = True, e.g., images tensors
    3. cpu_only = False, stack = False, e.g., gt bboxes

    Args:
        batch (Sequence): Batch of samples to be collated.
        samples_per_gpu (int): Number of consecutive samples grouped
            together for each GPU. Defaults to 1.

    Raises:
        TypeError: If ``batch`` is not a sequence.
        ValueError: If a DataContainer's ``pad_dims`` is neither ``None``
            nor a supported integer.
    """

    if not isinstance(batch, Sequence):
        # Fix: report type(batch), not batch.dtype — arbitrary non-sequence
        # inputs (ints, dicts, ...) have no ``.dtype``, so the old message
        # raised AttributeError and masked the intended TypeError.
        raise TypeError(f'{type(batch)} is not supported.')

    if isinstance(batch[0], DataContainer):
        stacked = []
        if batch[0].cpu_only:
            # Case 1: keep data on CPU; just group samples per GPU.
            for i in range(0, len(batch), samples_per_gpu):
                stacked.append(
                    [sample.data for sample in batch[i:i + samples_per_gpu]])
            return DataContainer(
                stacked, batch[0].stack, batch[0].padding_value, cpu_only=True)
        elif batch[0].stack:
            # Case 2: pad the trailing ``pad_dims`` dimensions of each
            # sample up to the group's max shape, then stack per GPU group.
            for i in range(0, len(batch), samples_per_gpu):
                assert isinstance(batch[i].data, torch.Tensor)

                if batch[i].pad_dims is not None:
                    ndim = batch[i].dim()
                    assert ndim > batch[i].pad_dims
                    max_shape = [0 for _ in range(batch[i].pad_dims)]
                    for dim in range(1, batch[i].pad_dims + 1):
                        max_shape[dim - 1] = batch[i].size(-dim)
                    for sample in batch[i:i + samples_per_gpu]:
                        # Leading (non-padded) dims must match exactly.
                        for dim in range(0, ndim - batch[i].pad_dims):
                            assert batch[i].size(dim) == sample.size(dim)
                        for dim in range(1, batch[i].pad_dims + 1):
                            max_shape[dim - 1] = max(max_shape[dim - 1],
                                                     sample.size(-dim))
                    padded_samples = []
                    for sample in batch[i:i + samples_per_gpu]:
                        # F.pad takes (left, right) pairs for the last dims
                        # in reverse order; pad only on the right side.
                        pad = [0 for _ in range(batch[i].pad_dims * 2)]
                        for dim in range(1, batch[i].pad_dims + 1):
                            pad[2 * dim -
                                1] = max_shape[dim - 1] - sample.size(-dim)
                        padded_samples.append(
                            F.pad(
                                sample.data, pad, value=sample.padding_value))
                    stacked.append(default_collate(padded_samples))
                elif batch[i].pad_dims is None:
                    stacked.append(
                        default_collate([
                            sample.data
                            for sample in batch[i:i + samples_per_gpu]
                        ]))
                else:
                    raise ValueError(
                        'pad_dims should be either None or integers (1-3)')

        else:
            # Case 3: no stacking; keep per-GPU lists of the raw data.
            for i in range(0, len(batch), samples_per_gpu):
                stacked.append(
                    [sample.data for sample in batch[i:i + samples_per_gpu]])
        return DataContainer(stacked, batch[0].stack, batch[0].padding_value)
    elif isinstance(batch[0], Sequence):
        transposed = zip(*batch)
        return [collate(samples, samples_per_gpu) for samples in transposed]
    elif isinstance(batch[0], Mapping):
        return {
            key: collate([d[key] for d in batch], samples_per_gpu)
            for key in batch[0]
        }
    else:
        return default_collate(batch)
|
RAVE-main/annotator/mmpkg/mmcv/parallel/data_parallel.py
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from itertools import chain
|
| 3 |
+
|
| 4 |
+
from torch.nn.parallel import DataParallel
|
| 5 |
+
|
| 6 |
+
from .scatter_gather import scatter_kwargs
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class MMDataParallel(DataParallel):
    """The DataParallel module that supports DataContainer.

    MMDataParallel has two main differences with PyTorch DataParallel:

    - It supports a custom type :class:`DataContainer` which allows more
      flexible control of input data during both GPU and CPU inference.
    - It implement two more APIs ``train_step()`` and ``val_step()``.

    Args:
        module (:class:`nn.Module`): Module to be encapsulated.
        device_ids (list[int]): Device IDS of modules to be scattered to.
            Defaults to None when GPU is not available.
        output_device (str | int): Device ID for output. Defaults to None.
        dim (int): Dimension used to scatter the data. Defaults to 0.
    """

    def __init__(self, *args, dim=0, **kwargs):
        super().__init__(*args, dim=dim, **kwargs)
        self.dim = dim

    def forward(self, *inputs, **kwargs):
        """Override the original forward function.

        The main difference lies in the CPU inference where the data in
        :class:`DataContainers` will still be gathered.
        """
        if not self.device_ids:
            # We add the following line thus the module could gather and
            # convert data containers as those in GPU inference
            inputs, kwargs = self.scatter(inputs, kwargs, [-1])
            return self.module(*inputs[0], **kwargs[0])
        else:
            return super().forward(*inputs, **kwargs)

    def scatter(self, inputs, kwargs, device_ids):
        """Scatter inputs/kwargs (DataContainer-aware) to ``device_ids``."""
        return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)

    def _check_src_device(self):
        """Validate single-GPU use and parameter/buffer placement.

        Shared by :meth:`train_step` and :meth:`val_step`, which previously
        duplicated this check (with a typo'd message in one copy).

        Raises:
            AssertionError: If more than one device id is configured.
            RuntimeError: If any parameter or buffer lives on a device
                other than ``device_ids[0]``.
        """
        # Fix: the train_step copy of this message lacked the space before
        # 'instead.' ("...DataParallelinstead."); val_step's wording is used.
        assert len(self.device_ids) == 1, \
            ('MMDataParallel only supports single GPU training, if you need to'
             ' train with multiple GPUs, please use MMDistributedDataParallel'
             ' instead.')

        for t in chain(self.module.parameters(), self.module.buffers()):
            if t.device != self.src_device_obj:
                raise RuntimeError(
                    'module must have its parameters and buffers '
                    f'on device {self.src_device_obj} (device_ids[0]) but '
                    f'found one of them on device: {t.device}')

    def train_step(self, *inputs, **kwargs):
        """Run the wrapped module's ``train_step`` on CPU or a single GPU."""
        if not self.device_ids:
            # We add the following line thus the module could gather and
            # convert data containers as those in GPU inference
            inputs, kwargs = self.scatter(inputs, kwargs, [-1])
            return self.module.train_step(*inputs[0], **kwargs[0])

        self._check_src_device()

        inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
        return self.module.train_step(*inputs[0], **kwargs[0])

    def val_step(self, *inputs, **kwargs):
        """Run the wrapped module's ``val_step`` on CPU or a single GPU."""
        if not self.device_ids:
            # We add the following line thus the module could gather and
            # convert data containers as those in GPU inference
            inputs, kwargs = self.scatter(inputs, kwargs, [-1])
            return self.module.val_step(*inputs[0], **kwargs[0])

        self._check_src_device()

        inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
        return self.module.val_step(*inputs[0], **kwargs[0])
|