Add files using upload-large-folder tool
Browse files. This view is limited to 50 files because the change set contains too many files.
See raw diff
- RAVE-main/annotator/mmpkg/mmcv/cnn/bricks/conv_ws.py +148 -0
- RAVE-main/annotator/mmpkg/mmcv/cnn/utils/__init__.py +19 -0
- RAVE-main/annotator/mmpkg/mmcv/cnn/utils/flops_counter.py +599 -0
- RAVE-main/annotator/mmpkg/mmcv/cnn/utils/fuse_conv_bn.py +59 -0
- RAVE-main/annotator/mmpkg/mmcv/cnn/utils/sync_bn.py +59 -0
- RAVE-main/annotator/mmpkg/mmcv/cnn/utils/weight_init.py +684 -0
- RAVE-main/annotator/mmpkg/mmcv/runner/base_runner.py +542 -0
- RAVE-main/annotator/mmpkg/mmcv/runner/default_constructor.py +44 -0
- RAVE-main/annotator/mmpkg/mmcv/runner/hooks/__init__.py +29 -0
- RAVE-main/annotator/mmpkg/mmcv/runner/hooks/checkpoint.py +167 -0
- RAVE-main/annotator/mmpkg/mmcv/runner/hooks/closure.py +11 -0
- RAVE-main/annotator/mmpkg/mmcv/runner/hooks/evaluation.py +509 -0
- RAVE-main/annotator/mmpkg/mmcv/runner/hooks/hook.py +92 -0
- RAVE-main/annotator/mmpkg/mmcv/runner/hooks/logger/__init__.py +15 -0
- RAVE-main/annotator/mmpkg/mmcv/runner/hooks/logger/base.py +166 -0
- RAVE-main/annotator/mmpkg/mmcv/runner/hooks/logger/dvclive.py +58 -0
- RAVE-main/annotator/mmpkg/mmcv/runner/hooks/logger/mlflow.py +78 -0
- RAVE-main/annotator/mmpkg/mmcv/runner/hooks/logger/neptune.py +82 -0
- RAVE-main/annotator/mmpkg/mmcv/runner/hooks/logger/pavi.py +117 -0
- RAVE-main/annotator/mmpkg/mmcv/runner/hooks/logger/tensorboard.py +57 -0
- RAVE-main/annotator/mmpkg/mmcv/runner/hooks/logger/text.py +256 -0
- RAVE-main/annotator/mmpkg/mmcv/runner/hooks/logger/wandb.py +56 -0
- RAVE-main/annotator/mmpkg/mmcv/runner/hooks/momentum_updater.py +493 -0
- RAVE-main/annotator/mmpkg/mmcv/runner/hooks/sampler_seed.py +20 -0
- RAVE-main/annotator/mmpkg/mmcv/runner/hooks/sync_buffer.py +22 -0
- RAVE-main/annotator/mmpkg/mmseg/models/__init__.py +12 -0
- RAVE-main/annotator/mmpkg/mmseg/models/backbones/fast_scnn.py +375 -0
- RAVE-main/annotator/mmpkg/mmseg/models/backbones/mobilenet_v3.py +255 -0
- RAVE-main/annotator/mmpkg/mmseg/models/backbones/resnest.py +314 -0
- RAVE-main/annotator/mmpkg/mmseg/models/backbones/resnet.py +688 -0
- RAVE-main/annotator/mmpkg/mmseg/models/backbones/resnext.py +145 -0
- RAVE-main/annotator/mmpkg/mmseg/models/builder.py +46 -0
- RAVE-main/annotator/mmpkg/mmseg/models/decode_heads/__init__.py +28 -0
- RAVE-main/annotator/mmpkg/mmseg/models/decode_heads/ann_head.py +245 -0
- RAVE-main/annotator/mmpkg/mmseg/models/decode_heads/apc_head.py +158 -0
- RAVE-main/annotator/mmpkg/mmseg/models/decode_heads/aspp_head.py +107 -0
- RAVE-main/annotator/mmpkg/mmseg/models/decode_heads/cascade_decode_head.py +57 -0
- RAVE-main/annotator/mmpkg/mmseg/models/decode_heads/cc_head.py +45 -0
- RAVE-main/annotator/mmpkg/mmseg/models/decode_heads/da_head.py +178 -0
- RAVE-main/annotator/mmpkg/mmseg/models/decode_heads/decode_head.py +234 -0
- RAVE-main/annotator/mmpkg/mmseg/models/decode_heads/dm_head.py +140 -0
- RAVE-main/annotator/mmpkg/mmseg/models/decode_heads/dnl_head.py +131 -0
- RAVE-main/annotator/mmpkg/mmseg/models/decode_heads/ema_head.py +168 -0
- RAVE-main/annotator/mmpkg/mmseg/models/decode_heads/enc_head.py +187 -0
- RAVE-main/annotator/mmpkg/mmseg/models/decode_heads/fcn_head.py +81 -0
- RAVE-main/annotator/mmpkg/mmseg/models/decode_heads/fpn_head.py +68 -0
- RAVE-main/annotator/mmpkg/mmseg/models/decode_heads/gc_head.py +47 -0
- RAVE-main/annotator/mmpkg/mmseg/models/decode_heads/lraspp_head.py +90 -0
- RAVE-main/annotator/mmpkg/mmseg/models/decode_heads/nl_head.py +49 -0
- RAVE-main/annotator/mmpkg/mmseg/models/decode_heads/ocr_head.py +127 -0
RAVE-main/annotator/mmpkg/mmcv/cnn/bricks/conv_ws.py
ADDED
|
@@ -0,0 +1,148 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import torch
|
| 3 |
+
import torch.nn as nn
|
| 4 |
+
import torch.nn.functional as F
|
| 5 |
+
|
| 6 |
+
from .registry import CONV_LAYERS
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def conv_ws_2d(input,
               weight,
               bias=None,
               stride=1,
               padding=0,
               dilation=1,
               groups=1,
               eps=1e-5):
    """Run a 2d convolution with a weight-standardized kernel.

    Every output-channel slice of ``weight`` is normalized to zero mean and
    roughly unit standard deviation before being handed to ``F.conv2d``; the
    original ``weight`` tensor itself is left untouched.

    Args:
        input (Tensor): Input feature map.
        weight (Tensor): Convolution kernel of shape
            ``(out_channels, in_channels // groups, kH, kW)``.
        bias (Tensor | None): Optional bias. Default: None.
        stride, padding, dilation, groups: Forwarded to ``F.conv2d``.
        eps (float): Added to the per-channel std for numerical stability.
            Default: 1e-5.

    Returns:
        Tensor: The convolution output.
    """
    out_channels = weight.size(0)
    flat_weight = weight.view(out_channels, -1)
    channel_mean = flat_weight.mean(dim=1, keepdim=True).view(
        out_channels, 1, 1, 1)
    channel_std = flat_weight.std(dim=1, keepdim=True).view(
        out_channels, 1, 1, 1)
    standardized = (weight - channel_mean) / (channel_std + eps)
    return F.conv2d(input, standardized, bias, stride, padding, dilation,
                    groups)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
@CONV_LAYERS.register_module('ConvWS')
class ConvWS2d(nn.Conv2d):
    """``nn.Conv2d`` whose kernel is weight-standardized on every forward.

    All constructor arguments mirror ``nn.Conv2d``; ``eps`` is the constant
    added to the per-channel std for numerical stability.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True,
                 eps=1e-5):
        super().__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias)
        self.eps = eps

    def forward(self, x):
        # Standardization happens on the fly, so the optimizer keeps
        # updating the raw (unstandardized) weights.
        return conv_ws_2d(x, self.weight, self.bias, self.stride,
                          self.padding, self.dilation, self.groups, self.eps)
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
@CONV_LAYERS.register_module(name='ConvAWS')
class ConvAWS2d(nn.Conv2d):
    """AWS (Adaptive Weight Standardization)

    This is a variant of Weight Standardization
    (https://arxiv.org/pdf/1903.10520.pdf)
    It is used in DetectoRS to avoid NaN
    (https://arxiv.org/pdf/2006.02334.pdf)

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the conv kernel
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to both sides of
            the input. Default: 0
        dilation (int or tuple, optional): Spacing between kernel elements.
            Default: 1
        groups (int, optional): Number of blocked connections from input
            channels to output channels. Default: 1
        bias (bool, optional): If set True, adds a learnable bias to the
            output. Default: True
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True):
        super().__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias)
        # Per-output-channel affine statistics. Registered as buffers (not
        # parameters) so they are saved in checkpoints but receive no
        # gradients from the optimizer.
        self.register_buffer('weight_gamma',
                             torch.ones(self.out_channels, 1, 1, 1))
        self.register_buffer('weight_beta',
                             torch.zeros(self.out_channels, 1, 1, 1))

    def _get_weight(self, weight):
        # Standardize each output-channel slice to zero mean / unit variance,
        # then re-apply the stored affine transform (gamma * w + beta).
        weight_flat = weight.view(weight.size(0), -1)
        mean = weight_flat.mean(dim=1).view(-1, 1, 1, 1)
        std = torch.sqrt(weight_flat.var(dim=1) + 1e-5).view(-1, 1, 1, 1)
        weight = (weight - mean) / std
        weight = self.weight_gamma * weight + self.weight_beta
        return weight

    def forward(self, x):
        weight = self._get_weight(self.weight)
        return F.conv2d(x, weight, self.bias, self.stride, self.padding,
                        self.dilation, self.groups)

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        """Override default load function.

        AWS overrides the function _load_from_state_dict to recover
        weight_gamma and weight_beta if they are missing. If weight_gamma and
        weight_beta are found in the checkpoint, this function will return
        after super()._load_from_state_dict. Otherwise, it will compute the
        mean and std of the pretrained weights and store them in weight_beta
        and weight_gamma.
        """

        # Sentinel trick: fill gamma with -1 before loading. If the
        # checkpoint carries `weight_gamma` it gets overwritten with the
        # stored (positive) std values, so a positive mean below means
        # "found in the checkpoint".
        self.weight_gamma.data.fill_(-1)
        local_missing_keys = []
        super()._load_from_state_dict(state_dict, prefix, local_metadata,
                                      strict, local_missing_keys,
                                      unexpected_keys, error_msgs)
        if self.weight_gamma.data.mean() > 0:
            # gamma/beta were restored from the checkpoint; just forward
            # the genuinely missing keys to the caller and stop.
            for k in local_missing_keys:
                missing_keys.append(k)
            return
        # gamma/beta were absent from the checkpoint: derive them from the
        # loaded conv weights so the standardized kernel initially
        # reproduces the pretrained one.
        weight = self.weight.data
        weight_flat = weight.view(weight.size(0), -1)
        mean = weight_flat.mean(dim=1).view(-1, 1, 1, 1)
        std = torch.sqrt(weight_flat.var(dim=1) + 1e-5).view(-1, 1, 1, 1)
        self.weight_beta.data.copy_(mean)
        self.weight_gamma.data.copy_(std)
        # Do not report gamma/beta as missing -- they were reconstructed.
        missing_gamma_beta = [
            k for k in local_missing_keys
            if k.endswith('weight_gamma') or k.endswith('weight_beta')
        ]
        for k in missing_gamma_beta:
            local_missing_keys.remove(k)
        for k in local_missing_keys:
            missing_keys.append(k)
|
RAVE-main/annotator/mmpkg/mmcv/cnn/utils/__init__.py
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from .flops_counter import get_model_complexity_info
|
| 3 |
+
from .fuse_conv_bn import fuse_conv_bn
|
| 4 |
+
from .sync_bn import revert_sync_batchnorm
|
| 5 |
+
from .weight_init import (INITIALIZERS, Caffe2XavierInit, ConstantInit,
|
| 6 |
+
KaimingInit, NormalInit, PretrainedInit,
|
| 7 |
+
TruncNormalInit, UniformInit, XavierInit,
|
| 8 |
+
bias_init_with_prob, caffe2_xavier_init,
|
| 9 |
+
constant_init, initialize, kaiming_init, normal_init,
|
| 10 |
+
trunc_normal_init, uniform_init, xavier_init)
|
| 11 |
+
|
| 12 |
+
__all__ = [
|
| 13 |
+
'get_model_complexity_info', 'bias_init_with_prob', 'caffe2_xavier_init',
|
| 14 |
+
'constant_init', 'kaiming_init', 'normal_init', 'trunc_normal_init',
|
| 15 |
+
'uniform_init', 'xavier_init', 'fuse_conv_bn', 'initialize',
|
| 16 |
+
'INITIALIZERS', 'ConstantInit', 'XavierInit', 'NormalInit',
|
| 17 |
+
'TruncNormalInit', 'UniformInit', 'KaimingInit', 'PretrainedInit',
|
| 18 |
+
'Caffe2XavierInit', 'revert_sync_batchnorm'
|
| 19 |
+
]
|
RAVE-main/annotator/mmpkg/mmcv/cnn/utils/flops_counter.py
ADDED
|
@@ -0,0 +1,599 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Modified from flops-counter.pytorch by Vladislav Sovrasov
|
| 2 |
+
# original repo: https://github.com/sovrasov/flops-counter.pytorch
|
| 3 |
+
|
| 4 |
+
# MIT License
|
| 5 |
+
|
| 6 |
+
# Copyright (c) 2018 Vladislav Sovrasov
|
| 7 |
+
|
| 8 |
+
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 9 |
+
# of this software and associated documentation files (the "Software"), to deal
|
| 10 |
+
# in the Software without restriction, including without limitation the rights
|
| 11 |
+
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 12 |
+
# copies of the Software, and to permit persons to whom the Software is
|
| 13 |
+
# furnished to do so, subject to the following conditions:
|
| 14 |
+
|
| 15 |
+
# The above copyright notice and this permission notice shall be included in
|
| 16 |
+
# all copies or substantial portions of the Software.
|
| 17 |
+
|
| 18 |
+
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 19 |
+
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 20 |
+
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 21 |
+
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 22 |
+
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 23 |
+
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 24 |
+
# SOFTWARE.
|
| 25 |
+
|
| 26 |
+
import sys
|
| 27 |
+
from functools import partial
|
| 28 |
+
|
| 29 |
+
import numpy as np
|
| 30 |
+
import torch
|
| 31 |
+
import torch.nn as nn
|
| 32 |
+
|
| 33 |
+
import annotator.mmpkg.mmcv as mmcv
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def get_model_complexity_info(model,
                              input_shape,
                              print_per_layer_stat=True,
                              as_strings=True,
                              input_constructor=None,
                              flush=False,
                              ost=sys.stdout):
    """Compute FLOPs and parameter count of ``model`` for ``input_shape``.

    Supported layer families include convolutions (1d/2d/3d), common
    activations, poolings (plain and adaptive), normalization layers
    (Batch/Group/Instance/LayerNorm), ``nn.Linear``,
    ``nn.ConvTranspose2d`` and ``nn.Upsample``.

    Args:
        model (nn.Module): The model for complexity calculation.
        input_shape (tuple): Shape of a single input sample (no batch dim).
        print_per_layer_stat (bool): Whether to print a per-layer
            complexity breakdown. Default: True.
        as_strings (bool): Output FLOPs and params counts in a string form.
            Default: True.
        input_constructor (None | callable): If specified, a callable that
            builds the model input from ``input_shape``; otherwise a random
            tensor of that shape is used. Default: None.
        flush (bool): same as that in :func:`print`. Default: False.
        ost (stream): same as ``file`` param in :func:`print`.
            Default: sys.stdout.

    Returns:
        tuple[float | str]: ``(flops, params)`` as strings when
        ``as_strings`` is True, otherwise as raw numbers.
    """
    assert type(input_shape) is tuple
    assert len(input_shape) >= 1
    assert isinstance(model, nn.Module)

    flops_model = add_flops_counting_methods(model)
    flops_model.eval()
    flops_model.start_flops_count()

    if input_constructor:
        model_input = input_constructor(input_shape)
        _ = flops_model(**model_input)
    else:
        try:
            # Match the dtype/device of the model's first parameter.
            reference = next(flops_model.parameters())
            dummy = torch.ones(()).new_empty((1, *input_shape),
                                             dtype=reference.dtype,
                                             device=reference.device)
        except StopIteration:
            # Parameter-free models (e.g. `nn.ReLU()`, `nn.AvgPool2d`) have
            # no reference parameter; fall back to the default tensor type.
            dummy = torch.ones(()).new_empty((1, *input_shape))

        _ = flops_model(dummy)

    flops_count, params_count = flops_model.compute_average_flops_cost()
    if print_per_layer_stat:
        print_model_with_flops(
            flops_model, flops_count, params_count, ost=ost, flush=flush)
    flops_model.stop_flops_count()

    if as_strings:
        return flops_to_string(flops_count), params_to_string(params_count)

    return flops_count, params_count
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
def flops_to_string(flops, units='GFLOPs', precision=2):
    """Convert a FLOPs number into a human-readable string.

    Note that a multiply-add pair is counted here as a single FLOP.

    Args:
        flops (float): FLOPs number to be converted.
        units (str | None): Target units; one of None, 'GFLOPs', 'MFLOPs',
            'KFLOPs', 'FLOPs'. ``None`` auto-selects the largest unit that
            yields a value >= 1. Default: 'GFLOPs'.
        precision (int): Digit number after the decimal point. Default: 2.

    Returns:
        str: The converted FLOPs number with units.

    Examples:
        >>> flops_to_string(1e9)
        '1.0 GFLOPs'
        >>> flops_to_string(2e5, 'MFLOPs')
        '0.2 MFLOPs'
        >>> flops_to_string(3e-9, None)
        '3e-09 FLOPs'
    """
    if units is None:
        # Auto-select: try the largest unit first.
        for auto_units, threshold in (('GFLOPs', 10**9), ('MFLOPs', 10**6),
                                      ('KFLOPs', 10**3)):
            if flops // threshold > 0:
                return str(round(flops / float(threshold),
                                 precision)) + ' ' + auto_units
        return str(flops) + ' FLOPs'
    divisor = {'GFLOPs': 10.**9, 'MFLOPs': 10.**6, 'KFLOPs': 10.**3}.get(units)
    if divisor is None:
        # Unknown unit string: report the raw value, matching 'FLOPs'.
        return str(flops) + ' FLOPs'
    return str(round(flops / divisor, precision)) + ' ' + units
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
def params_to_string(num_params, units=None, precision=2):
    """Convert a parameter count into a human-readable string.

    Args:
        num_params (float): Parameter number to be converted.
        units (str | None): Target units; one of None, 'M', 'K' and ''.
            ``None`` auto-selects the most suitable unit. Default: None.
        precision (int): Digit number after the decimal point. Default: 2.

    Returns:
        str: The converted parameter number with units.

    Examples:
        >>> params_to_string(1e9)
        '1000.0 M'
        >>> params_to_string(2e5)
        '200.0 k'
        >>> params_to_string(3e-9)
        '3e-09'
    """
    if units == 'M':
        return str(round(num_params / 10.**6, precision)) + ' ' + units
    if units == 'K':
        return str(round(num_params / 10.**3, precision)) + ' ' + units
    if units is None:
        # Auto-select the unit; note the lowercase 'k' suffix here.
        if num_params // 10**6 > 0:
            return str(round(num_params / 10**6, precision)) + ' M'
        if num_params // 10**3:
            return str(round(num_params / 10**3, precision)) + ' k'
    # Either no unit matched in auto mode, or an unrecognized unit string.
    return str(num_params)
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
def print_model_with_flops(model,
                           total_flops,
                           total_params,
                           units='GFLOPs',
                           precision=3,
                           ost=sys.stdout,
                           flush=False):
    """Print ``model`` with per-layer parameter and FLOPs annotations.

    Temporarily patches every submodule's ``extra_repr`` so that a normal
    ``print(model)`` also shows absolute and relative parameter / FLOPs
    counts per layer, then restores the original representations.

    Args:
        model (nn.Module): The model to be printed; it must have been
            prepared by ``add_flops_counting_methods`` and run at least once.
        total_flops (float): Total FLOPs of the model.
        total_params (float): Total parameter counts of the model.
        units (str | None): Converted FLOPs units. Default: 'GFLOPs'.
        precision (int): Digit number after the decimal point. Default: 3.
        ost (stream): same as `file` param in :func:`print`.
            Default: sys.stdout.
        flush (bool): same as that in :func:`print`. Default: False.
    """

    def accumulate_params(self):
        # Leaf (supported) modules report their own count; containers sum
        # over their children recursively.
        if is_supported_instance(self):
            return self.__params__
        total = 0
        for child in self.children():
            total += child.accumulate_params()
        return total

    def accumulate_flops(self):
        if is_supported_instance(self):
            # Average over the number of batches seen by the root model.
            return self.__flops__ / model.__batch_counter__
        total = 0
        for child in self.children():
            total += child.accumulate_flops()
        return total

    def flops_repr(self):
        num_params = self.accumulate_params()
        flops_cost = self.accumulate_flops()
        return ', '.join([
            params_to_string(num_params, units='M', precision=precision),
            '{:.3%} Params'.format(num_params / total_params),
            flops_to_string(flops_cost, units=units, precision=precision),
            '{:.3%} FLOPs'.format(flops_cost / total_flops),
            self.original_extra_repr()
        ])

    def add_extra_repr(m):
        # Bind the helpers as methods of `m` and swap in the decorated repr,
        # remembering the original so it can be restored afterwards.
        m.accumulate_flops = accumulate_flops.__get__(m)
        m.accumulate_params = accumulate_params.__get__(m)
        flops_extra_repr = flops_repr.__get__(m)
        if m.extra_repr != flops_extra_repr:
            m.original_extra_repr = m.extra_repr
            m.extra_repr = flops_extra_repr
            assert m.extra_repr != m.original_extra_repr

    def del_extra_repr(m):
        # Undo the patching so the model object is left unchanged.
        if hasattr(m, 'original_extra_repr'):
            m.extra_repr = m.original_extra_repr
            del m.original_extra_repr
        if hasattr(m, 'accumulate_flops'):
            del m.accumulate_flops

    model.apply(add_extra_repr)
    print(model, file=ost, flush=flush)
    model.apply(del_extra_repr)
|
| 305 |
+
|
| 306 |
+
|
| 307 |
+
def get_model_parameters_number(model):
    """Count the trainable parameters of a model.

    Args:
        model (nn.module): The model for parameter number calculation.

    Returns:
        int: Total element count of all parameters with ``requires_grad``.
    """
    return sum(p.numel() for p in model.parameters() if p.requires_grad)
|
| 318 |
+
|
| 319 |
+
|
| 320 |
+
def add_flops_counting_methods(net_main_module):
    """Attach the flops-counting control methods to a network object.

    The functions are bound onto the module instance itself so that each of
    them receives the network as ``self`` when called.

    Returns:
        nn.Module: The same module, with its counters reset.
    """
    for counting_method in (start_flops_count, stop_flops_count,
                            reset_flops_count, compute_average_flops_cost):
        bound = counting_method.__get__(net_main_module)
        setattr(net_main_module, counting_method.__name__, bound)

    net_main_module.reset_flops_count()

    return net_main_module
|
| 335 |
+
|
| 336 |
+
|
| 337 |
+
def compute_average_flops_cost(self):
    """Compute the mean FLOPs per processed batch plus the parameter count.

    Available on a network after `add_flops_counting_methods()` has been
    applied and at least one forward pass has been run.

    Returns:
        tuple: ``(flops_per_batch, num_params)``.
    """
    flops_sum = sum(module.__flops__ for module in self.modules()
                    if is_supported_instance(module))
    params_sum = get_model_parameters_number(self)
    return flops_sum / self.__batch_counter__, params_sum
|
| 353 |
+
|
| 354 |
+
|
| 355 |
+
def start_flops_count(self):
    """Activate the computation of mean flops consumption per image.

    Registers a batch-counter hook on the root module and a flops-counting
    forward hook on every supported leaf module. Available after
    ``add_flops_counting_methods()`` is called on a desired net object; it
    should be called before running the network.
    """
    add_batch_counter_hook_function(self)

    def add_flops_counter_hook_function(module):
        if is_supported_instance(module):
            # Never register the same hook twice on a module.
            if hasattr(module, '__flops_handle__'):
                return

            handle = module.register_forward_hook(
                get_modules_mapping()[type(module)])
            # Keep the handle so stop_flops_count can remove the hook.
            module.__flops_handle__ = handle

    # Fix: the original passed `partial(add_flops_counter_hook_function)`
    # with no bound arguments -- a no-op wrapper; pass the function directly.
    self.apply(add_flops_counter_hook_function)
|
| 376 |
+
|
| 377 |
+
|
| 378 |
+
def stop_flops_count(self):
    """Pause the per-image flops accounting.

    Removes the batch-counter hook and every registered flops hook; the
    statistics accumulated so far are kept. Available after
    ``add_flops_counting_methods()`` is called on a desired net object, and
    can be used to pause the computation whenever.
    """
    remove_batch_counter_hook_function(self)
    self.apply(remove_flops_counter_hook_function)
|
| 387 |
+
|
| 388 |
+
|
| 389 |
+
def reset_flops_count(self):
    """Reset statistics computed so far.

    A method to reset computed statistics, which will be available after
    ``add_flops_counting_methods()`` is called on a desired net object.
    """
    # Zero the sample counter, then zero every module's FLOPs/params counters.
    add_batch_counter_variables_or_reset(self)
    self.apply(add_flops_counter_variable_or_reset)
+
|
| 398 |
+
|
| 399 |
+
# ---- Internal functions
|
| 400 |
+
def empty_flops_counter_hook(module, input, output):
    """Forward hook for modules that contribute no FLOPs (keeps the counter
    attribute in use so accounting stays uniform)."""
    module.__flops__ += 0
+
|
| 403 |
+
|
| 404 |
+
def upsample_flops_counter_hook(module, input, output):
    """Count one FLOP per produced output element for upsampling layers."""
    out = output[0]
    # Total element count: batch size times the product of remaining dims.
    numel = out.shape[0]
    for dim in out.shape[1:]:
        numel *= dim
    module.__flops__ += int(numel)
+
|
| 412 |
+
|
| 413 |
+
def relu_flops_counter_hook(module, input, output):
    """Count one FLOP per activation element (ReLU-family layers)."""
    module.__flops__ += int(output.numel())
+
|
| 417 |
+
|
| 418 |
+
def linear_flops_counter_hook(module, input, output):
    """Count multiply-accumulates of a fully connected layer."""
    x = input[0]
    # pytorch checks dimensions, so here we don't care much — only the last
    # output dimension matters for the MAC count.
    out_features = output.shape[-1]
    module.__flops__ += int(np.prod(x.shape) * out_features)
+
|
| 424 |
+
|
| 425 |
+
def pool_flops_counter_hook(module, input, output):
    """Pooling costs roughly one FLOP per input element."""
    module.__flops__ += int(np.prod(input[0].shape))
+
|
| 429 |
+
|
| 430 |
+
def norm_flops_counter_hook(module, input, output):
    """Count FLOPs of normalization layers (doubled when affine)."""
    x = input[0]
    flops = np.prod(x.shape)
    has_affine = (getattr(module, 'affine', False)
                  or getattr(module, 'elementwise_affine', False))
    if has_affine:
        # Scale + shift adds one multiply and one add per element.
        flops *= 2
    module.__flops__ += int(flops)
+
|
| 439 |
+
|
| 440 |
+
def deconv_flops_counter_hook(conv_module, input, output):
    """Count FLOPs of a 2-d transposed convolution.

    Conv FLOPs are computed per *input* position (each input element is
    multiplied against the full kernel); bias adds one FLOP per output
    element.
    """
    # Can have multiple inputs, getting the first one
    input = input[0]

    batch_size = input.shape[0]
    input_height, input_width = input.shape[2:]

    kernel_height, kernel_width = conv_module.kernel_size
    in_channels = conv_module.in_channels
    out_channels = conv_module.out_channels
    groups = conv_module.groups

    filters_per_channel = out_channels // groups
    conv_per_position_flops = (
        kernel_height * kernel_width * in_channels * filters_per_channel)

    active_elements_count = batch_size * input_height * input_width
    overall_conv_flops = conv_per_position_flops * active_elements_count
    bias_flops = 0
    if conv_module.bias is not None:
        output_height, output_width = output.shape[2:]
        # Fix: the original multiplied ``output_height`` twice, which
        # miscounted bias FLOPs for non-square outputs.
        bias_flops = out_channels * batch_size * output_height * output_width
    overall_flops = overall_conv_flops + bias_flops

    conv_module.__flops__ += int(overall_flops)
+
|
| 466 |
+
|
| 467 |
+
def conv_flops_counter_hook(conv_module, input, output):
    """Count FLOPs of an N-d convolution (one MAC counted as one FLOP)."""
    # Hooks may receive several inputs; only the first carries the data.
    x = input[0]

    batch_size = x.shape[0]
    spatial_dims = list(output.shape[2:])

    kernel_dims = list(conv_module.kernel_size)
    in_channels = conv_module.in_channels
    out_channels = conv_module.out_channels
    groups = conv_module.groups

    filters_per_channel = out_channels // groups
    flops_per_position = int(
        np.prod(kernel_dims)) * in_channels * filters_per_channel

    positions = batch_size * int(np.prod(spatial_dims))
    total_flops = flops_per_position * positions

    if conv_module.bias is not None:
        # One add per output element.
        total_flops += out_channels * positions

    conv_module.__flops__ += int(total_flops)
+
|
| 497 |
+
|
| 498 |
+
def batch_counter_hook(module, input, output):
    """Accumulate the number of samples the hooked module has processed."""
    if len(input) > 0:
        # Can have multiple inputs; the first one carries the batch dim.
        batch_size = len(input[0])
    else:
        batch_size = 1
        print('Warning! No positional inputs found for a module, '
              'assuming batch size is 1.')
    module.__batch_counter__ += batch_size
+
|
| 510 |
+
|
| 511 |
+
def add_batch_counter_variables_or_reset(module):
    """Initialize (or reset) the per-network sample counter."""
    module.__batch_counter__ = 0
+
|
| 515 |
+
|
| 516 |
+
def add_batch_counter_hook_function(module):
    """Attach the batch-counting forward hook exactly once per module."""
    if hasattr(module, '__batch_counter_handle__'):
        return
    module.__batch_counter_handle__ = module.register_forward_hook(
        batch_counter_hook)
+
|
| 523 |
+
|
| 524 |
+
def remove_batch_counter_hook_function(module):
    """Detach the batch-counting hook if one was installed; no-op otherwise."""
    if hasattr(module, '__batch_counter_handle__'):
        module.__batch_counter_handle__.remove()
        del module.__batch_counter_handle__
+
|
| 529 |
+
|
| 530 |
+
def add_flops_counter_variable_or_reset(module):
    """Initialize (or reset) ``__flops__`` and ``__params__`` on modules that
    have a registered counting hook."""
    if is_supported_instance(module):
        if hasattr(module, '__flops__') or hasattr(module, '__params__'):
            # Fix: the original message had no spacing around the module
            # name, printing e.g. "...for the moduleConv2d ptflops...".
            print('Warning: variables __flops__ or __params__ are already '
                  'defined for the module ' + type(module).__name__ +
                  '. ptflops can affect your code!')
        module.__flops__ = 0
        module.__params__ = get_model_parameters_number(module)
+
|
| 539 |
+
|
| 540 |
+
def is_supported_instance(module):
    """Return True if a FLOPs-counting hook is registered for this exact
    module type (subclasses are deliberately not matched)."""
    return type(module) in get_modules_mapping()
+
|
| 545 |
+
|
| 546 |
+
def remove_flops_counter_hook_function(module):
    """Detach the FLOPs-counting hook from a supported module, if present."""
    if not is_supported_instance(module):
        return
    if hasattr(module, '__flops_handle__'):
        module.__flops_handle__.remove()
        del module.__flops_handle__
+
|
| 552 |
+
|
| 553 |
+
def get_modules_mapping():
    """Map each supported module type to its FLOPs-counting forward hook."""
    conv_hooks = {
        nn.Conv1d: conv_flops_counter_hook,
        nn.Conv2d: conv_flops_counter_hook,
        mmcv.cnn.bricks.Conv2d: conv_flops_counter_hook,
        nn.Conv3d: conv_flops_counter_hook,
        mmcv.cnn.bricks.Conv3d: conv_flops_counter_hook,
    }
    activation_hooks = {
        act: relu_flops_counter_hook
        for act in (nn.ReLU, nn.PReLU, nn.ELU, nn.LeakyReLU, nn.ReLU6)
    }
    pooling_hooks = {
        pool: pool_flops_counter_hook
        for pool in (nn.MaxPool1d, nn.AvgPool1d, nn.AvgPool2d, nn.MaxPool2d,
                     mmcv.cnn.bricks.MaxPool2d, nn.MaxPool3d,
                     mmcv.cnn.bricks.MaxPool3d, nn.AvgPool3d,
                     nn.AdaptiveMaxPool1d, nn.AdaptiveAvgPool1d,
                     nn.AdaptiveMaxPool2d, nn.AdaptiveAvgPool2d,
                     nn.AdaptiveMaxPool3d, nn.AdaptiveAvgPool3d)
    }
    norm_hooks = {
        norm: norm_flops_counter_hook
        for norm in (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d,
                     nn.GroupNorm, nn.InstanceNorm1d, nn.InstanceNorm2d,
                     nn.InstanceNorm3d, nn.LayerNorm)
    }
    other_hooks = {
        # FC
        nn.Linear: linear_flops_counter_hook,
        mmcv.cnn.bricks.Linear: linear_flops_counter_hook,
        # Upscale
        nn.Upsample: upsample_flops_counter_hook,
        # Deconvolution
        nn.ConvTranspose2d: deconv_flops_counter_hook,
        mmcv.cnn.bricks.ConvTranspose2d: deconv_flops_counter_hook,
    }
    return {
        **conv_hooks,
        **activation_hooks,
        **pooling_hooks,
        **norm_hooks,
        **other_hooks,
    }
|
RAVE-main/annotator/mmpkg/mmcv/cnn/utils/fuse_conv_bn.py
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import torch
|
| 3 |
+
import torch.nn as nn
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def _fuse_conv_bn(conv, bn):
|
| 7 |
+
"""Fuse conv and bn into one module.
|
| 8 |
+
|
| 9 |
+
Args:
|
| 10 |
+
conv (nn.Module): Conv to be fused.
|
| 11 |
+
bn (nn.Module): BN to be fused.
|
| 12 |
+
|
| 13 |
+
Returns:
|
| 14 |
+
nn.Module: Fused module.
|
| 15 |
+
"""
|
| 16 |
+
conv_w = conv.weight
|
| 17 |
+
conv_b = conv.bias if conv.bias is not None else torch.zeros_like(
|
| 18 |
+
bn.running_mean)
|
| 19 |
+
|
| 20 |
+
factor = bn.weight / torch.sqrt(bn.running_var + bn.eps)
|
| 21 |
+
conv.weight = nn.Parameter(conv_w *
|
| 22 |
+
factor.reshape([conv.out_channels, 1, 1, 1]))
|
| 23 |
+
conv.bias = nn.Parameter((conv_b - bn.running_mean) * factor + bn.bias)
|
| 24 |
+
return conv
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def fuse_conv_bn(module):
    """Recursively fuse conv and bn in a module.

    During inference batch norm only applies its fixed per-channel affine
    transform, so it can be folded into the preceding conv layer to save
    computation and simplify the network structure.

    Args:
        module (nn.Module): Module to be fused.

    Returns:
        nn.Module: Fused module (modified in place).
    """
    last_conv = None
    last_conv_name = None

    for name, child in module.named_children():
        is_bn = isinstance(
            child, (nn.modules.batchnorm._BatchNorm, nn.SyncBatchNorm))
        if is_bn:
            if last_conv is None:
                # Only fuse a BN that directly follows a conv.
                continue
            module._modules[last_conv_name] = _fuse_conv_bn(last_conv, child)
            # Keep the module structure intact: swap the BN for an Identity
            # instead of deleting it.
            module._modules[name] = nn.Identity()
            last_conv = None
        elif isinstance(child, nn.Conv2d):
            last_conv = child
            last_conv_name = name
        else:
            fuse_conv_bn(child)
    return module
|
RAVE-main/annotator/mmpkg/mmcv/cnn/utils/sync_bn.py
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
|
| 3 |
+
import annotator.mmpkg.mmcv as mmcv
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class _BatchNormXd(torch.nn.modules.batchnorm._BatchNorm):
|
| 7 |
+
"""A general BatchNorm layer without input dimension check.
|
| 8 |
+
|
| 9 |
+
Reproduced from @kapily's work:
|
| 10 |
+
(https://github.com/pytorch/pytorch/issues/41081#issuecomment-783961547)
|
| 11 |
+
The only difference between BatchNorm1d, BatchNorm2d, BatchNorm3d, etc
|
| 12 |
+
is `_check_input_dim` that is designed for tensor sanity checks.
|
| 13 |
+
The check has been bypassed in this class for the convenience of converting
|
| 14 |
+
SyncBatchNorm.
|
| 15 |
+
"""
|
| 16 |
+
|
| 17 |
+
def _check_input_dim(self, input):
|
| 18 |
+
return
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def revert_sync_batchnorm(module):
    """Helper function to convert all `SyncBatchNorm` (SyncBN) and
    `mmcv.ops.sync_bn.SyncBatchNorm`(MMSyncBN) layers in the model to
    `BatchNormXd` layers.

    Adapted from @kapily's work:
    (https://github.com/pytorch/pytorch/issues/41081#issuecomment-783961547)

    Args:
        module (nn.Module): The module containing `SyncBatchNorm` layers.

    Returns:
        module_output: The converted module with `BatchNormXd` layers.
    """
    sync_bn_types = [torch.nn.modules.batchnorm.SyncBatchNorm]
    # mmcv may be built without compiled ops; guard the lookup.
    if hasattr(mmcv, 'ops'):
        sync_bn_types.append(mmcv.ops.SyncBatchNorm)

    converted = module
    if isinstance(module, tuple(sync_bn_types)):
        converted = _BatchNormXd(module.num_features, module.eps,
                                 module.momentum, module.affine,
                                 module.track_running_stats)
        if module.affine:
            # no_grad() may not be needed here but
            # just to be consistent with `convert_sync_batchnorm()`
            with torch.no_grad():
                converted.weight = module.weight
                converted.bias = module.bias
        converted.running_mean = module.running_mean
        converted.running_var = module.running_var
        converted.num_batches_tracked = module.num_batches_tracked
        converted.training = module.training
        # qconfig exists in quantized models
        if hasattr(module, 'qconfig'):
            converted.qconfig = module.qconfig
    for name, child in module.named_children():
        converted.add_module(name, revert_sync_batchnorm(child))
    del module
    return converted
|
RAVE-main/annotator/mmpkg/mmcv/cnn/utils/weight_init.py
ADDED
|
@@ -0,0 +1,684 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import copy
|
| 3 |
+
import math
|
| 4 |
+
import warnings
|
| 5 |
+
|
| 6 |
+
import numpy as np
|
| 7 |
+
import torch
|
| 8 |
+
import torch.nn as nn
|
| 9 |
+
from torch import Tensor
|
| 10 |
+
|
| 11 |
+
from annotator.mmpkg.mmcv.utils import Registry, build_from_cfg, get_logger, print_log
|
| 12 |
+
|
| 13 |
+
INITIALIZERS = Registry('initializer')
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def update_init_info(module, init_info):
    """Update the `_params_init_info` in the module if the value of parameters
    are changed.

    Args:
        module (obj:`nn.Module`): The module of PyTorch with a user-defined
            attribute `_params_init_info` which records the initialization
            information.
        init_info (str): The string that describes the initialization.
    """
    assert hasattr(
        module,
        '_params_init_info'), f'Can not find `_params_init_info` in {module}'
    for name, param in module.named_parameters():

        assert param in module._params_init_info, (
            f'Find a new :obj:`Parameter` '
            f'named `{name}` during executing the '
            f'`init_weights` of '
            f'`{module.__class__.__name__}`. '
            f'Please do not add or '
            f'replace parameters during executing '
            f'the `init_weights`. ')

        # A changed mean is used as a cheap proxy for "this parameter was
        # touched by the current initializer".
        record = module._params_init_info[param]
        mean_value = param.data.mean()
        if record['tmp_mean_value'] != mean_value:
            record['init_info'] = init_info
            record['tmp_mean_value'] = mean_value
+
|
| 47 |
+
|
| 48 |
+
def constant_init(module, val, bias=0):
    """Fill ``module.weight`` with ``val`` and ``module.bias`` with ``bias``,
    skipping whichever attribute is missing or None."""
    weight = getattr(module, 'weight', None)
    if weight is not None:
        nn.init.constant_(weight, val)
    bias_param = getattr(module, 'bias', None)
    if bias_param is not None:
        nn.init.constant_(bias_param, bias)
+
|
| 54 |
+
|
| 55 |
+
def xavier_init(module, gain=1, bias=0, distribution='normal'):
    """Xavier/Glorot weight initialization with a constant bias fill.

    ``distribution`` selects between the uniform and normal variants.
    """
    assert distribution in ['uniform', 'normal']
    if hasattr(module, 'weight') and module.weight is not None:
        initializer = (
            nn.init.xavier_uniform_
            if distribution == 'uniform' else nn.init.xavier_normal_)
        initializer(module.weight, gain=gain)
    if hasattr(module, 'bias') and module.bias is not None:
        nn.init.constant_(module.bias, bias)
+
|
| 65 |
+
|
| 66 |
+
def normal_init(module, mean=0, std=1, bias=0):
    """Draw ``module.weight`` from N(mean, std**2); fill bias with ``bias``."""
    if getattr(module, 'weight', None) is not None:
        nn.init.normal_(module.weight, mean, std)
    if getattr(module, 'bias', None) is not None:
        nn.init.constant_(module.bias, bias)
+
|
| 72 |
+
|
| 73 |
+
def trunc_normal_init(module: nn.Module,
                      mean: float = 0,
                      std: float = 1,
                      a: float = -2,
                      b: float = 2,
                      bias: float = 0) -> None:
    """Truncated-normal weight initialization (values resampled into
    ``[a, b]``) plus a constant bias fill."""
    if getattr(module, 'weight', None) is not None:
        trunc_normal_(module.weight, mean, std, a, b)  # type: ignore
    if getattr(module, 'bias', None) is not None:
        nn.init.constant_(module.bias, bias)  # type: ignore
+
|
| 84 |
+
|
| 85 |
+
def uniform_init(module, a=0, b=1, bias=0):
    """Draw ``module.weight`` from U(a, b); fill bias with ``bias``."""
    if getattr(module, 'weight', None) is not None:
        nn.init.uniform_(module.weight, a, b)
    if getattr(module, 'bias', None) is not None:
        nn.init.constant_(module.bias, bias)
+
|
| 91 |
+
|
| 92 |
+
def kaiming_init(module,
                 a=0,
                 mode='fan_out',
                 nonlinearity='relu',
                 bias=0,
                 distribution='normal'):
    """Kaiming/He weight initialization with a constant bias fill.

    ``distribution`` selects between the uniform and normal variants.
    """
    assert distribution in ['uniform', 'normal']
    if hasattr(module, 'weight') and module.weight is not None:
        initializer = (
            nn.init.kaiming_uniform_
            if distribution == 'uniform' else nn.init.kaiming_normal_)
        initializer(module.weight, a=a, mode=mode, nonlinearity=nonlinearity)
    if hasattr(module, 'bias') and module.bias is not None:
        nn.init.constant_(module.bias, bias)
+
|
| 109 |
+
|
| 110 |
+
def caffe2_xavier_init(module, bias=0):
    """Caffe2-style Xavier initialization.

    `XavierFill` in Caffe2 corresponds to `kaiming_uniform_` in PyTorch
    (acknowledgment to FAIR's internal code).
    """
    kaiming_init(
        module,
        a=1,
        mode='fan_in',
        nonlinearity='leaky_relu',
        bias=bias,
        distribution='uniform')
+
|
| 121 |
+
|
| 122 |
+
def bias_init_with_prob(prior_prob):
    """initialize conv/fc bias value according to a given probability value."""
    # Inverse sigmoid (logit) of the prior probability.
    return float(-np.log((1 - prior_prob) / prior_prob))
+
|
| 127 |
+
|
| 128 |
+
def _get_bases_name(m):
|
| 129 |
+
return [b.__name__ for b in m.__class__.__bases__]
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
class BaseInit(object):
    """Common bookkeeping for initializer classes.

    Validates ``bias``, ``bias_prob`` and ``layer``, resolves the effective
    bias value, and normalizes ``layer`` to a list of class names.
    """

    def __init__(self, *, bias=0, bias_prob=None, layer=None):
        self.wholemodule = False
        if not isinstance(bias, (int, float)):
            raise TypeError(f'bias must be a number, but got a {type(bias)}')

        if bias_prob is not None:
            if not isinstance(bias_prob, float):
                raise TypeError(f'bias_prob type must be float, \
                    but got {type(bias_prob)}')

        if layer is not None:
            if not isinstance(layer, (str, list)):
                raise TypeError(f'layer must be a str or a list of str, \
                    but got a {type(layer)}')
        else:
            layer = []

        # A bias probability, when given, overrides the plain bias value.
        self.bias = bias_init_with_prob(bias_prob) if bias_prob is not None \
            else bias
        self.layer = [layer] if isinstance(layer, str) else layer

    def _get_init_info(self):
        return f'{self.__class__.__name__}, bias={self.bias}'
+
|
| 161 |
+
|
| 162 |
+
@INITIALIZERS.register_module(name='Constant')
class ConstantInit(BaseInit):
    """Initialize module parameters with constant values.

    Args:
        val (int | float): the value to fill the weights in the module with
        bias (int | float): the value to fill the bias. Defaults to 0.
        bias_prob (float, optional): the probability for bias initialization.
            Defaults to None.
        layer (str | list[str], optional): the layer will be initialized.
            Defaults to None.
    """

    def __init__(self, val, **kwargs):
        super().__init__(**kwargs)
        self.val = val

    def __call__(self, module):

        def init(m):
            if self.wholemodule:
                constant_init(m, self.val, self.bias)
                return
            # Match either the class name itself or any direct base name.
            candidates = {m.__class__.__name__, *_get_bases_name(m)}
            if candidates & set(self.layer):
                constant_init(m, self.val, self.bias)

        module.apply(init)
        if hasattr(module, '_params_init_info'):
            update_init_info(module, init_info=self._get_init_info())

    def _get_init_info(self):
        return f'{self.__class__.__name__}: val={self.val}, bias={self.bias}'
+
|
| 198 |
+
|
| 199 |
+
@INITIALIZERS.register_module(name='Xavier')
class XavierInit(BaseInit):
    r"""Initialize module parameters with values according to the method
    described in `Understanding the difficulty of training deep feedforward
    neural networks - Glorot, X. & Bengio, Y. (2010).
    <http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf>`_

    Args:
        gain (int | float): an optional scaling factor. Defaults to 1.
        bias (int | float): the value to fill the bias. Defaults to 0.
        bias_prob (float, optional): the probability for bias initialization.
            Defaults to None.
        distribution (str): distribution either be ``'normal'``
            or ``'uniform'``. Defaults to ``'normal'``.
        layer (str | list[str], optional): the layer will be initialized.
            Defaults to None.
    """

    def __init__(self, gain=1, distribution='normal', **kwargs):
        super().__init__(**kwargs)
        self.gain = gain
        self.distribution = distribution

    def __call__(self, module):

        def init(m):
            if self.wholemodule:
                xavier_init(m, self.gain, self.bias, self.distribution)
                return
            # Match either the class name itself or any direct base name.
            candidates = {m.__class__.__name__, *_get_bases_name(m)}
            if candidates & set(self.layer):
                xavier_init(m, self.gain, self.bias, self.distribution)

        module.apply(init)
        if hasattr(module, '_params_init_info'):
            update_init_info(module, init_info=self._get_init_info())

    def _get_init_info(self):
        return (f'{self.__class__.__name__}: gain={self.gain}, '
                f'distribution={self.distribution}, bias={self.bias}')
+
|
| 242 |
+
|
| 243 |
+
@INITIALIZERS.register_module(name='Normal')
class NormalInit(BaseInit):
    r"""Initialize module parameters with the values drawn from the normal
    distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`.

    Args:
        mean (int | float):the mean of the normal distribution. Defaults to 0.
        std (int | float): the standard deviation of the normal distribution.
            Defaults to 1.
        bias (int | float): the value to fill the bias. Defaults to 0.
        bias_prob (float, optional): the probability for bias initialization.
            Defaults to None.
        layer (str | list[str], optional): the layer will be initialized.
            Defaults to None.
    """

    def __init__(self, mean=0, std=1, **kwargs):
        super().__init__(**kwargs)
        self.mean = mean
        self.std = std

    def __call__(self, module):

        def init(m):
            if self.wholemodule:
                normal_init(m, self.mean, self.std, self.bias)
                return
            # Match either the class name itself or any direct base name.
            candidates = {m.__class__.__name__, *_get_bases_name(m)}
            if candidates & set(self.layer):
                normal_init(m, self.mean, self.std, self.bias)

        module.apply(init)
        if hasattr(module, '_params_init_info'):
            update_init_info(module, init_info=self._get_init_info())

    def _get_init_info(self):
        return (f'{self.__class__.__name__}: mean={self.mean},'
                f' std={self.std}, bias={self.bias}')
+
|
| 285 |
+
|
| 286 |
+
@INITIALIZERS.register_module(name='TruncNormal')
class TruncNormalInit(BaseInit):
    r"""Initialize module parameters with the values drawn from the normal
    distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` with values
    outside :math:`[a, b]`.

    Args:
        mean (float): the mean of the normal distribution. Defaults to 0.
        std (float): the standard deviation of the normal distribution.
            Defaults to 1.
        a (float): The minimum cutoff value.
        b (float): The maximum cutoff value.
        bias (float): the value to fill the bias. Defaults to 0.
        bias_prob (float, optional): the probability for bias initialization.
            Defaults to None.
        layer (str | list[str], optional): the layer will be initialized.
            Defaults to None.
    """

    def __init__(self,
                 mean: float = 0,
                 std: float = 1,
                 a: float = -2,
                 b: float = 2,
                 **kwargs) -> None:
        super().__init__(**kwargs)
        self.mean = mean
        self.std = std
        self.a = a
        self.b = b

    def __call__(self, module: nn.Module) -> None:

        def init(m):
            if self.wholemodule:
                trunc_normal_init(m, self.mean, self.std, self.a, self.b,
                                  self.bias)
                return
            # Match either the class name itself or any direct base name.
            candidates = {m.__class__.__name__, *_get_bases_name(m)}
            if candidates & set(self.layer):
                trunc_normal_init(m, self.mean, self.std, self.a, self.b,
                                  self.bias)

        module.apply(init)
        if hasattr(module, '_params_init_info'):
            update_init_info(module, init_info=self._get_init_info())

    def _get_init_info(self):
        return (f'{self.__class__.__name__}: a={self.a}, b={self.b},'
                f' mean={self.mean}, std={self.std}, bias={self.bias}')
+
|
| 340 |
+
|
| 341 |
+
@INITIALIZERS.register_module(name='Uniform')
class UniformInit(BaseInit):
    r"""Initialize module parameters with values drawn from the uniform
    distribution :math:`\mathcal{U}(a, b)`.

    Args:
        a (int | float): Lower bound of the uniform distribution.
            Defaults to 0.
        b (int | float): Upper bound of the uniform distribution.
            Defaults to 1.
        bias (int | float): Value used to fill the bias. Defaults to 0.
        bias_prob (float, optional): Probability for bias initialization.
            Defaults to None.
        layer (str | list[str], optional): Layer(s) to be initialized.
            Defaults to None.
    """

    def __init__(self, a=0, b=1, **kwargs):
        super().__init__(**kwargs)
        self.a = a
        self.b = b

    def __call__(self, module):

        def init(m):
            if self.wholemodule:
                # Override mode: initialize this module unconditionally.
                uniform_init(m, self.a, self.b, self.bias)
                return
            # Match either the concrete class name or any base-class name.
            candidates = {m.__class__.__name__, *_get_bases_name(m)}
            if candidates & set(self.layer):
                uniform_init(m, self.a, self.b, self.bias)

        module.apply(init)
        if hasattr(module, '_params_init_info'):
            update_init_info(module, init_info=self._get_init_info())

    def _get_init_info(self):
        return (f'{self.__class__.__name__}: a={self.a},'
                f' b={self.b}, bias={self.bias}')
|
| 382 |
+
|
| 383 |
+
|
| 384 |
+
@INITIALIZERS.register_module(name='Kaiming')
class KaimingInit(BaseInit):
    r"""Initialize module parameters with the values according to the method
    described in `Delving deep into rectifiers: Surpassing human-level
    performance on ImageNet classification - He, K. et al. (2015).
    <https://www.cv-foundation.org/openaccess/content_iccv_2015/
    papers/He_Delving_Deep_into_ICCV_2015_paper.pdf>`_

    Args:
        a (int | float): the negative slope of the rectifier used after this
            layer (only used with ``'leaky_relu'``). Defaults to 0.
        mode (str): either ``'fan_in'`` or ``'fan_out'``. Choosing
            ``'fan_in'`` preserves the magnitude of the variance of the weights
            in the forward pass. Choosing ``'fan_out'`` preserves the
            magnitudes in the backwards pass. Defaults to ``'fan_out'``.
        nonlinearity (str): the non-linear function (`nn.functional` name),
            recommended to use only with ``'relu'`` or ``'leaky_relu'`` .
            Defaults to 'relu'.
        bias (int | float): the value to fill the bias. Defaults to 0.
        bias_prob (float, optional): the probability for bias initialization.
            Defaults to None.
        distribution (str): distribution either be ``'normal'`` or
            ``'uniform'``. Defaults to ``'normal'``.
        layer (str | list[str], optional): the layer will be initialized.
            Defaults to None.
    """

    def __init__(self,
                 a=0,
                 mode='fan_out',
                 nonlinearity='relu',
                 distribution='normal',
                 **kwargs):
        super().__init__(**kwargs)
        self.a = a
        self.mode = mode
        self.nonlinearity = nonlinearity
        self.distribution = distribution

    def __call__(self, module):

        def init(m):
            if self.wholemodule:
                # Override mode: initialize this module unconditionally.
                kaiming_init(m, self.a, self.mode, self.nonlinearity,
                             self.bias, self.distribution)
            else:
                # Only initialize layers whose class (or base class) name
                # is listed in ``self.layer``.
                layername = m.__class__.__name__
                basesname = _get_bases_name(m)
                if len(set(self.layer) & set([layername] + basesname)):
                    kaiming_init(m, self.a, self.mode, self.nonlinearity,
                                 self.bias, self.distribution)

        module.apply(init)
        if hasattr(module, '_params_init_info'):
            update_init_info(module, init_info=self._get_init_info())

    def _get_init_info(self):
        # Fixed a stray space that produced "distribution =<value>" in the
        # recorded init-info summary string.
        info = f'{self.__class__.__name__}: a={self.a}, mode={self.mode}, ' \
               f'nonlinearity={self.nonlinearity}, ' \
               f'distribution={self.distribution}, bias={self.bias}'
        return info
|
| 445 |
+
|
| 446 |
+
|
| 447 |
+
@INITIALIZERS.register_module(name='Caffe2Xavier')
class Caffe2XavierInit(KaimingInit):
    """Caffe2-style Xavier initialization.

    `XavierFill` in Caffe2 corresponds to `kaiming_uniform_` in PyTorch
    with ``a=1``, ``mode='fan_in'`` and a ``'leaky_relu'`` nonlinearity.
    Acknowledgment to FAIR's internal code.
    """

    def __init__(self, **kwargs):
        super().__init__(
            a=1,
            mode='fan_in',
            nonlinearity='leaky_relu',
            distribution='uniform',
            **kwargs)

    # NOTE: the previous ``__call__`` override only delegated to
    # ``super().__call__(module)``; it was removed as redundant — the
    # inherited implementation is used directly and behavior is unchanged.
|
| 461 |
+
|
| 462 |
+
|
| 463 |
+
@INITIALIZERS.register_module(name='Pretrained')
class PretrainedInit(object):
    """Initialize a module by loading weights from a pretrained model.

    Args:
        checkpoint (str): The checkpoint file of the pretrained model to
            load.
        prefix (str, optional): Prefix selecting a sub-module inside the
            pretrained model, used to load only part of it — e.g.
            ``prefix='backbone.'`` loads just a detector's backbone.
            Defaults to None.
        map_location (str): Location into which checkpoint tensors are
            mapped.
    """

    def __init__(self, checkpoint, prefix=None, map_location=None):
        self.checkpoint = checkpoint
        self.prefix = prefix
        self.map_location = map_location

    def __call__(self, module):
        from annotator.mmpkg.mmcv.runner import (_load_checkpoint_with_prefix,
                                                 load_checkpoint,
                                                 load_state_dict)
        logger = get_logger('mmcv')
        if self.prefix is not None:
            # Load only the weights under the selected prefix.
            print_log(
                f'load {self.prefix} in model from: {self.checkpoint}',
                logger=logger)
            state_dict = _load_checkpoint_with_prefix(
                self.prefix, self.checkpoint, map_location=self.map_location)
            load_state_dict(module, state_dict, strict=False, logger=logger)
        else:
            # Load the full checkpoint into the module.
            print_log(f'load model from: {self.checkpoint}', logger=logger)
            load_checkpoint(
                module,
                self.checkpoint,
                map_location=self.map_location,
                strict=False,
                logger=logger)

        if hasattr(module, '_params_init_info'):
            update_init_info(module, init_info=self._get_init_info())

    def _get_init_info(self):
        return f'{self.__class__.__name__}: load from {self.checkpoint}'
|
| 509 |
+
|
| 510 |
+
|
| 511 |
+
def _initialize(module, cfg, wholemodule=False):
    """Build an initializer from ``cfg`` and apply it to ``module``."""
    initializer = build_from_cfg(cfg, INITIALIZERS)
    # wholemodule is set in override mode: override cfgs carry no ``layer``
    # key, and the initializer then fills the whole named submodule.
    initializer.wholemodule = wholemodule
    initializer(module)
|
| 518 |
+
|
| 519 |
+
|
| 520 |
+
def _initialize_override(module, override, cfg):
|
| 521 |
+
if not isinstance(override, (dict, list)):
|
| 522 |
+
raise TypeError(f'override must be a dict or a list of dict, \
|
| 523 |
+
but got {type(override)}')
|
| 524 |
+
|
| 525 |
+
override = [override] if isinstance(override, dict) else override
|
| 526 |
+
|
| 527 |
+
for override_ in override:
|
| 528 |
+
|
| 529 |
+
cp_override = copy.deepcopy(override_)
|
| 530 |
+
name = cp_override.pop('name', None)
|
| 531 |
+
if name is None:
|
| 532 |
+
raise ValueError('`override` must contain the key "name",'
|
| 533 |
+
f'but got {cp_override}')
|
| 534 |
+
# if override only has name key, it means use args in init_cfg
|
| 535 |
+
if not cp_override:
|
| 536 |
+
cp_override.update(cfg)
|
| 537 |
+
# if override has name key and other args except type key, it will
|
| 538 |
+
# raise error
|
| 539 |
+
elif 'type' not in cp_override.keys():
|
| 540 |
+
raise ValueError(
|
| 541 |
+
f'`override` need "type" key, but got {cp_override}')
|
| 542 |
+
|
| 543 |
+
if hasattr(module, name):
|
| 544 |
+
_initialize(getattr(module, name), cp_override, wholemodule=True)
|
| 545 |
+
else:
|
| 546 |
+
raise RuntimeError(f'module did not have attribute {name}, '
|
| 547 |
+
f'but init_cfg is {cp_override}.')
|
| 548 |
+
|
| 549 |
+
|
| 550 |
+
def initialize(module, init_cfg):
    """Initialize a module.

    Args:
        module (``torch.nn.Module``): the module will be initialized.
        init_cfg (dict | list[dict]): initialization configuration dict to
            define initializer. OpenMMLab has implemented 6 initializers
            including ``Constant``, ``Xavier``, ``Normal``, ``Uniform``,
            ``Kaiming``, and ``Pretrained``.

    Raises:
        TypeError: If ``init_cfg`` is neither a dict nor a list of dicts.

    Example:
        >>> module = nn.Linear(2, 3, bias=True)
        >>> init_cfg = dict(type='Constant', layer='Linear', val=1, bias=2)
        >>> initialize(module, init_cfg)

        >>> module = nn.Sequential(nn.Conv1d(3, 1, 3), nn.Linear(1, 2))
        >>> # define key ``'layer'`` for initializing layer with different
        >>> # configuration
        >>> init_cfg = [dict(type='Constant', layer='Conv1d', val=1),
                dict(type='Constant', layer='Linear', val=2)]
        >>> initialize(module, init_cfg)

        >>> # define key ``'override'`` to initialize some specific part in
        >>> # module
        >>> class FooNet(nn.Module):
        >>>     def __init__(self):
        >>>         super().__init__()
        >>>         self.feat = nn.Conv2d(3, 16, 3)
        >>>         self.reg = nn.Conv2d(16, 10, 3)
        >>>         self.cls = nn.Conv2d(16, 5, 3)
        >>> model = FooNet()
        >>> init_cfg = dict(type='Constant', val=1, bias=2, layer='Conv2d',
        >>>     override=dict(type='Constant', name='reg', val=3, bias=4))
        >>> initialize(model, init_cfg)

        >>> model = ResNet(depth=50)
        >>> # Initialize weights with the pretrained model.
        >>> init_cfg = dict(type='Pretrained',
                checkpoint='torchvision://resnet50')
        >>> initialize(model, init_cfg)

        >>> # Initialize weights of a sub-module with the specific part of
        >>> # a pretrained model by using "prefix".
        >>> url = 'http://download.openmmlab.com/mmdetection/v2.0/retinanet/'\
        >>>     'retinanet_r50_fpn_1x_coco/'\
        >>>     'retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth'
        >>> init_cfg = dict(type='Pretrained',
                checkpoint=url, prefix='backbone.')
    """
    if not isinstance(init_cfg, (dict, list)):
        # NOTE: message cleaned up — the original used a backslash
        # continuation inside the literal, embedding a run of spaces.
        raise TypeError('init_cfg must be a dict or a list of dict, '
                        f'but got {type(init_cfg)}')

    if isinstance(init_cfg, dict):
        init_cfg = [init_cfg]

    for cfg in init_cfg:
        # should deeply copy the original config because cfg may be used by
        # other modules, e.g., one init_cfg shared by multiple bottleneck
        # blocks, the expected cfg will be changed after pop and will change
        # the initialization behavior of other modules
        cp_cfg = copy.deepcopy(cfg)
        override = cp_cfg.pop('override', None)
        _initialize(module, cp_cfg)

        if override is not None:
            # ``layer`` only applies to the whole-module pass; overrides
            # address submodules by name instead.
            cp_cfg.pop('layer', None)
            _initialize_override(module, override, cp_cfg)
        # Without an override, all attributes in the module share the same
        # initialization and nothing more needs to be done.  (A dead
        # ``else: pass`` branch was removed here.)
|
| 620 |
+
|
| 621 |
+
|
| 622 |
+
def _no_grad_trunc_normal_(tensor: Tensor, mean: float, std: float, a: float,
|
| 623 |
+
b: float) -> Tensor:
|
| 624 |
+
# Method based on
|
| 625 |
+
# https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
|
| 626 |
+
# Modified from
|
| 627 |
+
# https://github.com/pytorch/pytorch/blob/master/torch/nn/init.py
|
| 628 |
+
def norm_cdf(x):
|
| 629 |
+
# Computes standard normal cumulative distribution function
|
| 630 |
+
return (1. + math.erf(x / math.sqrt(2.))) / 2.
|
| 631 |
+
|
| 632 |
+
if (mean < a - 2 * std) or (mean > b + 2 * std):
|
| 633 |
+
warnings.warn(
|
| 634 |
+
'mean is more than 2 std from [a, b] in nn.init.trunc_normal_. '
|
| 635 |
+
'The distribution of values may be incorrect.',
|
| 636 |
+
stacklevel=2)
|
| 637 |
+
|
| 638 |
+
with torch.no_grad():
|
| 639 |
+
# Values are generated by using a truncated uniform distribution and
|
| 640 |
+
# then using the inverse CDF for the normal distribution.
|
| 641 |
+
# Get upper and lower cdf values
|
| 642 |
+
lower = norm_cdf((a - mean) / std)
|
| 643 |
+
upper = norm_cdf((b - mean) / std)
|
| 644 |
+
|
| 645 |
+
# Uniformly fill tensor with values from [lower, upper], then translate
|
| 646 |
+
# to [2lower-1, 2upper-1].
|
| 647 |
+
tensor.uniform_(2 * lower - 1, 2 * upper - 1)
|
| 648 |
+
|
| 649 |
+
# Use inverse cdf transform for normal distribution to get truncated
|
| 650 |
+
# standard normal
|
| 651 |
+
tensor.erfinv_()
|
| 652 |
+
|
| 653 |
+
# Transform to proper mean, std
|
| 654 |
+
tensor.mul_(std * math.sqrt(2.))
|
| 655 |
+
tensor.add_(mean)
|
| 656 |
+
|
| 657 |
+
# Clamp to ensure it's in the proper range
|
| 658 |
+
tensor.clamp_(min=a, max=b)
|
| 659 |
+
return tensor
|
| 660 |
+
|
| 661 |
+
|
| 662 |
+
def trunc_normal_(tensor: Tensor,
                  mean: float = 0.,
                  std: float = 1.,
                  a: float = -2.,
                  b: float = 2.) -> Tensor:
    r"""Fill the input Tensor in place with truncated-normal samples.

    Values are effectively drawn from
    :math:`\mathcal{N}(\text{mean}, \text{std}^2)`, with samples outside
    :math:`[a, b]` redrawn until they lie within the bounds. The generation
    method works best when :math:`a \leq \text{mean} \leq b`.

    Modified from
    https://github.com/pytorch/pytorch/blob/master/torch/nn/init.py

    Args:
        tensor (``torch.Tensor``): an n-dimensional `torch.Tensor`.
        mean (float): the mean of the normal distribution.
        std (float): the standard deviation of the normal distribution.
        a (float): the minimum cutoff value.
        b (float): the maximum cutoff value.
    """
    return _no_grad_trunc_normal_(tensor, mean=mean, std=std, a=a, b=b)
|
RAVE-main/annotator/mmpkg/mmcv/runner/base_runner.py
ADDED
|
@@ -0,0 +1,542 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import copy
|
| 3 |
+
import logging
|
| 4 |
+
import os.path as osp
|
| 5 |
+
import warnings
|
| 6 |
+
from abc import ABCMeta, abstractmethod
|
| 7 |
+
|
| 8 |
+
import torch
|
| 9 |
+
from torch.optim import Optimizer
|
| 10 |
+
|
| 11 |
+
import annotator.mmpkg.mmcv as mmcv
|
| 12 |
+
from ..parallel import is_module_wrapper
|
| 13 |
+
from .checkpoint import load_checkpoint
|
| 14 |
+
from .dist_utils import get_dist_info
|
| 15 |
+
from .hooks import HOOKS, Hook
|
| 16 |
+
from .log_buffer import LogBuffer
|
| 17 |
+
from .priority import Priority, get_priority
|
| 18 |
+
from .utils import get_time_str
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class BaseRunner(metaclass=ABCMeta):
|
| 22 |
+
"""The base class of Runner, a training helper for PyTorch.
|
| 23 |
+
|
| 24 |
+
All subclasses should implement the following APIs:
|
| 25 |
+
|
| 26 |
+
- ``run()``
|
| 27 |
+
- ``train()``
|
| 28 |
+
- ``val()``
|
| 29 |
+
- ``save_checkpoint()``
|
| 30 |
+
|
| 31 |
+
Args:
|
| 32 |
+
model (:obj:`torch.nn.Module`): The model to be run.
|
| 33 |
+
batch_processor (callable): A callable method that process a data
|
| 34 |
+
batch. The interface of this method should be
|
| 35 |
+
`batch_processor(model, data, train_mode) -> dict`
|
| 36 |
+
optimizer (dict or :obj:`torch.optim.Optimizer`): It can be either an
|
| 37 |
+
optimizer (in most cases) or a dict of optimizers (in models that
|
| 38 |
+
requires more than one optimizer, e.g., GAN).
|
| 39 |
+
work_dir (str, optional): The working directory to save checkpoints
|
| 40 |
+
and logs. Defaults to None.
|
| 41 |
+
logger (:obj:`logging.Logger`): Logger used during training.
|
| 42 |
+
Defaults to None. (The default value is just for backward
|
| 43 |
+
compatibility)
|
| 44 |
+
meta (dict | None): A dict records some import information such as
|
| 45 |
+
environment info and seed, which will be logged in logger hook.
|
| 46 |
+
Defaults to None.
|
| 47 |
+
max_epochs (int, optional): Total training epochs.
|
| 48 |
+
max_iters (int, optional): Total training iterations.
|
| 49 |
+
"""
|
| 50 |
+
|
| 51 |
+
    def __init__(self,
                 model,
                 batch_processor=None,
                 optimizer=None,
                 work_dir=None,
                 logger=None,
                 meta=None,
                 max_iters=None,
                 max_epochs=None):
        """Validate all constructor arguments and set up runner state.

        Raises:
            TypeError: If ``batch_processor``, ``optimizer``, ``logger``,
                ``meta`` or ``work_dir`` has an unsupported type.
            RuntimeError: If both ``batch_processor`` and
                ``model.train_step()``/``model.val_step()`` are available.
            ValueError: If both ``max_epochs`` and ``max_iters`` are given.
        """
        if batch_processor is not None:
            if not callable(batch_processor):
                raise TypeError('batch_processor must be callable, '
                                f'but got {type(batch_processor)}')
            warnings.warn('batch_processor is deprecated, please implement '
                          'train_step() and val_step() in the model instead.')
            # raise an error if `batch_processor` is not None and
            # `model.train_step()` exists.
            if is_module_wrapper(model):
                _model = model.module
            else:
                _model = model
            if hasattr(_model, 'train_step') or hasattr(_model, 'val_step'):
                raise RuntimeError(
                    'batch_processor and model.train_step()/model.val_step() '
                    'cannot be both available.')
        else:
            assert hasattr(model, 'train_step')

        # check the type of `optimizer`: a single Optimizer, a dict of
        # Optimizers (e.g. for GANs), or None are all accepted.
        if isinstance(optimizer, dict):
            for name, optim in optimizer.items():
                if not isinstance(optim, Optimizer):
                    raise TypeError(
                        f'optimizer must be a dict of torch.optim.Optimizers, '
                        f'but optimizer["{name}"] is a {type(optim)}')
        elif not isinstance(optimizer, Optimizer) and optimizer is not None:
            raise TypeError(
                f'optimizer must be a torch.optim.Optimizer object '
                f'or dict or None, but got {type(optimizer)}')

        # check the type of `logger`
        if not isinstance(logger, logging.Logger):
            raise TypeError(f'logger must be a logging.Logger object, '
                            f'but got {type(logger)}')

        # check the type of `meta`
        if meta is not None and not isinstance(meta, dict):
            raise TypeError(
                f'meta must be a dict or None, but got {type(meta)}')

        self.model = model
        self.batch_processor = batch_processor
        self.optimizer = optimizer
        self.logger = logger
        self.meta = meta
        # create work_dir (side effect: the directory is made on disk)
        if mmcv.is_str(work_dir):
            self.work_dir = osp.abspath(work_dir)
            mmcv.mkdir_or_exist(self.work_dir)
        elif work_dir is None:
            self.work_dir = None
        else:
            raise TypeError('"work_dir" must be a str or None')

        # get model name from the model class; unwrap `.module` for
        # DataParallel-style wrappers
        if hasattr(self.model, 'module'):
            self._model_name = self.model.module.__class__.__name__
        else:
            self._model_name = self.model.__class__.__name__

        self._rank, self._world_size = get_dist_info()
        self.timestamp = get_time_str()
        self.mode = None
        self._hooks = []
        self._epoch = 0
        self._iter = 0
        self._inner_iter = 0

        # exactly one of epoch-based / iteration-based budgets may be set
        if max_epochs is not None and max_iters is not None:
            raise ValueError(
                'Only one of `max_epochs` or `max_iters` can be set.')

        self._max_epochs = max_epochs
        self._max_iters = max_iters
        # TODO: Redesign LogBuffer, it is not flexible and elegant enough
        self.log_buffer = LogBuffer()
|
| 137 |
+
|
| 138 |
+
    @property
    def model_name(self):
        """str: Name of the model class (taken from ``model.module`` when
        the model is wrapped)."""
        return self._model_name
|
| 142 |
+
|
| 143 |
+
    @property
    def rank(self):
        """int: Rank of the current process in distributed training."""
        return self._rank
|
| 147 |
+
|
| 148 |
+
    @property
    def world_size(self):
        """int: Number of processes participating in the job
        (distributed training)."""
        return self._world_size
|
| 153 |
+
|
| 154 |
+
    @property
    def hooks(self):
        """list[:obj:`Hook`]: Registered hooks, kept sorted by priority
        (see :meth:`register_hook`)."""
        return self._hooks
|
| 158 |
+
|
| 159 |
+
    @property
    def epoch(self):
        """int: Current epoch (starts at 0)."""
        return self._epoch
|
| 163 |
+
|
| 164 |
+
    @property
    def iter(self):
        """int: Current global iteration (starts at 0)."""
        return self._iter
|
| 168 |
+
|
| 169 |
+
    @property
    def inner_iter(self):
        """int: Iteration count within the current epoch."""
        return self._inner_iter
|
| 173 |
+
|
| 174 |
+
    @property
    def max_epochs(self):
        """int: Maximum training epochs (may be None when unset)."""
        return self._max_epochs
|
| 178 |
+
|
| 179 |
+
    @property
    def max_iters(self):
        """int: Maximum training iterations (may be None when unset)."""
        return self._max_iters
|
| 183 |
+
|
| 184 |
+
    @abstractmethod
    def train(self):
        """Training logic; must be implemented by subclasses."""
        pass
|
| 187 |
+
|
| 188 |
+
    @abstractmethod
    def val(self):
        """Validation logic; must be implemented by subclasses."""
        pass
|
| 191 |
+
|
| 192 |
+
    @abstractmethod
    def run(self, data_loaders, workflow, **kwargs):
        """Launch the workflow on the given data loaders; must be
        implemented by subclasses."""
        pass
|
| 195 |
+
|
| 196 |
+
    @abstractmethod
    def save_checkpoint(self,
                        out_dir,
                        filename_tmpl,
                        save_optimizer=True,
                        meta=None,
                        create_symlink=True):
        """Save a checkpoint to ``out_dir``; must be implemented by
        subclasses."""
        pass
|
| 204 |
+
|
| 205 |
+
def current_lr(self):
|
| 206 |
+
"""Get current learning rates.
|
| 207 |
+
|
| 208 |
+
Returns:
|
| 209 |
+
list[float] | dict[str, list[float]]: Current learning rates of all
|
| 210 |
+
param groups. If the runner has a dict of optimizers, this
|
| 211 |
+
method will return a dict.
|
| 212 |
+
"""
|
| 213 |
+
if isinstance(self.optimizer, torch.optim.Optimizer):
|
| 214 |
+
lr = [group['lr'] for group in self.optimizer.param_groups]
|
| 215 |
+
elif isinstance(self.optimizer, dict):
|
| 216 |
+
lr = dict()
|
| 217 |
+
for name, optim in self.optimizer.items():
|
| 218 |
+
lr[name] = [group['lr'] for group in optim.param_groups]
|
| 219 |
+
else:
|
| 220 |
+
raise RuntimeError(
|
| 221 |
+
'lr is not applicable because optimizer does not exist.')
|
| 222 |
+
return lr
|
| 223 |
+
|
| 224 |
+
def current_momentum(self):
|
| 225 |
+
"""Get current momentums.
|
| 226 |
+
|
| 227 |
+
Returns:
|
| 228 |
+
list[float] | dict[str, list[float]]: Current momentums of all
|
| 229 |
+
param groups. If the runner has a dict of optimizers, this
|
| 230 |
+
method will return a dict.
|
| 231 |
+
"""
|
| 232 |
+
|
| 233 |
+
def _get_momentum(optimizer):
|
| 234 |
+
momentums = []
|
| 235 |
+
for group in optimizer.param_groups:
|
| 236 |
+
if 'momentum' in group.keys():
|
| 237 |
+
momentums.append(group['momentum'])
|
| 238 |
+
elif 'betas' in group.keys():
|
| 239 |
+
momentums.append(group['betas'][0])
|
| 240 |
+
else:
|
| 241 |
+
momentums.append(0)
|
| 242 |
+
return momentums
|
| 243 |
+
|
| 244 |
+
if self.optimizer is None:
|
| 245 |
+
raise RuntimeError(
|
| 246 |
+
'momentum is not applicable because optimizer does not exist.')
|
| 247 |
+
elif isinstance(self.optimizer, torch.optim.Optimizer):
|
| 248 |
+
momentums = _get_momentum(self.optimizer)
|
| 249 |
+
elif isinstance(self.optimizer, dict):
|
| 250 |
+
momentums = dict()
|
| 251 |
+
for name, optim in self.optimizer.items():
|
| 252 |
+
momentums[name] = _get_momentum(optim)
|
| 253 |
+
return momentums
|
| 254 |
+
|
| 255 |
+
def register_hook(self, hook, priority='NORMAL'):
|
| 256 |
+
"""Register a hook into the hook list.
|
| 257 |
+
|
| 258 |
+
The hook will be inserted into a priority queue, with the specified
|
| 259 |
+
priority (See :class:`Priority` for details of priorities).
|
| 260 |
+
For hooks with the same priority, they will be triggered in the same
|
| 261 |
+
order as they are registered.
|
| 262 |
+
|
| 263 |
+
Args:
|
| 264 |
+
hook (:obj:`Hook`): The hook to be registered.
|
| 265 |
+
priority (int or str or :obj:`Priority`): Hook priority.
|
| 266 |
+
Lower value means higher priority.
|
| 267 |
+
"""
|
| 268 |
+
assert isinstance(hook, Hook)
|
| 269 |
+
if hasattr(hook, 'priority'):
|
| 270 |
+
raise ValueError('"priority" is a reserved attribute for hooks')
|
| 271 |
+
priority = get_priority(priority)
|
| 272 |
+
hook.priority = priority
|
| 273 |
+
# insert the hook to a sorted list
|
| 274 |
+
inserted = False
|
| 275 |
+
for i in range(len(self._hooks) - 1, -1, -1):
|
| 276 |
+
if priority >= self._hooks[i].priority:
|
| 277 |
+
self._hooks.insert(i + 1, hook)
|
| 278 |
+
inserted = True
|
| 279 |
+
break
|
| 280 |
+
if not inserted:
|
| 281 |
+
self._hooks.insert(0, hook)
|
| 282 |
+
|
| 283 |
+
def register_hook_from_cfg(self, hook_cfg):
|
| 284 |
+
"""Register a hook from its cfg.
|
| 285 |
+
|
| 286 |
+
Args:
|
| 287 |
+
hook_cfg (dict): Hook config. It should have at least keys 'type'
|
| 288 |
+
and 'priority' indicating its type and priority.
|
| 289 |
+
|
| 290 |
+
Notes:
|
| 291 |
+
The specific hook class to register should not use 'type' and
|
| 292 |
+
'priority' arguments during initialization.
|
| 293 |
+
"""
|
| 294 |
+
hook_cfg = hook_cfg.copy()
|
| 295 |
+
priority = hook_cfg.pop('priority', 'NORMAL')
|
| 296 |
+
hook = mmcv.build_from_cfg(hook_cfg, HOOKS)
|
| 297 |
+
self.register_hook(hook, priority=priority)
|
| 298 |
+
|
| 299 |
+
def call_hook(self, fn_name):
|
| 300 |
+
"""Call all hooks.
|
| 301 |
+
|
| 302 |
+
Args:
|
| 303 |
+
fn_name (str): The function name in each hook to be called, such as
|
| 304 |
+
"before_train_epoch".
|
| 305 |
+
"""
|
| 306 |
+
for hook in self._hooks:
|
| 307 |
+
getattr(hook, fn_name)(self)
|
| 308 |
+
|
| 309 |
+
def get_hook_info(self):
|
| 310 |
+
# Get hooks info in each stage
|
| 311 |
+
stage_hook_map = {stage: [] for stage in Hook.stages}
|
| 312 |
+
for hook in self.hooks:
|
| 313 |
+
try:
|
| 314 |
+
priority = Priority(hook.priority).name
|
| 315 |
+
except ValueError:
|
| 316 |
+
priority = hook.priority
|
| 317 |
+
classname = hook.__class__.__name__
|
| 318 |
+
hook_info = f'({priority:<12}) {classname:<35}'
|
| 319 |
+
for trigger_stage in hook.get_triggered_stages():
|
| 320 |
+
stage_hook_map[trigger_stage].append(hook_info)
|
| 321 |
+
|
| 322 |
+
stage_hook_infos = []
|
| 323 |
+
for stage in Hook.stages:
|
| 324 |
+
hook_infos = stage_hook_map[stage]
|
| 325 |
+
if len(hook_infos) > 0:
|
| 326 |
+
info = f'{stage}:\n'
|
| 327 |
+
info += '\n'.join(hook_infos)
|
| 328 |
+
info += '\n -------------------- '
|
| 329 |
+
stage_hook_infos.append(info)
|
| 330 |
+
return '\n'.join(stage_hook_infos)
|
| 331 |
+
|
| 332 |
+
def load_checkpoint(self,
|
| 333 |
+
filename,
|
| 334 |
+
map_location='cpu',
|
| 335 |
+
strict=False,
|
| 336 |
+
revise_keys=[(r'^module.', '')]):
|
| 337 |
+
return load_checkpoint(
|
| 338 |
+
self.model,
|
| 339 |
+
filename,
|
| 340 |
+
map_location,
|
| 341 |
+
strict,
|
| 342 |
+
self.logger,
|
| 343 |
+
revise_keys=revise_keys)
|
| 344 |
+
|
| 345 |
+
def resume(self,
           checkpoint,
           resume_optimizer=True,
           map_location='default'):
    """Resume training state (epoch, iter, meta, optimizer) from a checkpoint.

    Args:
        checkpoint (str): Path of the checkpoint to resume from.
        resume_optimizer (bool): Whether to restore the optimizer
            state_dict(s) stored in the checkpoint. Default: True.
        map_location (str): 'default' maps tensors onto the current CUDA
            device when available (CPU otherwise); any other value is passed
            through to ``load_checkpoint``. Default: 'default'.
    """
    if map_location == 'default':
        if torch.cuda.is_available():
            # Remap all storages onto the currently selected CUDA device.
            device_id = torch.cuda.current_device()
            checkpoint = self.load_checkpoint(
                checkpoint,
                map_location=lambda storage, loc: storage.cuda(device_id))
        else:
            checkpoint = self.load_checkpoint(checkpoint)
    else:
        checkpoint = self.load_checkpoint(
            checkpoint, map_location=map_location)

    # Restore training progress counters from the checkpoint metadata.
    self._epoch = checkpoint['meta']['epoch']
    self._iter = checkpoint['meta']['iter']
    if self.meta is None:
        self.meta = {}
    self.meta.setdefault('hook_msgs', {})
    # load `last_ckpt`, `best_score`, `best_ckpt`, etc. for hook messages
    self.meta['hook_msgs'].update(checkpoint['meta'].get('hook_msgs', {}))

    # Re-calculate the number of iterations when resuming
    # models with different number of GPUs
    if 'config' in checkpoint['meta']:
        config = mmcv.Config.fromstring(
            checkpoint['meta']['config'], file_format='.py')
        previous_gpu_ids = config.get('gpu_ids', None)
        if previous_gpu_ids and len(previous_gpu_ids) > 0 and len(
                previous_gpu_ids) != self.world_size:
            # Scale the iteration counter so the effective number of seen
            # samples stays consistent across a different world size.
            self._iter = int(self._iter * len(previous_gpu_ids) /
                             self.world_size)
            self.logger.info('the iteration number is changed due to '
                             'change of GPU number')

    # resume meta information meta
    # NOTE(review): this replaces self.meta wholesale, discarding anything
    # merged into it above that is absent from checkpoint['meta'] — confirm
    # this overwrite is intentional.
    self.meta = checkpoint['meta']

    if 'optimizer' in checkpoint and resume_optimizer:
        if isinstance(self.optimizer, Optimizer):
            self.optimizer.load_state_dict(checkpoint['optimizer'])
        elif isinstance(self.optimizer, dict):
            # Multiple optimizers stored under matching keys.
            for k in self.optimizer.keys():
                self.optimizer[k].load_state_dict(
                    checkpoint['optimizer'][k])
        else:
            raise TypeError(
                'Optimizer should be dict or torch.optim.Optimizer '
                f'but got {type(self.optimizer)}')

    self.logger.info('resumed epoch %d, iter %d', self.epoch, self.iter)
|
| 398 |
+
|
| 399 |
+
def register_lr_hook(self, lr_config):
    """Register an LR updater hook with VERY_HIGH priority.

    Args:
        lr_config (dict | Hook | None): A config dict containing a 'policy'
            key (built into ``<Policy>LrUpdaterHook``), an already
            constructed hook object, or None to skip registration.
    """
    if lr_config is None:
        return
    if isinstance(lr_config, dict):
        assert 'policy' in lr_config
        policy = lr_config.pop('policy')
        # An all-lowercase policy such as 'cyclic' is title-cased to
        # 'Cyclic' for convenience; names already containing capitals
        # (e.g. 'CosineAnnealing') are left untouched.
        if policy == policy.lower():
            policy = policy.title()
        lr_config['type'] = policy + 'LrUpdaterHook'
        lr_hook = mmcv.build_from_cfg(lr_config, HOOKS)
    else:
        lr_hook = lr_config
    self.register_hook(lr_hook, priority='VERY_HIGH')
|
| 419 |
+
|
| 420 |
+
def register_momentum_hook(self, momentum_config):
    """Register a momentum updater hook with HIGH priority.

    Args:
        momentum_config (dict | Hook | None): A config dict containing a
            'policy' key (built into ``<Policy>MomentumUpdaterHook``), an
            already constructed hook object, or None to skip registration.
    """
    if momentum_config is None:
        return
    if isinstance(momentum_config, dict):
        assert 'policy' in momentum_config
        policy = momentum_config.pop('policy')
        # An all-lowercase policy such as 'cyclic' is title-cased to
        # 'Cyclic'; names already containing capitals (e.g.
        # 'CosineAnnealing') are left untouched.
        if policy == policy.lower():
            policy = policy.title()
        momentum_config['type'] = policy + 'MomentumUpdaterHook'
        momentum_hook = mmcv.build_from_cfg(momentum_config, HOOKS)
    else:
        momentum_hook = momentum_config
    self.register_hook(momentum_hook, priority='HIGH')
|
| 440 |
+
|
| 441 |
+
def register_optimizer_hook(self, optimizer_config):
    """Register an optimizer hook with ABOVE_NORMAL priority.

    Args:
        optimizer_config (dict | Hook | None): Config dict (defaulting its
            'type' to 'OptimizerHook'), a hook object, or None to skip.
    """
    if optimizer_config is None:
        return
    if not isinstance(optimizer_config, dict):
        optim_hook = optimizer_config
    else:
        optimizer_config.setdefault('type', 'OptimizerHook')
        optim_hook = mmcv.build_from_cfg(optimizer_config, HOOKS)
    self.register_hook(optim_hook, priority='ABOVE_NORMAL')
|
| 450 |
+
|
| 451 |
+
def register_checkpoint_hook(self, checkpoint_config):
    """Register a checkpoint-saving hook with NORMAL priority.

    Args:
        checkpoint_config (dict | Hook | None): Config dict (defaulting its
            'type' to 'CheckpointHook'), a hook object, or None to skip.
    """
    if checkpoint_config is None:
        return
    if not isinstance(checkpoint_config, dict):
        ckpt_hook = checkpoint_config
    else:
        checkpoint_config.setdefault('type', 'CheckpointHook')
        ckpt_hook = mmcv.build_from_cfg(checkpoint_config, HOOKS)
    self.register_hook(ckpt_hook, priority='NORMAL')
|
| 460 |
+
|
| 461 |
+
def register_logger_hooks(self, log_config):
    """Register one logger hook per config entry with VERY_LOW priority.

    Args:
        log_config (dict | None): Dict with an 'interval' shared by all
            logger hooks and a 'hooks' list of per-logger configs; None
            skips registration.
    """
    if log_config is None:
        return
    shared_interval = log_config['interval']
    for hook_cfg in log_config['hooks']:
        built_hook = mmcv.build_from_cfg(
            hook_cfg, HOOKS, default_args=dict(interval=shared_interval))
        self.register_hook(built_hook, priority='VERY_LOW')
|
| 469 |
+
|
| 470 |
+
def register_timer_hook(self, timer_config):
    """Register an iteration-timer hook with LOW priority.

    Args:
        timer_config (dict | Hook | None): Config dict, hook object, or
            None to skip registration.
    """
    if timer_config is None:
        return
    if not isinstance(timer_config, dict):
        timer_hook = timer_config
    else:
        # Deep-copy so building the hook cannot mutate the caller's config.
        timer_hook = mmcv.build_from_cfg(copy.deepcopy(timer_config), HOOKS)
    self.register_hook(timer_hook, priority='LOW')
|
| 479 |
+
|
| 480 |
+
def register_custom_hooks(self, custom_config):
    """Register user-defined hooks.

    Args:
        custom_config (list | dict | Hook | None): A single hook config or
            hook object, or a list of them. Config dicts go through
            ``register_hook_from_cfg`` (which honors an embedded
            'priority'); hook objects are registered with NORMAL priority.
    """
    if custom_config is None:
        return

    entries = custom_config if isinstance(custom_config, list) else [custom_config]
    for entry in entries:
        if isinstance(entry, dict):
            self.register_hook_from_cfg(entry)
        else:
            self.register_hook(entry, priority='NORMAL')
|
| 492 |
+
|
| 493 |
+
def register_profiler_hook(self, profiler_config):
    """Register a profiler hook (default priority).

    Args:
        profiler_config (dict | Hook | None): Config dict (defaulting its
            'type' to 'ProfilerHook'), a hook object, or None to skip.
    """
    if profiler_config is None:
        return
    if not isinstance(profiler_config, dict):
        profiler_hook = profiler_config
    else:
        profiler_config.setdefault('type', 'ProfilerHook')
        profiler_hook = mmcv.build_from_cfg(profiler_config, HOOKS)
    self.register_hook(profiler_hook)
|
| 502 |
+
|
| 503 |
+
def register_training_hooks(self,
                            lr_config,
                            optimizer_config=None,
                            checkpoint_config=None,
                            log_config=None,
                            momentum_config=None,
                            timer_config=dict(type='IterTimerHook'),
                            custom_hooks_config=None):
    """Register default and custom hooks for training.

    Default and custom hooks include:

    +----------------------+-------------------------+
    | Hooks                | Priority                |
    +======================+=========================+
    | LrUpdaterHook        | VERY_HIGH (10)          |
    +----------------------+-------------------------+
    | MomentumUpdaterHook  | HIGH (30)               |
    +----------------------+-------------------------+
    | OptimizerStepperHook | ABOVE_NORMAL (40)       |
    +----------------------+-------------------------+
    | CheckpointSaverHook  | NORMAL (50)             |
    +----------------------+-------------------------+
    | IterTimerHook        | LOW (70)                |
    +----------------------+-------------------------+
    | LoggerHook(s)        | VERY_LOW (90)           |
    +----------------------+-------------------------+
    | CustomHook(s)        | defaults to NORMAL (50) |
    +----------------------+-------------------------+

    If custom hooks have same priority with default hooks, custom hooks
    will be triggered after default hooks.

    Args:
        lr_config (dict | Hook | None): Config or instance of the LR
            updater hook.
        optimizer_config (dict | Hook | None): Config or instance of the
            optimizer hook. Default: None.
        checkpoint_config (dict | Hook | None): Config or instance of the
            checkpoint hook. Default: None.
        log_config (dict | None): Dict with 'interval' and a 'hooks' list
            of logger hook configs. Default: None.
        momentum_config (dict | Hook | None): Config or instance of the
            momentum updater hook. Default: None.
        timer_config (dict | Hook): Config or instance of the iteration
            timer hook. Default: ``dict(type='IterTimerHook')``.
        custom_hooks_config (list | dict | Hook | None): User hooks, each
            a config dict or hook object. Default: None.
    """
    # Registration order matters for hooks sharing a priority: defaults are
    # registered first, custom hooks last, so custom hooks fire afterwards.
    self.register_lr_hook(lr_config)
    self.register_momentum_hook(momentum_config)
    self.register_optimizer_hook(optimizer_config)
    self.register_checkpoint_hook(checkpoint_config)
    self.register_timer_hook(timer_config)
    self.register_logger_hooks(log_config)
    self.register_custom_hooks(custom_hooks_config)
|
RAVE-main/annotator/mmpkg/mmcv/runner/default_constructor.py
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .builder import RUNNER_BUILDERS, RUNNERS
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
@RUNNER_BUILDERS.register_module()
class DefaultRunnerConstructor:
    """Default constructor for runners.

    Customize an existing ``Runner`` such as ``EpochBasedRunner`` through a
    ``RunnerConstructor``. For example, we can inject some new properties
    and functions for ``Runner``.

    Example:
        >>> from annotator.mmpkg.mmcv.runner import RUNNER_BUILDERS, build_runner
        >>> # Define a new RunnerReconstructor
        >>> @RUNNER_BUILDERS.register_module()
        >>> class MyRunnerConstructor:
        ...     def __init__(self, runner_cfg, default_args=None):
        ...         if not isinstance(runner_cfg, dict):
        ...             raise TypeError('runner_cfg should be a dict, '
        ...                             f'but got {type(runner_cfg)}')
        ...         self.runner_cfg = runner_cfg
        ...         self.default_args = default_args
        ...
        ...     def __call__(self):
        ...         runner = RUNNERS.build(self.runner_cfg,
        ...                                default_args=self.default_args)
        ...         # Add new properties for existing runner
        ...         runner.my_name = 'my_runner'
        ...         runner.my_function = lambda self: print(self.my_name)
        ...         ...
        >>> # build your runner
        >>> runner_cfg = dict(type='EpochBasedRunner', max_epochs=40,
        ...                   constructor='MyRunnerConstructor')
        >>> runner = build_runner(runner_cfg)
    """

    def __init__(self, runner_cfg, default_args=None):
        """Store the runner config and default build arguments.

        Args:
            runner_cfg (dict): Config used to build the runner.
            default_args (dict, optional): Default keyword arguments passed
                to the runner's constructor. Default: None.

        Raises:
            TypeError: If ``runner_cfg`` is not a dict.
        """
        if not isinstance(runner_cfg, dict):
            # Raise with a single formatted message; the original passed two
            # arguments to TypeError, which prints as a tuple rather than
            # one readable sentence.
            raise TypeError(
                f'runner_cfg should be a dict, but got {type(runner_cfg)}')
        self.runner_cfg = runner_cfg
        self.default_args = default_args

    def __call__(self):
        """Build and return the runner described by ``self.runner_cfg``."""
        return RUNNERS.build(self.runner_cfg, default_args=self.default_args)
|
RAVE-main/annotator/mmpkg/mmcv/runner/hooks/__init__.py
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from .checkpoint import CheckpointHook
|
| 3 |
+
from .closure import ClosureHook
|
| 4 |
+
from .ema import EMAHook
|
| 5 |
+
from .evaluation import DistEvalHook, EvalHook
|
| 6 |
+
from .hook import HOOKS, Hook
|
| 7 |
+
from .iter_timer import IterTimerHook
|
| 8 |
+
from .logger import (DvcliveLoggerHook, LoggerHook, MlflowLoggerHook,
|
| 9 |
+
NeptuneLoggerHook, PaviLoggerHook, TensorboardLoggerHook,
|
| 10 |
+
TextLoggerHook, WandbLoggerHook)
|
| 11 |
+
from .lr_updater import LrUpdaterHook
|
| 12 |
+
from .memory import EmptyCacheHook
|
| 13 |
+
from .momentum_updater import MomentumUpdaterHook
|
| 14 |
+
from .optimizer import (Fp16OptimizerHook, GradientCumulativeFp16OptimizerHook,
|
| 15 |
+
GradientCumulativeOptimizerHook, OptimizerHook)
|
| 16 |
+
from .profiler import ProfilerHook
|
| 17 |
+
from .sampler_seed import DistSamplerSeedHook
|
| 18 |
+
from .sync_buffer import SyncBuffersHook
|
| 19 |
+
|
| 20 |
+
__all__ = [
|
| 21 |
+
'HOOKS', 'Hook', 'CheckpointHook', 'ClosureHook', 'LrUpdaterHook',
|
| 22 |
+
'OptimizerHook', 'Fp16OptimizerHook', 'IterTimerHook',
|
| 23 |
+
'DistSamplerSeedHook', 'EmptyCacheHook', 'LoggerHook', 'MlflowLoggerHook',
|
| 24 |
+
'PaviLoggerHook', 'TextLoggerHook', 'TensorboardLoggerHook',
|
| 25 |
+
'NeptuneLoggerHook', 'WandbLoggerHook', 'DvcliveLoggerHook',
|
| 26 |
+
'MomentumUpdaterHook', 'SyncBuffersHook', 'EMAHook', 'EvalHook',
|
| 27 |
+
'DistEvalHook', 'ProfilerHook', 'GradientCumulativeOptimizerHook',
|
| 28 |
+
'GradientCumulativeFp16OptimizerHook'
|
| 29 |
+
]
|
RAVE-main/annotator/mmpkg/mmcv/runner/hooks/checkpoint.py
ADDED
|
@@ -0,0 +1,167 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import os.path as osp
|
| 3 |
+
import warnings
|
| 4 |
+
|
| 5 |
+
from annotator.mmpkg.mmcv.fileio import FileClient
|
| 6 |
+
from ..dist_utils import allreduce_params, master_only
|
| 7 |
+
from .hook import HOOKS, Hook
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
@HOOKS.register_module()
class CheckpointHook(Hook):
    """Save checkpoints periodically.

    Args:
        interval (int): The saving period. If ``by_epoch=True``, interval
            indicates epochs, otherwise it indicates iterations.
            Default: -1, which means "never".
        by_epoch (bool): Saving checkpoints by epoch or by iteration.
            Default: True.
        save_optimizer (bool): Whether to save optimizer state_dict in the
            checkpoint. It is usually used for resuming experiments.
            Default: True.
        out_dir (str, optional): The root directory to save checkpoints. If not
            specified, ``runner.work_dir`` will be used by default. If
            specified, the ``out_dir`` will be the concatenation of ``out_dir``
            and the last level directory of ``runner.work_dir``.
            `Changed in version 1.3.16.`
        max_keep_ckpts (int, optional): The maximum checkpoints to keep.
            In some cases we want only the latest few checkpoints and would
            like to delete old ones to save the disk space.
            Default: -1, which means unlimited.
        save_last (bool, optional): Whether to force the last checkpoint to be
            saved regardless of interval. Default: True.
        sync_buffer (bool, optional): Whether to synchronize buffers in
            different gpus. Default: False.
        file_client_args (dict, optional): Arguments to instantiate a
            FileClient. See :class:`mmcv.fileio.FileClient` for details.
            Default: None.
            `New in version 1.3.16.`

    .. warning::
        Before v1.3.16, the ``out_dir`` argument indicates the path where the
        checkpoint is stored. However, since v1.3.16, ``out_dir`` indicates the
        root directory and the final path to save checkpoint is the
        concatenation of ``out_dir`` and the last level directory of
        ``runner.work_dir``. Suppose the value of ``out_dir`` is "/path/of/A"
        and the value of ``runner.work_dir`` is "/path/of/B", then the final
        path will be "/path/of/A/B".
    """

    def __init__(self,
                 interval=-1,
                 by_epoch=True,
                 save_optimizer=True,
                 out_dir=None,
                 max_keep_ckpts=-1,
                 save_last=True,
                 sync_buffer=False,
                 file_client_args=None,
                 **kwargs):
        # See the class docstring for argument semantics. Any extra kwargs
        # are stored in ``self.args`` and forwarded to
        # ``runner.save_checkpoint`` (e.g. ``filename_tmpl``,
        # ``create_symlink``).
        self.interval = interval
        self.by_epoch = by_epoch
        self.save_optimizer = save_optimizer
        self.out_dir = out_dir
        self.max_keep_ckpts = max_keep_ckpts
        self.save_last = save_last
        self.args = kwargs
        self.sync_buffer = sync_buffer
        self.file_client_args = file_client_args

    def before_run(self, runner):
        """Resolve the output directory and file client before training."""
        if not self.out_dir:
            self.out_dir = runner.work_dir

        self.file_client = FileClient.infer_client(self.file_client_args,
                                                   self.out_dir)

        # if `self.out_dir` is not equal to `runner.work_dir`, it means that
        # `self.out_dir` is set so the final `self.out_dir` is the
        # concatenation of `self.out_dir` and the last level directory of
        # `runner.work_dir`
        if self.out_dir != runner.work_dir:
            basename = osp.basename(runner.work_dir.rstrip(osp.sep))
            self.out_dir = self.file_client.join_path(self.out_dir, basename)

        runner.logger.info((f'Checkpoints will be saved to {self.out_dir} by '
                            f'{self.file_client.name}.'))

        # disable the create_symlink option because some file backends do not
        # allow to create a symlink
        if 'create_symlink' in self.args:
            if self.args[
                    'create_symlink'] and not self.file_client.allow_symlink:
                self.args['create_symlink'] = False
                warnings.warn(
                    ('create_symlink is set as True by the user but is changed'
                     'to be False because creating symbolic link is not '
                     f'allowed in {self.file_client.name}'))
        else:
            self.args['create_symlink'] = self.file_client.allow_symlink

    def after_train_epoch(self, runner):
        """Save a checkpoint after a training epoch when in by-epoch mode."""
        if not self.by_epoch:
            return

        # save checkpoint for following cases:
        # 1. every ``self.interval`` epochs
        # 2. reach the last epoch of training
        if self.every_n_epochs(
                runner, self.interval) or (self.save_last
                                           and self.is_last_epoch(runner)):
            runner.logger.info(
                f'Saving checkpoint at {runner.epoch + 1} epochs')
            if self.sync_buffer:
                # Sync model buffers (e.g. BN running stats) across ranks so
                # the saved checkpoint is consistent.
                allreduce_params(runner.model.buffers())
            self._save_checkpoint(runner)

    @master_only
    def _save_checkpoint(self, runner):
        """Save the current checkpoint and delete unwanted checkpoint."""
        runner.save_checkpoint(
            self.out_dir, save_optimizer=self.save_optimizer, **self.args)
        if runner.meta is not None:
            # Record the path of the newest checkpoint so resuming can find
            # it via runner.meta['hook_msgs']['last_ckpt'].
            if self.by_epoch:
                cur_ckpt_filename = self.args.get(
                    'filename_tmpl', 'epoch_{}.pth').format(runner.epoch + 1)
            else:
                cur_ckpt_filename = self.args.get(
                    'filename_tmpl', 'iter_{}.pth').format(runner.iter + 1)
            runner.meta.setdefault('hook_msgs', dict())
            runner.meta['hook_msgs']['last_ckpt'] = self.file_client.join_path(
                self.out_dir, cur_ckpt_filename)
        # remove other checkpoints
        if self.max_keep_ckpts > 0:
            if self.by_epoch:
                name = 'epoch_{}.pth'
                current_ckpt = runner.epoch + 1
            else:
                name = 'iter_{}.pth'
                current_ckpt = runner.iter + 1
            # Step backwards by ``interval`` from the oldest checkpoint that
            # should be kept, deleting older ones; stop at the first gap.
            redundant_ckpts = range(
                current_ckpt - self.max_keep_ckpts * self.interval, 0,
                -self.interval)
            filename_tmpl = self.args.get('filename_tmpl', name)
            for _step in redundant_ckpts:
                ckpt_path = self.file_client.join_path(
                    self.out_dir, filename_tmpl.format(_step))
                if self.file_client.isfile(ckpt_path):
                    self.file_client.remove(ckpt_path)
                else:
                    break

    def after_train_iter(self, runner):
        """Save a checkpoint after a training iter when in by-iter mode."""
        if self.by_epoch:
            return

        # save checkpoint for following cases:
        # 1. every ``self.interval`` iterations
        # 2. reach the last iteration of training
        if self.every_n_iters(
                runner, self.interval) or (self.save_last
                                           and self.is_last_iter(runner)):
            runner.logger.info(
                f'Saving checkpoint at {runner.iter + 1} iterations')
            if self.sync_buffer:
                # Sync model buffers across ranks before saving.
                allreduce_params(runner.model.buffers())
            self._save_checkpoint(runner)
|
RAVE-main/annotator/mmpkg/mmcv/runner/hooks/closure.py
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from .hook import HOOKS, Hook
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
@HOOKS.register_module()
class ClosureHook(Hook):
    """A hook whose behavior at a single stage is supplied as a callback.

    ``fn`` is bound on the instance under the stage name ``fn_name``
    (e.g. 'before_train_epoch'), shadowing the method of the same name
    inherited from ``Hook`` so the runner invokes the callback at that stage.
    """

    def __init__(self, fn_name, fn):
        # ``fn_name`` must name an existing Hook stage method and ``fn``
        # must be callable.
        assert hasattr(self, fn_name)
        assert callable(fn)
        # The instance attribute shadows the class-level stage method.
        setattr(self, fn_name, fn)
|
RAVE-main/annotator/mmpkg/mmcv/runner/hooks/evaluation.py
ADDED
|
@@ -0,0 +1,509 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import os.path as osp
|
| 3 |
+
import warnings
|
| 4 |
+
from math import inf
|
| 5 |
+
|
| 6 |
+
import torch.distributed as dist
|
| 7 |
+
from torch.nn.modules.batchnorm import _BatchNorm
|
| 8 |
+
from torch.utils.data import DataLoader
|
| 9 |
+
|
| 10 |
+
from annotator.mmpkg.mmcv.fileio import FileClient
|
| 11 |
+
from annotator.mmpkg.mmcv.utils import is_seq_of
|
| 12 |
+
from .hook import Hook
|
| 13 |
+
from .logger import LoggerHook
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class EvalHook(Hook):
|
| 17 |
+
"""Non-Distributed evaluation hook.
|
| 18 |
+
|
| 19 |
+
This hook will regularly perform evaluation in a given interval when
|
| 20 |
+
performing in non-distributed environment.
|
| 21 |
+
|
| 22 |
+
Args:
|
| 23 |
+
dataloader (DataLoader): A PyTorch dataloader, whose dataset has
|
| 24 |
+
implemented ``evaluate`` function.
|
| 25 |
+
start (int | None, optional): Evaluation starting epoch. It enables
|
| 26 |
+
evaluation before the training starts if ``start`` <= the resuming
|
| 27 |
+
epoch. If None, whether to evaluate is merely decided by
|
| 28 |
+
``interval``. Default: None.
|
| 29 |
+
interval (int): Evaluation interval. Default: 1.
|
| 30 |
+
by_epoch (bool): Determine perform evaluation by epoch or by iteration.
|
| 31 |
+
If set to True, it will perform by epoch. Otherwise, by iteration.
|
| 32 |
+
Default: True.
|
| 33 |
+
save_best (str, optional): If a metric is specified, it would measure
|
| 34 |
+
the best checkpoint during evaluation. The information about best
|
| 35 |
+
checkpoint would be saved in ``runner.meta['hook_msgs']`` to keep
|
| 36 |
+
best score value and best checkpoint path, which will be also
|
| 37 |
+
loaded when resume checkpoint. Options are the evaluation metrics
|
| 38 |
+
on the test dataset. e.g., ``bbox_mAP``, ``segm_mAP`` for bbox
|
| 39 |
+
detection and instance segmentation. ``AR@100`` for proposal
|
| 40 |
+
recall. If ``save_best`` is ``auto``, the first key of the returned
|
| 41 |
+
``OrderedDict`` result will be used. Default: None.
|
| 42 |
+
rule (str | None, optional): Comparison rule for best score. If set to
|
| 43 |
+
None, it will infer a reasonable rule. Keys such as 'acc', 'top'
|
| 44 |
+
.etc will be inferred by 'greater' rule. Keys contain 'loss' will
|
| 45 |
+
be inferred by 'less' rule. Options are 'greater', 'less', None.
|
| 46 |
+
Default: None.
|
| 47 |
+
test_fn (callable, optional): test a model with samples from a
|
| 48 |
+
dataloader, and return the test results. If ``None``, the default
|
| 49 |
+
test function ``mmcv.engine.single_gpu_test`` will be used.
|
| 50 |
+
(default: ``None``)
|
| 51 |
+
greater_keys (List[str] | None, optional): Metric keys that will be
|
| 52 |
+
inferred by 'greater' comparison rule. If ``None``,
|
| 53 |
+
_default_greater_keys will be used. (default: ``None``)
|
| 54 |
+
less_keys (List[str] | None, optional): Metric keys that will be
|
| 55 |
+
inferred by 'less' comparison rule. If ``None``, _default_less_keys
|
| 56 |
+
will be used. (default: ``None``)
|
| 57 |
+
out_dir (str, optional): The root directory to save checkpoints. If not
|
| 58 |
+
specified, `runner.work_dir` will be used by default. If specified,
|
| 59 |
+
the `out_dir` will be the concatenation of `out_dir` and the last
|
| 60 |
+
level directory of `runner.work_dir`.
|
| 61 |
+
`New in version 1.3.16.`
|
| 62 |
+
file_client_args (dict): Arguments to instantiate a FileClient.
|
| 63 |
+
See :class:`mmcv.fileio.FileClient` for details. Default: None.
|
| 64 |
+
`New in version 1.3.16.`
|
| 65 |
+
**eval_kwargs: Evaluation arguments fed into the evaluate function of
|
| 66 |
+
the dataset.
|
| 67 |
+
|
| 68 |
+
Notes:
|
| 69 |
+
If new arguments are added for EvalHook, tools/test.py,
|
| 70 |
+
tools/eval_metric.py may be affected.
|
| 71 |
+
"""
|
| 72 |
+
|
| 73 |
+
# Since the key for determine greater or less is related to the downstream
|
| 74 |
+
# tasks, downstream repos may need to overwrite the following inner
|
| 75 |
+
# variable accordingly.
|
| 76 |
+
|
| 77 |
+
rule_map = {'greater': lambda x, y: x > y, 'less': lambda x, y: x < y}
|
| 78 |
+
init_value_map = {'greater': -inf, 'less': inf}
|
| 79 |
+
_default_greater_keys = [
|
| 80 |
+
'acc', 'top', 'AR@', 'auc', 'precision', 'mAP', 'mDice', 'mIoU',
|
| 81 |
+
'mAcc', 'aAcc'
|
| 82 |
+
]
|
| 83 |
+
_default_less_keys = ['loss']
|
| 84 |
+
|
| 85 |
+
def __init__(self,
|
| 86 |
+
dataloader,
|
| 87 |
+
start=None,
|
| 88 |
+
interval=1,
|
| 89 |
+
by_epoch=True,
|
| 90 |
+
save_best=None,
|
| 91 |
+
rule=None,
|
| 92 |
+
test_fn=None,
|
| 93 |
+
greater_keys=None,
|
| 94 |
+
less_keys=None,
|
| 95 |
+
out_dir=None,
|
| 96 |
+
file_client_args=None,
|
| 97 |
+
**eval_kwargs):
|
| 98 |
+
if not isinstance(dataloader, DataLoader):
|
| 99 |
+
raise TypeError(f'dataloader must be a pytorch DataLoader, '
|
| 100 |
+
f'but got {type(dataloader)}')
|
| 101 |
+
|
| 102 |
+
if interval <= 0:
|
| 103 |
+
raise ValueError(f'interval must be a positive number, '
|
| 104 |
+
f'but got {interval}')
|
| 105 |
+
|
| 106 |
+
assert isinstance(by_epoch, bool), '``by_epoch`` should be a boolean'
|
| 107 |
+
|
| 108 |
+
if start is not None and start < 0:
|
| 109 |
+
raise ValueError(f'The evaluation start epoch {start} is smaller '
|
| 110 |
+
f'than 0')
|
| 111 |
+
|
| 112 |
+
self.dataloader = dataloader
|
| 113 |
+
self.interval = interval
|
| 114 |
+
self.start = start
|
| 115 |
+
self.by_epoch = by_epoch
|
| 116 |
+
|
| 117 |
+
assert isinstance(save_best, str) or save_best is None, \
|
| 118 |
+
'""save_best"" should be a str or None ' \
|
| 119 |
+
f'rather than {type(save_best)}'
|
| 120 |
+
self.save_best = save_best
|
| 121 |
+
self.eval_kwargs = eval_kwargs
|
| 122 |
+
self.initial_flag = True
|
| 123 |
+
|
| 124 |
+
if test_fn is None:
|
| 125 |
+
from annotator.mmpkg.mmcv.engine import single_gpu_test
|
| 126 |
+
self.test_fn = single_gpu_test
|
| 127 |
+
else:
|
| 128 |
+
self.test_fn = test_fn
|
| 129 |
+
|
| 130 |
+
if greater_keys is None:
|
| 131 |
+
self.greater_keys = self._default_greater_keys
|
| 132 |
+
else:
|
| 133 |
+
if not isinstance(greater_keys, (list, tuple)):
|
| 134 |
+
greater_keys = (greater_keys, )
|
| 135 |
+
assert is_seq_of(greater_keys, str)
|
| 136 |
+
self.greater_keys = greater_keys
|
| 137 |
+
|
| 138 |
+
if less_keys is None:
|
| 139 |
+
self.less_keys = self._default_less_keys
|
| 140 |
+
else:
|
| 141 |
+
if not isinstance(less_keys, (list, tuple)):
|
| 142 |
+
less_keys = (less_keys, )
|
| 143 |
+
assert is_seq_of(less_keys, str)
|
| 144 |
+
self.less_keys = less_keys
|
| 145 |
+
|
| 146 |
+
if self.save_best is not None:
|
| 147 |
+
self.best_ckpt_path = None
|
| 148 |
+
self._init_rule(rule, self.save_best)
|
| 149 |
+
|
| 150 |
+
self.out_dir = out_dir
|
| 151 |
+
self.file_client_args = file_client_args
|
| 152 |
+
|
| 153 |
+
    def _init_rule(self, rule, key_indicator):
        """Initialize rule, key_indicator, comparison_func, and best score.

        Here is the rule to determine which rule is used for key indicator
        when the rule is not specific (note that the key indicator matching
        is case-insensitive):
        1. If the key indicator is in ``self.greater_keys``, the rule will be
           specified as 'greater'.
        2. Or if the key indicator is in ``self.less_keys``, the rule will be
           specified as 'less'.
        3. Or if the key indicator is equal to the substring in any one item
           in ``self.greater_keys``, the rule will be specified as 'greater'.
        4. Or if the key indicator is equal to the substring in any one item
           in ``self.less_keys``, the rule will be specified as 'less'.

        Args:
            rule (str | None): Comparison rule for best score.
            key_indicator (str | None): Key indicator to determine the
                comparison rule.
        """
        if rule not in self.rule_map and rule is not None:
            raise KeyError(f'rule must be greater, less or None, '
                           f'but got {rule}.')

        if rule is None:
            # When ``key_indicator`` is 'auto', rule inference is deferred to
            # the first call of ``evaluate``, where the real metric name
            # becomes available and this method is invoked again.
            if key_indicator != 'auto':
                # `_lc` here means we use the lower case of keys for
                # case-insensitive matching
                key_indicator_lc = key_indicator.lower()
                greater_keys = [key.lower() for key in self.greater_keys]
                less_keys = [key.lower() for key in self.less_keys]

                if key_indicator_lc in greater_keys:
                    rule = 'greater'
                elif key_indicator_lc in less_keys:
                    rule = 'less'
                # Substring matches come after exact matches so that exact
                # hits always win.
                elif any(key in key_indicator_lc for key in greater_keys):
                    rule = 'greater'
                elif any(key in key_indicator_lc for key in less_keys):
                    rule = 'less'
                else:
                    raise ValueError(f'Cannot infer the rule for key '
                                     f'{key_indicator}, thus a specific rule '
                                     f'must be specified.')
        self.rule = rule
        self.key_indicator = key_indicator
        # ``rule`` may legitimately still be None here (the 'auto' case);
        # ``compare_func`` is then set by the later, post-inference call.
        if self.rule is not None:
            self.compare_func = self.rule_map[self.rule]
    def before_run(self, runner):
        """Resolve the checkpoint output directory and restore state.

        Falls back to ``runner.work_dir`` when no ``out_dir`` was given,
        builds the file client, and — when best-checkpoint tracking is on —
        restores a previously recorded best-checkpoint path from
        ``runner.meta`` (so resuming keeps working).
        """
        if not self.out_dir:
            self.out_dir = runner.work_dir

        self.file_client = FileClient.infer_client(self.file_client_args,
                                                   self.out_dir)

        # if `self.out_dir` is not equal to `runner.work_dir`, it means that
        # `self.out_dir` is set so the final `self.out_dir` is the
        # concatenation of `self.out_dir` and the last level directory of
        # `runner.work_dir`
        if self.out_dir != runner.work_dir:
            basename = osp.basename(runner.work_dir.rstrip(osp.sep))
            self.out_dir = self.file_client.join_path(self.out_dir, basename)
            runner.logger.info(
                (f'The best checkpoint will be saved to {self.out_dir} by '
                 f'{self.file_client.name}'))

        if self.save_best is not None:
            if runner.meta is None:
                warnings.warn('runner.meta is None. Creating an empty one.')
                runner.meta = dict()
            runner.meta.setdefault('hook_msgs', dict())
            # Path of a best checkpoint saved in an earlier (resumed) run,
            # if any; used so the stale file can be removed on improvement.
            self.best_ckpt_path = runner.meta['hook_msgs'].get(
                'best_ckpt', None)
def before_train_iter(self, runner):
|
| 229 |
+
"""Evaluate the model only at the start of training by iteration."""
|
| 230 |
+
if self.by_epoch or not self.initial_flag:
|
| 231 |
+
return
|
| 232 |
+
if self.start is not None and runner.iter >= self.start:
|
| 233 |
+
self.after_train_iter(runner)
|
| 234 |
+
self.initial_flag = False
|
| 235 |
+
|
| 236 |
+
def before_train_epoch(self, runner):
|
| 237 |
+
"""Evaluate the model only at the start of training by epoch."""
|
| 238 |
+
if not (self.by_epoch and self.initial_flag):
|
| 239 |
+
return
|
| 240 |
+
if self.start is not None and runner.epoch >= self.start:
|
| 241 |
+
self.after_train_epoch(runner)
|
| 242 |
+
self.initial_flag = False
|
| 243 |
+
|
| 244 |
+
    def after_train_iter(self, runner):
        """Called after every training iter to evaluate the results.

        Before evaluating, flushes every registered ``LoggerHook`` and
        clears the log buffer so training and evaluation logs do not mix.
        """
        if not self.by_epoch and self._should_evaluate(runner):
            # Because the priority of EvalHook is higher than LoggerHook, the
            # training log and the evaluating log are mixed. Therefore,
            # we need to dump the training log and clear it before evaluating
            # log is generated. In addition, this problem will only appear in
            # `IterBasedRunner` whose `self.by_epoch` is False, because
            # `EpochBasedRunner` whose `self.by_epoch` is True calls
            # `_do_evaluate` in `after_train_epoch` stage, and at this stage
            # the training log has been printed, so it will not cause any
            # problem. more details at
            # https://github.com/open-mmlab/mmsegmentation/issues/694
            for hook in runner._hooks:
                if isinstance(hook, LoggerHook):
                    hook.after_train_iter(runner)
            runner.log_buffer.clear()

            self._do_evaluate(runner)
def after_train_epoch(self, runner):
|
| 265 |
+
"""Called after every training epoch to evaluate the results."""
|
| 266 |
+
if self.by_epoch and self._should_evaluate(runner):
|
| 267 |
+
self._do_evaluate(runner)
|
| 268 |
+
|
| 269 |
+
def _do_evaluate(self, runner):
|
| 270 |
+
"""perform evaluation and save ckpt."""
|
| 271 |
+
results = self.test_fn(runner.model, self.dataloader)
|
| 272 |
+
runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
|
| 273 |
+
key_score = self.evaluate(runner, results)
|
| 274 |
+
# the key_score may be `None` so it needs to skip the action to save
|
| 275 |
+
# the best checkpoint
|
| 276 |
+
if self.save_best and key_score:
|
| 277 |
+
self._save_ckpt(runner, key_score)
|
| 278 |
+
|
| 279 |
+
def _should_evaluate(self, runner):
|
| 280 |
+
"""Judge whether to perform evaluation.
|
| 281 |
+
|
| 282 |
+
Here is the rule to judge whether to perform evaluation:
|
| 283 |
+
1. It will not perform evaluation during the epoch/iteration interval,
|
| 284 |
+
which is determined by ``self.interval``.
|
| 285 |
+
2. It will not perform evaluation if the start time is larger than
|
| 286 |
+
current time.
|
| 287 |
+
3. It will not perform evaluation when current time is larger than
|
| 288 |
+
the start time but during epoch/iteration interval.
|
| 289 |
+
|
| 290 |
+
Returns:
|
| 291 |
+
bool: The flag indicating whether to perform evaluation.
|
| 292 |
+
"""
|
| 293 |
+
if self.by_epoch:
|
| 294 |
+
current = runner.epoch
|
| 295 |
+
check_time = self.every_n_epochs
|
| 296 |
+
else:
|
| 297 |
+
current = runner.iter
|
| 298 |
+
check_time = self.every_n_iters
|
| 299 |
+
|
| 300 |
+
if self.start is None:
|
| 301 |
+
if not check_time(runner, self.interval):
|
| 302 |
+
# No evaluation during the interval.
|
| 303 |
+
return False
|
| 304 |
+
elif (current + 1) < self.start:
|
| 305 |
+
# No evaluation if start is larger than the current time.
|
| 306 |
+
return False
|
| 307 |
+
else:
|
| 308 |
+
# Evaluation only at epochs/iters 3, 5, 7...
|
| 309 |
+
# if start==3 and interval==2
|
| 310 |
+
if (current + 1 - self.start) % self.interval:
|
| 311 |
+
return False
|
| 312 |
+
return True
|
| 313 |
+
|
| 314 |
+
    def _save_ckpt(self, runner, key_score):
        """Save the best checkpoint.

        It will compare the score according to the compare function, write
        related information (best score, best checkpoint path) and save the
        best checkpoint into ``work_dir``.

        Args:
            runner (:obj:`mmcv.Runner`): The underlined training runner.
            key_score (float): Score of the key indicator for this
                evaluation.
        """
        if self.by_epoch:
            current = f'epoch_{runner.epoch + 1}'
            cur_type, cur_time = 'epoch', runner.epoch + 1
        else:
            current = f'iter_{runner.iter + 1}'
            cur_type, cur_time = 'iter', runner.iter + 1

        # Fall back to the rule's worst-possible value when no best score
        # has been recorded yet (fresh run).
        best_score = runner.meta['hook_msgs'].get(
            'best_score', self.init_value_map[self.rule])
        if self.compare_func(key_score, best_score):
            best_score = key_score
            runner.meta['hook_msgs']['best_score'] = best_score

            # Drop the previously saved best checkpoint so at most one
            # best-checkpoint file exists at a time.
            if self.best_ckpt_path and self.file_client.isfile(
                    self.best_ckpt_path):
                self.file_client.remove(self.best_ckpt_path)
                runner.logger.info(
                    (f'The previous best checkpoint {self.best_ckpt_path} was '
                     'removed'))

            best_ckpt_name = f'best_{self.key_indicator}_{current}.pth'
            self.best_ckpt_path = self.file_client.join_path(
                self.out_dir, best_ckpt_name)
            # Recording the path in runner meta lets it survive a resume.
            runner.meta['hook_msgs']['best_ckpt'] = self.best_ckpt_path

            runner.save_checkpoint(
                self.out_dir, best_ckpt_name, create_symlink=False)
            runner.logger.info(
                f'Now best checkpoint is saved as {best_ckpt_name}.')
            runner.logger.info(
                f'Best {self.key_indicator} is {best_score:0.4f} '
                f'at {cur_time} {cur_type}.')
    def evaluate(self, runner, results):
        """Evaluate the results.

        Args:
            runner (:obj:`mmcv.Runner`): The underlined training runner.
            results (list): Output results.

        Returns:
            float | None: The score of ``self.key_indicator`` when
                ``save_best`` is enabled and a metric is available,
                otherwise ``None``.
        """
        eval_res = self.dataloader.dataset.evaluate(
            results, logger=runner.logger, **self.eval_kwargs)

        # Push every metric into the log buffer so logger hooks report it.
        for name, val in eval_res.items():
            runner.log_buffer.output[name] = val
        runner.log_buffer.ready = True

        if self.save_best is not None:
            # If the performance of model is poor, the `eval_res` may be an
            # empty dict and it will raise exception when `self.save_best` is
            # not None. More details at
            # https://github.com/open-mmlab/mmdetection/issues/6265.
            if not eval_res:
                warnings.warn(
                    'Since `eval_res` is an empty dict, the behavior to save '
                    'the best checkpoint will be skipped in this evaluation.')
                return None

            if self.key_indicator == 'auto':
                # infer from eval_results: use the first returned metric
                # as the key indicator and re-derive the comparison rule.
                self._init_rule(self.rule, list(eval_res.keys())[0])
            return eval_res[self.key_indicator]

        return None
class DistEvalHook(EvalHook):
    """Distributed evaluation hook.

    This hook will regularly perform evaluation in a given interval when
    performing in distributed environment.

    Args:
        dataloader (DataLoader): A PyTorch dataloader, whose dataset has
            implemented ``evaluate`` function.
        start (int | None, optional): Evaluation starting epoch. It enables
            evaluation before the training starts if ``start`` <= the resuming
            epoch. If None, whether to evaluate is merely decided by
            ``interval``. Default: None.
        interval (int): Evaluation interval. Default: 1.
        by_epoch (bool): Determine perform evaluation by epoch or by iteration.
            If set to True, it will perform by epoch. Otherwise, by iteration.
            default: True.
        save_best (str, optional): If a metric is specified, it would measure
            the best checkpoint during evaluation. The information about best
            checkpoint would be saved in ``runner.meta['hook_msgs']`` to keep
            best score value and best checkpoint path, which will be also
            loaded when resume checkpoint. Options are the evaluation metrics
            on the test dataset. e.g., ``bbox_mAP``, ``segm_mAP`` for bbox
            detection and instance segmentation. ``AR@100`` for proposal
            recall. If ``save_best`` is ``auto``, the first key of the returned
            ``OrderedDict`` result will be used. Default: None.
        rule (str | None, optional): Comparison rule for best score. If set to
            None, it will infer a reasonable rule. Keys such as 'acc', 'top'
            .etc will be inferred by 'greater' rule. Keys contain 'loss' will
            be inferred by 'less' rule. Options are 'greater', 'less', None.
            Default: None.
        test_fn (callable, optional): test a model with samples from a
            dataloader in a multi-gpu manner, and return the test results. If
            ``None``, the default test function ``mmcv.engine.multi_gpu_test``
            will be used. (default: ``None``)
        tmpdir (str | None): Temporary directory to save the results of all
            processes. Default: None.
        gpu_collect (bool): Whether to use gpu or cpu to collect results.
            Default: False.
        broadcast_bn_buffer (bool): Whether to broadcast the
            buffer(running_mean and running_var) of rank 0 to other rank
            before evaluation. Default: True.
        out_dir (str, optional): The root directory to save checkpoints. If not
            specified, `runner.work_dir` will be used by default. If specified,
            the `out_dir` will be the concatenation of `out_dir` and the last
            level directory of `runner.work_dir`.
        file_client_args (dict): Arguments to instantiate a FileClient.
            See :class:`mmcv.fileio.FileClient` for details. Default: None.
        **eval_kwargs: Evaluation arguments fed into the evaluate function of
            the dataset.
    """

    def __init__(self,
                 dataloader,
                 start=None,
                 interval=1,
                 by_epoch=True,
                 save_best=None,
                 rule=None,
                 test_fn=None,
                 greater_keys=None,
                 less_keys=None,
                 broadcast_bn_buffer=True,
                 tmpdir=None,
                 gpu_collect=False,
                 out_dir=None,
                 file_client_args=None,
                 **eval_kwargs):

        # Default to the distributed test function (instead of the
        # single-GPU default chosen by the parent class).
        if test_fn is None:
            from annotator.mmpkg.mmcv.engine import multi_gpu_test
            test_fn = multi_gpu_test

        super().__init__(
            dataloader,
            start=start,
            interval=interval,
            by_epoch=by_epoch,
            save_best=save_best,
            rule=rule,
            test_fn=test_fn,
            greater_keys=greater_keys,
            less_keys=less_keys,
            out_dir=out_dir,
            file_client_args=file_client_args,
            **eval_kwargs)

        self.broadcast_bn_buffer = broadcast_bn_buffer
        self.tmpdir = tmpdir
        self.gpu_collect = gpu_collect

    def _do_evaluate(self, runner):
        """perform evaluation and save ckpt."""
        # Synchronization of BatchNorm's buffer (running_mean
        # and running_var) is not supported in the DDP of pytorch,
        # which may cause the inconsistent performance of models in
        # different ranks, so we broadcast BatchNorm's buffers
        # of rank 0 to other ranks to avoid this.
        if self.broadcast_bn_buffer:
            model = runner.model
            for name, module in model.named_modules():
                if isinstance(module,
                              _BatchNorm) and module.track_running_stats:
                    dist.broadcast(module.running_var, 0)
                    dist.broadcast(module.running_mean, 0)

        tmpdir = self.tmpdir
        if tmpdir is None:
            tmpdir = osp.join(runner.work_dir, '.eval_hook')

        # All ranks take part in inference; collection happens via GPU or
        # a shared temporary directory, depending on ``gpu_collect``.
        results = self.test_fn(
            runner.model,
            self.dataloader,
            tmpdir=tmpdir,
            gpu_collect=self.gpu_collect)
        # Only rank 0 evaluates the gathered results and saves checkpoints.
        if runner.rank == 0:
            print('\n')
            runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
            key_score = self.evaluate(runner, results)
            # the key_score may be `None` so it needs to skip the action to
            # save the best checkpoint
            # NOTE(review): a legitimate score of exactly 0 is falsy and
            # would also skip saving here — confirm whether the key metric
            # can be 0 in practice.
            if self.save_best and key_score:
                self._save_ckpt(runner, key_score)
RAVE-main/annotator/mmpkg/mmcv/runner/hooks/hook.py
ADDED
|
@@ -0,0 +1,92 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from annotator.mmpkg.mmcv.utils import Registry, is_method_overridden
|
| 3 |
+
|
| 4 |
+
HOOKS = Registry('hook')
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class Hook:
    """Base class for all runner hooks.

    A hook implements callbacks that the runner invokes at fixed points of
    the training/validation loop. Subclasses override only the stages they
    care about; every callback is a no-op by default.
    """

    # Every stage a hook may be registered for, in runner execution order.
    stages = ('before_run', 'before_train_epoch', 'before_train_iter',
              'after_train_iter', 'after_train_epoch', 'before_val_epoch',
              'before_val_iter', 'after_val_iter', 'after_val_epoch',
              'after_run')

    def before_run(self, runner):
        pass

    def after_run(self, runner):
        pass

    def before_epoch(self, runner):
        pass

    def after_epoch(self, runner):
        pass

    def before_iter(self, runner):
        pass

    def after_iter(self, runner):
        pass

    # Train/val-specific callbacks delegate to the generic epoch/iter hooks,
    # so overriding e.g. ``before_epoch`` covers both phases at once.
    def before_train_epoch(self, runner):
        self.before_epoch(runner)

    def before_val_epoch(self, runner):
        self.before_epoch(runner)

    def after_train_epoch(self, runner):
        self.after_epoch(runner)

    def after_val_epoch(self, runner):
        self.after_epoch(runner)

    def before_train_iter(self, runner):
        self.before_iter(runner)

    def before_val_iter(self, runner):
        self.before_iter(runner)

    def after_train_iter(self, runner):
        self.after_iter(runner)

    def after_val_iter(self, runner):
        self.after_iter(runner)

    def every_n_epochs(self, runner, n):
        """True on every ``n``-th epoch (1-based); always False for n <= 0."""
        if n <= 0:
            return False
        return (runner.epoch + 1) % n == 0

    def every_n_inner_iters(self, runner, n):
        """True on every ``n``-th iteration within the current epoch."""
        if n <= 0:
            return False
        return (runner.inner_iter + 1) % n == 0

    def every_n_iters(self, runner, n):
        """True on every ``n``-th global iteration (1-based)."""
        if n <= 0:
            return False
        return (runner.iter + 1) % n == 0

    def end_of_epoch(self, runner):
        """True when the current inner iteration is the last of its epoch."""
        return runner.inner_iter + 1 == len(runner.data_loader)

    def is_last_epoch(self, runner):
        """True during the final training epoch."""
        return runner.epoch + 1 == runner._max_epochs

    def is_last_iter(self, runner):
        """True during the final training iteration."""
        return runner.iter + 1 == runner._max_iters

    def get_triggered_stages(self):
        """Return the stages this hook overrides, ordered like ``stages``."""
        triggered = {
            stage
            for stage in Hook.stages
            if is_method_overridden(stage, Hook, self)
        }

        # Overriding a generic callback implicitly triggers both its train
        # and val counterparts.
        generic_to_specific = {
            'before_epoch': ['before_train_epoch', 'before_val_epoch'],
            'after_epoch': ['after_train_epoch', 'after_val_epoch'],
            'before_iter': ['before_train_iter', 'before_val_iter'],
            'after_iter': ['after_train_iter', 'after_val_iter'],
        }
        for generic, specifics in generic_to_specific.items():
            if is_method_overridden(generic, Hook, self):
                triggered.update(specifics)

        return [stage for stage in Hook.stages if stage in triggered]
RAVE-main/annotator/mmpkg/mmcv/runner/hooks/logger/__init__.py
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from .base import LoggerHook
|
| 3 |
+
from .dvclive import DvcliveLoggerHook
|
| 4 |
+
from .mlflow import MlflowLoggerHook
|
| 5 |
+
from .neptune import NeptuneLoggerHook
|
| 6 |
+
from .pavi import PaviLoggerHook
|
| 7 |
+
from .tensorboard import TensorboardLoggerHook
|
| 8 |
+
from .text import TextLoggerHook
|
| 9 |
+
from .wandb import WandbLoggerHook
|
| 10 |
+
|
| 11 |
+
__all__ = [
|
| 12 |
+
'LoggerHook', 'MlflowLoggerHook', 'PaviLoggerHook',
|
| 13 |
+
'TensorboardLoggerHook', 'TextLoggerHook', 'WandbLoggerHook',
|
| 14 |
+
'NeptuneLoggerHook', 'DvcliveLoggerHook'
|
| 15 |
+
]
|
RAVE-main/annotator/mmpkg/mmcv/runner/hooks/logger/base.py
ADDED
|
@@ -0,0 +1,166 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import numbers
|
| 3 |
+
from abc import ABCMeta, abstractmethod
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
import torch
|
| 7 |
+
|
| 8 |
+
from ..hook import Hook
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class LoggerHook(Hook):
    """Base class for logger hooks.

    Args:
        interval (int): Logging interval (every k iterations).
        ignore_last (bool): Ignore the log of last iterations in each epoch
            if less than `interval`.
        reset_flag (bool): Whether to clear the output buffer after logging.
        by_epoch (bool): Whether EpochBasedRunner is used.
    """

    # NOTE(review): ``__metaclass__`` is Python-2 syntax and has no effect in
    # Python 3, so ``log`` below is not actually enforced as abstract.
    __metaclass__ = ABCMeta

    def __init__(self,
                 interval=10,
                 ignore_last=True,
                 reset_flag=False,
                 by_epoch=True):
        self.interval = interval
        self.ignore_last = ignore_last
        self.reset_flag = reset_flag
        self.by_epoch = by_epoch

    @abstractmethod
    def log(self, runner):
        """Emit the current log buffer contents to the logging backend."""
        pass

    @staticmethod
    def is_scalar(val, include_np=True, include_torch=True):
        """Tell the input variable is a scalar or not.

        Args:
            val: Input variable.
            include_np (bool): Whether include 0-d np.ndarray as a scalar.
            include_torch (bool): Whether include 0-d torch.Tensor as a scalar.

        Returns:
            bool: True or False.
        """
        if isinstance(val, numbers.Number):
            return True
        elif include_np and isinstance(val, np.ndarray) and val.ndim == 0:
            return True
        # NOTE(review): despite the docstring, this branch accepts
        # one-element (1-d) tensors; ``len`` raises TypeError on a true 0-d
        # tensor — confirm the intended tensor shape with callers.
        elif include_torch and isinstance(val, torch.Tensor) and len(val) == 1:
            return True
        else:
            return False

    def get_mode(self, runner):
        """Return 'train' or 'val' for tagging the current log entries."""
        # In train mode, the presence of iteration timing in the log buffer
        # distinguishes genuine training logs from evaluation results that
        # were produced while ``runner.mode`` was still 'train'.
        if runner.mode == 'train':
            if 'time' in runner.log_buffer.output:
                mode = 'train'
            else:
                mode = 'val'
        elif runner.mode == 'val':
            mode = 'val'
        else:
            raise ValueError(f"runner mode should be 'train' or 'val', "
                             f'but got {runner.mode}')
        return mode

    def get_epoch(self, runner):
        """Return the 1-based epoch number for the current log entries."""
        if runner.mode == 'train':
            epoch = runner.epoch + 1
        elif runner.mode == 'val':
            # normal val mode
            # runner.epoch += 1 has been done before val workflow
            epoch = runner.epoch
        else:
            raise ValueError(f"runner mode should be 'train' or 'val', "
                             f'but got {runner.mode}')
        return epoch

    def get_iter(self, runner, inner_iter=False):
        """Get the current training iteration step."""
        if self.by_epoch and inner_iter:
            current_iter = runner.inner_iter + 1
        else:
            current_iter = runner.iter + 1
        return current_iter

    def get_lr_tags(self, runner):
        """Collect learning-rate tags; only the first group's value is kept
        per parameter-group family."""
        tags = {}
        lrs = runner.current_lr()
        if isinstance(lrs, dict):
            for name, value in lrs.items():
                tags[f'learning_rate/{name}'] = value[0]
        else:
            tags['learning_rate'] = lrs[0]
        return tags

    def get_momentum_tags(self, runner):
        """Collect momentum tags; mirrors :meth:`get_lr_tags`."""
        tags = {}
        momentums = runner.current_momentum()
        if isinstance(momentums, dict):
            for name, value in momentums.items():
                tags[f'momentum/{name}'] = value[0]
        else:
            tags['momentum'] = momentums[0]
        return tags

    def get_loggable_tags(self,
                          runner,
                          allow_scalar=True,
                          allow_text=False,
                          add_mode=True,
                          tags_to_skip=('time', 'data_time')):
        """Filter the log buffer into a flat ``{tag: value}`` dict, adding
        lr/momentum tags and (optionally) a 'train/' or 'val/' prefix."""
        tags = {}
        for var, val in runner.log_buffer.output.items():
            if var in tags_to_skip:
                continue
            if self.is_scalar(val) and not allow_scalar:
                continue
            if isinstance(val, str) and not allow_text:
                continue
            if add_mode:
                var = f'{self.get_mode(runner)}/{var}'
            tags[var] = val
        tags.update(self.get_lr_tags(runner))
        tags.update(self.get_momentum_tags(runner))
        return tags

    def before_run(self, runner):
        # Only the LAST registered logger hook clears the log buffer after
        # logging, so every logger hook sees the same accumulated output.
        for hook in runner.hooks[::-1]:
            if isinstance(hook, LoggerHook):
                hook.reset_flag = True
                break

    def before_epoch(self, runner):
        runner.log_buffer.clear()  # clear logs of last epoch

    def after_train_iter(self, runner):
        if self.by_epoch and self.every_n_inner_iters(runner, self.interval):
            runner.log_buffer.average(self.interval)
        elif not self.by_epoch and self.every_n_iters(runner, self.interval):
            runner.log_buffer.average(self.interval)
        elif self.end_of_epoch(runner) and not self.ignore_last:
            # not precise but more stable
            runner.log_buffer.average(self.interval)

        if runner.log_buffer.ready:
            self.log(runner)
            if self.reset_flag:
                runner.log_buffer.clear_output()

    def after_train_epoch(self, runner):
        if runner.log_buffer.ready:
            self.log(runner)
            if self.reset_flag:
                runner.log_buffer.clear_output()

    def after_val_epoch(self, runner):
        runner.log_buffer.average()
        self.log(runner)
        if self.reset_flag:
            runner.log_buffer.clear_output()
RAVE-main/annotator/mmpkg/mmcv/runner/hooks/logger/dvclive.py
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from ...dist_utils import master_only
|
| 3 |
+
from ..hook import HOOKS
|
| 4 |
+
from .base import LoggerHook
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@HOOKS.register_module()
class DvcliveLoggerHook(LoggerHook):
    """Class to log metrics with dvclive.

    It requires `dvclive`_ to be installed.

    Args:
        path (str): Directory where dvclive will write TSV log files.
        interval (int): Logging interval (every k iterations).
            Default 10.
        ignore_last (bool): Ignore the log of last iterations in each epoch
            if less than `interval`.
            Default: True.
        reset_flag (bool): Whether to clear the output buffer after logging.
            Default: True.
        by_epoch (bool): Whether EpochBasedRunner is used.
            Default: True.

    .. _dvclive:
        https://dvc.org/doc/dvclive
    """

    def __init__(self,
                 path,
                 interval=10,
                 ignore_last=True,
                 reset_flag=True,
                 by_epoch=True):

        super().__init__(interval, ignore_last, reset_flag, by_epoch)
        self.path = path
        # Fail fast at construction time if dvclive is not installed.
        self.import_dvclive()

    def import_dvclive(self):
        """Bind the ``dvclive`` module to this hook, importing it lazily."""
        try:
            import dvclive
        except ImportError:
            raise ImportError(
                'Please run "pip install dvclive" to install dvclive')
        self.dvclive = dvclive

    @master_only
    def before_run(self, runner):
        # Point dvclive at the configured output directory.
        self.dvclive.init(self.path)

    @master_only
    def log(self, runner):
        for tag, value in self.get_loggable_tags(runner).items():
            self.dvclive.log(tag, value, step=self.get_iter(runner))
RAVE-main/annotator/mmpkg/mmcv/runner/hooks/logger/mlflow.py
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from ...dist_utils import master_only
|
| 3 |
+
from ..hook import HOOKS
|
| 4 |
+
from .base import LoggerHook
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@HOOKS.register_module()
class MlflowLoggerHook(LoggerHook):

    def __init__(self,
                 exp_name=None,
                 tags=None,
                 log_model=True,
                 interval=10,
                 ignore_last=True,
                 reset_flag=False,
                 by_epoch=True):
        """Log metrics (and optionally the trained model) to MLflow.

        It requires `MLflow`_ to be installed.

        Args:
            exp_name (str, optional): Name of the experiment to activate.
                Default None. When given, the experiment is created on the
                fly if it does not exist yet.
            tags (dict of str: str, optional): Tags to attach to the current
                run. Default None.
            log_model (bool, optional): Whether to store ``runner.model`` as
                an MLflow artifact for the current run after training.
                Default True.
            interval (int): Logging interval (every k iterations).
            ignore_last (bool): Ignore the log of last iterations in each
                epoch if less than `interval`.
            reset_flag (bool): Whether to clear the output buffer after
                logging.
            by_epoch (bool): Whether EpochBasedRunner is used.

        .. _MLflow:
            https://www.mlflow.org/docs/latest/index.html
        """
        super(MlflowLoggerHook, self).__init__(interval, ignore_last,
                                               reset_flag, by_epoch)
        # Fail fast at construction time if mlflow is missing.
        self.import_mlflow()
        self.log_model = log_model
        self.exp_name = exp_name
        self.tags = tags

    def import_mlflow(self):
        """Bind ``mlflow`` and ``mlflow.pytorch`` to this hook.

        Raises:
            ImportError: If mlflow is not installed.
        """
        try:
            import mlflow
            import mlflow.pytorch as mlflow_pytorch
        except ImportError:
            raise ImportError(
                'Please run "pip install mlflow" to install mlflow')
        else:
            self.mlflow = mlflow
            self.mlflow_pytorch = mlflow_pytorch

    @master_only
    def before_run(self, runner):
        """Activate the experiment and set run tags when configured."""
        super(MlflowLoggerHook, self).before_run(runner)
        if self.exp_name is not None:
            self.mlflow.set_experiment(self.exp_name)
        if self.tags is not None:
            self.mlflow.set_tags(self.tags)

    @master_only
    def log(self, runner):
        """Push the current loggable tags as MLflow metrics."""
        metrics = self.get_loggable_tags(runner)
        if metrics:
            self.mlflow.log_metrics(metrics, step=self.get_iter(runner))

    @master_only
    def after_run(self, runner):
        """Optionally store the trained model as an MLflow artifact."""
        if not self.log_model:
            return
        self.mlflow_pytorch.log_model(runner.model, 'models')
|
RAVE-main/annotator/mmpkg/mmcv/runner/hooks/logger/neptune.py
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from ...dist_utils import master_only
|
| 3 |
+
from ..hook import HOOKS
|
| 4 |
+
from .base import LoggerHook
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@HOOKS.register_module()
class NeptuneLoggerHook(LoggerHook):
    """Class to log metrics to NeptuneAI.

    It requires `neptune-client` to be installed.

    Args:
        init_kwargs (dict): a dict contains the initialization keys as below:

            - project (str): Name of a project in a form of
              namespace/project_name. If None, the value of NEPTUNE_PROJECT
              environment variable will be taken.
            - api_token (str): User's API token. If None, the value of
              NEPTUNE_API_TOKEN environment variable will be taken. Note: It
              is strongly recommended to use NEPTUNE_API_TOKEN environment
              variable rather than placing your API token in plain text in
              your source code.
            - name (str, optional, default is 'Untitled'): Editable name of
              the run. Name is displayed in the run's Details and in Runs
              table as a column.

            Check https://docs.neptune.ai/api-reference/neptune#init for
            more init arguments.
        interval (int): Logging interval (every k iterations).
        ignore_last (bool): Ignore the log of last iterations in each epoch
            if less than `interval`.
        reset_flag (bool): Whether to clear the output buffer after logging.
        with_step (bool): If True, each tag is logged with an explicit step;
            otherwise a ``global_step`` entry is added to the tags instead.
        by_epoch (bool): Whether EpochBasedRunner is used.

    .. _NeptuneAI:
        https://docs.neptune.ai/you-should-know/logging-metadata
    """

    def __init__(self,
                 init_kwargs=None,
                 interval=10,
                 ignore_last=True,
                 reset_flag=True,
                 with_step=True,
                 by_epoch=True):

        super(NeptuneLoggerHook, self).__init__(interval, ignore_last,
                                                reset_flag, by_epoch)
        # Import eagerly so a missing dependency fails at construction time.
        self.import_neptune()
        self.init_kwargs = init_kwargs
        self.with_step = with_step

    def import_neptune(self):
        """Bind ``neptune.new`` to ``self.neptune``.

        Raises:
            ImportError: If neptune-client is not installed.
        """
        try:
            import neptune.new as neptune
        except ImportError:
            raise ImportError(
                'Please run "pip install neptune-client" to install neptune')
        self.neptune = neptune
        self.run = None

    @master_only
    def before_run(self, runner):
        """Create the Neptune run (on the master process only)."""
        if self.init_kwargs:
            self.run = self.neptune.init(**self.init_kwargs)
        else:
            self.run = self.neptune.init()

    @master_only
    def log(self, runner):
        """Log every loggable tag of the current iteration to Neptune."""
        tags = self.get_loggable_tags(runner)
        if not tags:
            return
        if self.with_step:
            for tag_name, tag_value in tags.items():
                self.run[tag_name].log(
                    tag_value, step=self.get_iter(runner))
        else:
            # Bugfix: the original assigned tags['global_step'] *inside*
            # the ``for ... in tags.items()`` loop, which raises
            # RuntimeError (dictionary changed size during iteration) on
            # the first pass, and it logged the whole dict for every tag.
            # Add the step once, then log each tag's own value.
            tags['global_step'] = self.get_iter(runner)
            for tag_name, tag_value in tags.items():
                self.run[tag_name].log(tag_value)

    @master_only
    def after_run(self, runner):
        """Stop the Neptune run, flushing any pending data."""
        self.run.stop()
|
RAVE-main/annotator/mmpkg/mmcv/runner/hooks/logger/pavi.py
ADDED
|
@@ -0,0 +1,117 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import json
|
| 3 |
+
import os
|
| 4 |
+
import os.path as osp
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
import yaml
|
| 8 |
+
|
| 9 |
+
import annotator.mmpkg.mmcv as mmcv
|
| 10 |
+
from ....parallel.utils import is_module_wrapper
|
| 11 |
+
from ...dist_utils import master_only
|
| 12 |
+
from ..hook import HOOKS
|
| 13 |
+
from .base import LoggerHook
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
@HOOKS.register_module()
class PaviLoggerHook(LoggerHook):
    """Logger hook that reports metrics to a Pavi ``SummaryWriter``.

    Besides scalar logging, it can upload the final checkpoint
    (``add_last_ckpt``) and record the model graph once (``add_graph``).
    """

    def __init__(self,
                 init_kwargs=None,
                 add_graph=False,
                 add_last_ckpt=False,
                 interval=10,
                 ignore_last=True,
                 reset_flag=False,
                 by_epoch=True,
                 img_key='img_info'):
        """Store options; the writer itself is created in ``before_run``.

        Args:
            init_kwargs (dict, optional): Extra kwargs forwarded to the pavi
                ``SummaryWriter`` (``name``/``model``/``session_text`` are
                filled in by ``before_run``).
            add_graph (bool): Whether to record the model graph at epoch 0.
            add_last_ckpt (bool): Whether to upload ``latest.pth`` after run.
            interval (int): Logging interval (every k iterations).
            ignore_last (bool): Ignore the log of last iterations in each
                epoch if less than ``interval``.
            reset_flag (bool): Whether to clear the output buffer after
                logging.
            by_epoch (bool): Whether EpochBasedRunner is used.
            img_key (str): Key used to pick the input tensor from a data
                batch when ``add_graph`` is enabled.
        """
        super(PaviLoggerHook, self).__init__(interval, ignore_last, reset_flag,
                                             by_epoch)
        self.init_kwargs = init_kwargs
        self.add_graph = add_graph
        self.add_last_ckpt = add_last_ckpt
        self.img_key = img_key

    @master_only
    def before_run(self, runner):
        """Create the pavi ``SummaryWriter``, seeding it with run metadata.

        Raises:
            ImportError: If pavi is not installed.
        """
        super(PaviLoggerHook, self).before_run(runner)
        try:
            from pavi import SummaryWriter
        except ImportError:
            raise ImportError('Please run "pip install pavi" to install pavi.')

        # The run name is the last component of the work directory path.
        self.run_name = runner.work_dir.split('/')[-1]

        if not self.init_kwargs:
            self.init_kwargs = dict()
        self.init_kwargs['name'] = self.run_name
        self.init_kwargs['model'] = runner._model_name
        # Attach the training config (if available in runner.meta) to the
        # writer as YAML 'session_text'.
        if runner.meta is not None:
            if 'config_dict' in runner.meta:
                config_dict = runner.meta['config_dict']
                assert isinstance(
                    config_dict,
                    dict), ('meta["config_dict"] has to be of a dict, '
                            f'but got {type(config_dict)}')
            elif 'config_file' in runner.meta:
                config_file = runner.meta['config_file']
                config_dict = dict(mmcv.Config.fromfile(config_file))
            else:
                config_dict = None
            if config_dict is not None:
                # 'max_.*iter' is parsed in pavi sdk as the maximum iterations
                # to properly set up the progress bar.
                config_dict = config_dict.copy()
                config_dict.setdefault('max_iter', runner.max_iters)
                # non-serializable values are first converted in
                # mmcv.dump to json
                config_dict = json.loads(
                    mmcv.dump(config_dict, file_format='json'))
                session_text = yaml.dump(config_dict)
                self.init_kwargs['session_text'] = session_text
        self.writer = SummaryWriter(**self.init_kwargs)

    def get_step(self, runner):
        """Get the total training step/epoch."""
        # Validation logs are indexed by epoch in epoch-based runs; all
        # other logs are indexed by iteration.
        if self.get_mode(runner) == 'val' and self.by_epoch:
            return self.get_epoch(runner)
        else:
            return self.get_iter(runner)

    @master_only
    def log(self, runner):
        """Write the current loggable tags as a scalar group."""
        tags = self.get_loggable_tags(runner, add_mode=False)
        if tags:
            self.writer.add_scalars(
                self.get_mode(runner), tags, self.get_step(runner))

    @master_only
    def after_run(self, runner):
        """Optionally upload the latest checkpoint, then close the writer."""
        if self.add_last_ckpt:
            ckpt_path = osp.join(runner.work_dir, 'latest.pth')
            if osp.islink(ckpt_path):
                ckpt_path = osp.join(runner.work_dir, os.readlink(ckpt_path))

            if osp.isfile(ckpt_path):
                # runner.epoch += 1 has been done before `after_run`.
                iteration = runner.epoch if self.by_epoch else runner.iter
                # NOTE(review): this early return skips self.writer.close()
                # below when a snapshot is uploaded — confirm that is the
                # intended behavior.
                return self.writer.add_snapshot_file(
                    tag=self.run_name,
                    snapshot_file_path=ckpt_path,
                    iteration=iteration)

        # flush the buffer and send a task ending signal to Pavi
        self.writer.close()

    @master_only
    def before_epoch(self, runner):
        """Record the model graph once, at the start of the first epoch."""
        if runner.epoch == 0 and self.add_graph:
            if is_module_wrapper(runner.model):
                _model = runner.model.module
            else:
                _model = runner.model
            device = next(_model.parameters()).device
            # assumes data[self.img_key] is an indexable batch tensor whose
            # first element is a valid model input — TODO confirm against
            # the dataloader's batch format.
            data = next(iter(runner.data_loader))
            image = data[self.img_key][0:1].to(device)
            with torch.no_grad():
                self.writer.add_graph(_model, image)
|
RAVE-main/annotator/mmpkg/mmcv/runner/hooks/logger/tensorboard.py
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import os.path as osp
|
| 3 |
+
|
| 4 |
+
from annotator.mmpkg.mmcv.utils import TORCH_VERSION, digit_version
|
| 5 |
+
from ...dist_utils import master_only
|
| 6 |
+
from ..hook import HOOKS
|
| 7 |
+
from .base import LoggerHook
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
@HOOKS.register_module()
class TensorboardLoggerHook(LoggerHook):
    """Logger hook writing scalar and text tags to TensorBoard."""

    def __init__(self,
                 log_dir=None,
                 interval=10,
                 ignore_last=True,
                 reset_flag=False,
                 by_epoch=True):
        """Remember ``log_dir``; the writer is created in ``before_run``."""
        super(TensorboardLoggerHook, self).__init__(interval, ignore_last,
                                                    reset_flag, by_epoch)
        self.log_dir = log_dir

    @master_only
    def before_run(self, runner):
        """Create the ``SummaryWriter``, picking the backend by torch version.

        Raises:
            ImportError: If the required tensorboard backend is missing.
        """
        super(TensorboardLoggerHook, self).before_run(runner)
        needs_tensorboardx = (
            TORCH_VERSION == 'parrots'
            or digit_version(TORCH_VERSION) < digit_version('1.1'))
        if needs_tensorboardx:
            try:
                from tensorboardX import SummaryWriter
            except ImportError:
                raise ImportError('Please install tensorboardX to use '
                                  'TensorboardLoggerHook.')
        else:
            try:
                from torch.utils.tensorboard import SummaryWriter
            except ImportError:
                raise ImportError(
                    'Please run "pip install future tensorboard" to install '
                    'the dependencies to use torch.utils.tensorboard '
                    '(applicable to PyTorch 1.1 or higher)')

        if self.log_dir is None:
            self.log_dir = osp.join(runner.work_dir, 'tf_logs')
        self.writer = SummaryWriter(self.log_dir)

    @master_only
    def log(self, runner):
        """Write each loggable tag: strings as text, everything else as a
        scalar."""
        step = self.get_iter(runner)
        loggable = self.get_loggable_tags(runner, allow_text=True)
        for tag, value in loggable.items():
            if isinstance(value, str):
                self.writer.add_text(tag, value, step)
            else:
                self.writer.add_scalar(tag, value, step)

    @master_only
    def after_run(self, runner):
        """Flush pending events and close the writer."""
        self.writer.close()
|
RAVE-main/annotator/mmpkg/mmcv/runner/hooks/logger/text.py
ADDED
|
@@ -0,0 +1,256 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import datetime
|
| 3 |
+
import os
|
| 4 |
+
import os.path as osp
|
| 5 |
+
from collections import OrderedDict
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
import torch.distributed as dist
|
| 9 |
+
|
| 10 |
+
import annotator.mmpkg.mmcv as mmcv
|
| 11 |
+
from annotator.mmpkg.mmcv.fileio.file_client import FileClient
|
| 12 |
+
from annotator.mmpkg.mmcv.utils import is_tuple_of, scandir
|
| 13 |
+
from ..hook import HOOKS
|
| 14 |
+
from .base import LoggerHook
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
@HOOKS.register_module()
class TextLoggerHook(LoggerHook):
    """Logger hook in text.

    In this logger hook, the information will be printed on terminal and
    saved in json file.

    Args:
        by_epoch (bool, optional): Whether EpochBasedRunner is used.
            Default: True.
        interval (int, optional): Logging interval (every k iterations).
            Default: 10.
        ignore_last (bool, optional): Ignore the log of last iterations in each
            epoch if less than :attr:`interval`. Default: True.
        reset_flag (bool, optional): Whether to clear the output buffer after
            logging. Default: False.
        interval_exp_name (int, optional): Logging interval for experiment
            name. This feature is to help users conveniently get the experiment
            information from screen or log file. Default: 1000.
        out_dir (str, optional): Logs are saved in ``runner.work_dir`` default.
            If ``out_dir`` is specified, logs will be copied to a new directory
            which is the concatenation of ``out_dir`` and the last level
            directory of ``runner.work_dir``. Default: None.
            `New in version 1.3.16.`
        out_suffix (str or tuple[str], optional): Those filenames ending with
            ``out_suffix`` will be copied to ``out_dir``.
            Default: ('.log.json', '.log', '.py').
            `New in version 1.3.16.`
        keep_local (bool, optional): Whether to keep local log when
            :attr:`out_dir` is specified. If False, the local log will be
            removed. Default: True.
            `New in version 1.3.16.`
        file_client_args (dict, optional): Arguments to instantiate a
            FileClient. See :class:`mmcv.fileio.FileClient` for details.
            Default: None.
            `New in version 1.3.16.`
    """

    def __init__(self,
                 by_epoch=True,
                 interval=10,
                 ignore_last=True,
                 reset_flag=False,
                 interval_exp_name=1000,
                 out_dir=None,
                 out_suffix=('.log.json', '.log', '.py'),
                 keep_local=True,
                 file_client_args=None):
        super(TextLoggerHook, self).__init__(interval, ignore_last, reset_flag,
                                             by_epoch)
        self.by_epoch = by_epoch
        self.time_sec_tot = 0
        self.interval_exp_name = interval_exp_name

        if out_dir is None and file_client_args is not None:
            # Bugfix: the two adjacent literals previously concatenated to
            # "is notspecified." (missing space).
            raise ValueError(
                'file_client_args should be "None" when `out_dir` is not '
                'specified.')
        self.out_dir = out_dir

        if not (out_dir is None or isinstance(out_dir, str)
                or is_tuple_of(out_dir, str)):
            # Bugfix: the second literal was missing the f-prefix, so the
            # "{out_dir}" placeholder was printed verbatim.
            raise TypeError('out_dir should be "None" or string or tuple of '
                            f'string, but got {out_dir}')
        self.out_suffix = out_suffix

        self.keep_local = keep_local
        self.file_client_args = file_client_args
        if self.out_dir is not None:
            self.file_client = FileClient.infer_client(file_client_args,
                                                       self.out_dir)

    def before_run(self, runner):
        """Resolve the final output directory and dump runner meta."""
        super(TextLoggerHook, self).before_run(runner)

        if self.out_dir is not None:
            self.file_client = FileClient.infer_client(self.file_client_args,
                                                       self.out_dir)
            # The final `self.out_dir` is the concatenation of `self.out_dir`
            # and the last level directory of `runner.work_dir`
            basename = osp.basename(runner.work_dir.rstrip(osp.sep))
            self.out_dir = self.file_client.join_path(self.out_dir, basename)
            runner.logger.info(
                (f'Text logs will be saved to {self.out_dir} by '
                 f'{self.file_client.name} after the training process.'))

        self.start_iter = runner.iter
        self.json_log_path = osp.join(runner.work_dir,
                                      f'{runner.timestamp}.log.json')
        if runner.meta is not None:
            self._dump_log(runner.meta, runner)

    def _get_max_memory(self, runner):
        """Return the peak CUDA memory (MB) across all workers."""
        device = getattr(runner.model, 'output_device', None)
        mem = torch.cuda.max_memory_allocated(device=device)
        mem_mb = torch.tensor([mem / (1024 * 1024)],
                              dtype=torch.int,
                              device=device)
        if runner.world_size > 1:
            dist.reduce(mem_mb, 0, op=dist.ReduceOp.MAX)
        return mem_mb.item()

    def _log_info(self, log_dict, runner):
        """Format ``log_dict`` into a human-readable line and print it."""
        # print exp name for users to distinguish experiments
        # at every ``interval_exp_name`` iterations and the end of each epoch
        if runner.meta is not None and 'exp_name' in runner.meta:
            if (self.every_n_iters(runner, self.interval_exp_name)) or (
                    self.by_epoch and self.end_of_epoch(runner)):
                exp_info = f'Exp name: {runner.meta["exp_name"]}'
                runner.logger.info(exp_info)

        if log_dict['mode'] == 'train':
            if isinstance(log_dict['lr'], dict):
                lr_str = []
                for k, val in log_dict['lr'].items():
                    lr_str.append(f'lr_{k}: {val:.3e}')
                lr_str = ' '.join(lr_str)
            else:
                lr_str = f'lr: {log_dict["lr"]:.3e}'

            # by epoch: Epoch [4][100/1000]
            # by iter: Iter [100/100000]
            if self.by_epoch:
                log_str = f'Epoch [{log_dict["epoch"]}]' \
                          f'[{log_dict["iter"]}/{len(runner.data_loader)}]\t'
            else:
                log_str = f'Iter [{log_dict["iter"]}/{runner.max_iters}]\t'
            log_str += f'{lr_str}, '

            if 'time' in log_dict.keys():
                self.time_sec_tot += (log_dict['time'] * self.interval)
                time_sec_avg = self.time_sec_tot / (
                    runner.iter - self.start_iter + 1)
                eta_sec = time_sec_avg * (runner.max_iters - runner.iter - 1)
                eta_str = str(datetime.timedelta(seconds=int(eta_sec)))
                log_str += f'eta: {eta_str}, '
                log_str += f'time: {log_dict["time"]:.3f}, ' \
                           f'data_time: {log_dict["data_time"]:.3f}, '
                # statistic memory
                if torch.cuda.is_available():
                    log_str += f'memory: {log_dict["memory"]}, '
        else:
            # val/test time
            # here 1000 is the length of the val dataloader
            # by epoch: Epoch[val] [4][1000]
            # by iter: Iter[val] [1000]
            if self.by_epoch:
                log_str = f'Epoch({log_dict["mode"]}) ' \
                    f'[{log_dict["epoch"]}][{log_dict["iter"]}]\t'
            else:
                log_str = f'Iter({log_dict["mode"]}) [{log_dict["iter"]}]\t'

        log_items = []
        for name, val in log_dict.items():
            # TODO: resolve this hack
            # these items have been in log_str
            if name in [
                    'mode', 'Epoch', 'iter', 'lr', 'time', 'data_time',
                    'memory', 'epoch'
            ]:
                continue
            if isinstance(val, float):
                val = f'{val:.4f}'
            log_items.append(f'{name}: {val}')
        log_str += ', '.join(log_items)

        runner.logger.info(log_str)

    def _dump_log(self, log_dict, runner):
        """Append ``log_dict`` as a single JSON line (rank 0 only)."""
        # dump log in json format
        json_log = OrderedDict()
        for k, v in log_dict.items():
            json_log[k] = self._round_float(v)
        # only append log at last line
        if runner.rank == 0:
            with open(self.json_log_path, 'a+') as f:
                mmcv.dump(json_log, f, file_format='json')
                f.write('\n')

    def _round_float(self, items):
        """Round floats (recursively through lists) to 5 decimal places."""
        if isinstance(items, list):
            return [self._round_float(item) for item in items]
        elif isinstance(items, float):
            return round(items, 5)
        else:
            return items

    def log(self, runner):
        """Collect mode/epoch/iter/lr/memory plus buffered outputs, then
        print and dump them. Returns the assembled log dict."""
        if 'eval_iter_num' in runner.log_buffer.output:
            # this doesn't modify runner.iter and is regardless of by_epoch
            cur_iter = runner.log_buffer.output.pop('eval_iter_num')
        else:
            cur_iter = self.get_iter(runner, inner_iter=True)

        log_dict = OrderedDict(
            mode=self.get_mode(runner),
            epoch=self.get_epoch(runner),
            iter=cur_iter)

        # only record lr of the first param group
        cur_lr = runner.current_lr()
        if isinstance(cur_lr, list):
            log_dict['lr'] = cur_lr[0]
        else:
            assert isinstance(cur_lr, dict)
            log_dict['lr'] = {}
            for k, lr_ in cur_lr.items():
                assert isinstance(lr_, list)
                log_dict['lr'].update({k: lr_[0]})

        if 'time' in runner.log_buffer.output:
            # statistic memory
            if torch.cuda.is_available():
                log_dict['memory'] = self._get_max_memory(runner)

        log_dict = dict(log_dict, **runner.log_buffer.output)

        self._log_info(log_dict, runner)
        self._dump_log(log_dict, runner)
        return log_dict

    def after_run(self, runner):
        """Copy/upload log files to ``out_dir``; optionally delete local
        copies."""
        # copy or upload logs to self.out_dir
        if self.out_dir is not None:
            for filename in scandir(runner.work_dir, self.out_suffix, True):
                local_filepath = osp.join(runner.work_dir, filename)
                out_filepath = self.file_client.join_path(
                    self.out_dir, filename)
                with open(local_filepath, 'r') as f:
                    self.file_client.put_text(f.read(), out_filepath)

                runner.logger.info(
                    (f'The file {local_filepath} has been uploaded to '
                     f'{out_filepath}.'))

                if not self.keep_local:
                    os.remove(local_filepath)
                    runner.logger.info(
                        (f'{local_filepath} was removed due to the '
                         '`self.keep_local=False`'))
|
RAVE-main/annotator/mmpkg/mmcv/runner/hooks/logger/wandb.py
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from ...dist_utils import master_only
|
| 3 |
+
from ..hook import HOOKS
|
| 4 |
+
from .base import LoggerHook
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@HOOKS.register_module()
class WandbLoggerHook(LoggerHook):
    """Logger hook sending tags to Weights & Biases (wandb)."""

    def __init__(self,
                 init_kwargs=None,
                 interval=10,
                 ignore_last=True,
                 reset_flag=False,
                 commit=True,
                 by_epoch=True,
                 with_step=True):
        """Store options and import wandb eagerly so a missing dependency
        fails at construction time."""
        super(WandbLoggerHook, self).__init__(interval, ignore_last,
                                              reset_flag, by_epoch)
        self.import_wandb()
        self.commit = commit
        self.with_step = with_step
        self.init_kwargs = init_kwargs

    def import_wandb(self):
        """Bind the ``wandb`` module to ``self.wandb``.

        Raises:
            ImportError: If wandb is not installed.
        """
        try:
            import wandb
        except ImportError:
            raise ImportError(
                'Please run "pip install wandb" to install wandb')
        else:
            self.wandb = wandb

    @master_only
    def before_run(self, runner):
        """Start a wandb run, forwarding ``init_kwargs`` when given."""
        super(WandbLoggerHook, self).before_run(runner)
        if self.wandb is None:
            self.import_wandb()
        self.wandb.init(**(self.init_kwargs or {}))

    @master_only
    def log(self, runner):
        """Log the current tags, either with an explicit step or with a
        ``global_step`` entry added to the payload."""
        tags = self.get_loggable_tags(runner)
        if not tags:
            return
        if self.with_step:
            self.wandb.log(
                tags, step=self.get_iter(runner), commit=self.commit)
        else:
            tags['global_step'] = self.get_iter(runner)
            self.wandb.log(tags, commit=self.commit)

    @master_only
    def after_run(self, runner):
        """Finish the wandb run."""
        self.wandb.join()
|
RAVE-main/annotator/mmpkg/mmcv/runner/hooks/momentum_updater.py
ADDED
|
@@ -0,0 +1,493 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import annotator.mmpkg.mmcv as mmcv
|
| 3 |
+
from .hook import HOOKS, Hook
|
| 4 |
+
from .lr_updater import annealing_cos, annealing_linear, format_param
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class MomentumUpdaterHook(Hook):
    """Base hook that adjusts optimizer momentum on a schedule.

    Subclasses implement :meth:`get_momentum`, which maps a param group's
    base momentum to the momentum for the current epoch/iteration.  The
    hook supports optimizers exposing momentum either as ``momentum`` or as
    the first element of Adam-style ``betas``, and also a dict of
    optimizers (one entry per model component).

    Args:
        by_epoch (bool): Update momentum by epoch (True) or by iteration
            (False). Default: True.
        warmup (str, optional): Warmup type; one of 'constant', 'linear',
            'exp' or None (no warmup). Default: None.
        warmup_iters (int): Number of iterations the warmup lasts.
            Default: 0.
        warmup_ratio (float): During warmup the momentum is the regular
            momentum divided by a factor derived from this ratio (momentum
            is warmed *down* towards the regular value). Must be in (0, 1].
            Default: 0.9.
    """

    def __init__(self,
                 by_epoch=True,
                 warmup=None,
                 warmup_iters=0,
                 warmup_ratio=0.9):
        # Validate the "warmup" argument and its companion settings.
        if warmup is not None:
            if warmup not in ['constant', 'linear', 'exp']:
                # BUGFIX: the message previously omitted the supported
                # 'exp' warmup type.
                raise ValueError(
                    f'"{warmup}" is not a supported type for warming up, '
                    'valid types are "constant", "linear" and "exp"')
            assert warmup_iters > 0, \
                '"warmup_iters" must be a positive integer'
            # BUGFIX: message previously referred to "warmup_momentum".
            assert 0 < warmup_ratio <= 1.0, \
                '"warmup_ratio" must be in range (0,1]'

        self.by_epoch = by_epoch
        self.warmup = warmup
        self.warmup_iters = warmup_iters
        self.warmup_ratio = warmup_ratio

        self.base_momentum = []  # initial momentum for all param groups
        # Expected momentum if no warmup is performed.  ``regular_mom`` is
        # kept as a synchronized alias because earlier code read that name.
        self.regular_momentum = []
        self.regular_mom = self.regular_momentum

    def _set_momentum(self, runner, momentum_groups):
        """Write ``momentum_groups`` into the optimizer(s)' param groups."""
        if isinstance(runner.optimizer, dict):
            for k, optim in runner.optimizer.items():
                for param_group, mom in zip(optim.param_groups,
                                            momentum_groups[k]):
                    if 'momentum' in param_group.keys():
                        param_group['momentum'] = mom
                    elif 'betas' in param_group.keys():
                        # Adam-style optimizers store momentum as betas[0].
                        param_group['betas'] = (mom, param_group['betas'][1])
        else:
            for param_group, mom in zip(runner.optimizer.param_groups,
                                        momentum_groups):
                if 'momentum' in param_group.keys():
                    param_group['momentum'] = mom
                elif 'betas' in param_group.keys():
                    param_group['betas'] = (mom, param_group['betas'][1])

    def get_momentum(self, runner, base_momentum):
        """Return the scheduled momentum for a single param group."""
        raise NotImplementedError

    def get_regular_momentum(self, runner):
        """Compute the (non-warmup) momentum for every param group."""
        if isinstance(runner.optimizer, dict):
            momentum_groups = {}
            for k in runner.optimizer.keys():
                _momentum_group = [
                    self.get_momentum(runner, _base_momentum)
                    for _base_momentum in self.base_momentum[k]
                ]
                momentum_groups.update({k: _momentum_group})
            return momentum_groups
        else:
            return [
                self.get_momentum(runner, _base_momentum)
                for _base_momentum in self.base_momentum
            ]

    def get_warmup_momentum(self, cur_iters):
        """Compute the warmup momentum at iteration ``cur_iters``."""

        def _get_warmup_momentum(cur_iters, regular_momentum):
            # BUGFIX: this closure previously ignored its
            # ``regular_momentum`` argument and read a mix of
            # ``self.regular_momentum`` / ``self.regular_mom``, which broke
            # warmup for dict optimizers (the per-key momentum was never
            # used).  It now consistently uses the argument.
            if self.warmup == 'constant':
                warmup_momentum = [
                    _momentum / self.warmup_ratio
                    for _momentum in regular_momentum
                ]
            elif self.warmup == 'linear':
                k = (1 - cur_iters / self.warmup_iters) * (1 -
                                                           self.warmup_ratio)
                warmup_momentum = [
                    _momentum / (1 - k) for _momentum in regular_momentum
                ]
            elif self.warmup == 'exp':
                k = self.warmup_ratio**(1 - cur_iters / self.warmup_iters)
                warmup_momentum = [
                    _momentum / k for _momentum in regular_momentum
                ]
            return warmup_momentum

        if isinstance(self.regular_momentum, dict):
            momentum_groups = {}
            for key, regular_momentum in self.regular_momentum.items():
                momentum_groups[key] = _get_warmup_momentum(
                    cur_iters, regular_momentum)
            return momentum_groups
        else:
            return _get_warmup_momentum(cur_iters, self.regular_momentum)

    def before_run(self, runner):
        # NOTE: when resuming from a checkpoint, if 'initial_momentum' is
        # not saved, it will be set according to the optimizer params.
        if isinstance(runner.optimizer, dict):
            self.base_momentum = {}
            for k, optim in runner.optimizer.items():
                for group in optim.param_groups:
                    if 'momentum' in group.keys():
                        group.setdefault('initial_momentum', group['momentum'])
                    else:
                        group.setdefault('initial_momentum', group['betas'][0])
                _base_momentum = [
                    group['initial_momentum'] for group in optim.param_groups
                ]
                self.base_momentum.update({k: _base_momentum})
        else:
            for group in runner.optimizer.param_groups:
                if 'momentum' in group.keys():
                    group.setdefault('initial_momentum', group['momentum'])
                else:
                    group.setdefault('initial_momentum', group['betas'][0])
            self.base_momentum = [
                group['initial_momentum']
                for group in runner.optimizer.param_groups
            ]

    def before_train_epoch(self, runner):
        if not self.by_epoch:
            return
        # Keep ``regular_momentum`` and the historical alias
        # ``regular_mom`` in sync so both names observe the same value.
        self.regular_momentum = self.get_regular_momentum(runner)
        self.regular_mom = self.regular_momentum
        self._set_momentum(runner, self.regular_momentum)

    def before_train_iter(self, runner):
        cur_iter = runner.iter
        if not self.by_epoch:
            self.regular_momentum = self.get_regular_momentum(runner)
            self.regular_mom = self.regular_momentum
            if self.warmup is None or cur_iter >= self.warmup_iters:
                self._set_momentum(runner, self.regular_momentum)
            else:
                warmup_momentum = self.get_warmup_momentum(cur_iter)
                self._set_momentum(runner, warmup_momentum)
        elif self.by_epoch:
            if self.warmup is None or cur_iter > self.warmup_iters:
                return
            elif cur_iter == self.warmup_iters:
                self._set_momentum(runner, self.regular_momentum)
            else:
                warmup_momentum = self.get_warmup_momentum(cur_iter)
                self._set_momentum(runner, warmup_momentum)
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
@HOOKS.register_module()
class StepMomentumUpdaterHook(MomentumUpdaterHook):
    """Step momentum scheduler with optional minimum-value clipping.

    Args:
        step (int | list[int]): Step to decay the momentum. An int is
            interpreted as a fixed decay interval; a list gives explicit
            milestones at which the momentum decays.
        gamma (float, optional): Multiplicative decay factor. Default: 0.5.
        min_momentum (float, optional): Lower bound for the momentum. When
            the decayed momentum falls below it, the value is clipped. When
            None, no clipping is performed. Default: None.
    """

    def __init__(self, step, gamma=0.5, min_momentum=None, **kwargs):
        # Accept either a single interval or a list of milestones.
        if isinstance(step, list):
            assert mmcv.is_list_of(step, int)
            assert all([s > 0 for s in step])
        elif isinstance(step, int):
            assert step > 0
        else:
            raise TypeError('"step" must be a list or integer')
        self.step = step
        self.gamma = gamma
        self.min_momentum = min_momentum
        super(StepMomentumUpdaterHook, self).__init__(**kwargs)

    def get_momentum(self, runner, base_momentum):
        progress = runner.epoch if self.by_epoch else runner.iter

        # Number of decays applied so far: for an int interval it is the
        # integer quotient; for milestones it is the index of the first
        # milestone that lies beyond the current progress.
        if isinstance(self.step, int):
            exp = progress // self.step
        else:
            exp = next(
                (idx for idx, milestone in enumerate(self.step)
                 if progress < milestone), len(self.step))

        momentum = base_momentum * (self.gamma**exp)
        if self.min_momentum is None:
            return momentum
        # Clip so the schedule never drops below the configured floor.
        return max(momentum, self.min_momentum)
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
@HOOKS.register_module()
class CosineAnnealingMomentumUpdaterHook(MomentumUpdaterHook):
    """Cosine-annealing momentum schedule.

    Exactly one of ``min_momentum`` and ``min_momentum_ratio`` must be
    given: the former is an absolute target, the latter is relative to the
    base momentum of each param group.
    """

    def __init__(self, min_momentum=None, min_momentum_ratio=None, **kwargs):
        # XOR: exactly one way of specifying the annealing target.
        assert (min_momentum is None) ^ (min_momentum_ratio is None)
        self.min_momentum = min_momentum
        self.min_momentum_ratio = min_momentum_ratio
        super(CosineAnnealingMomentumUpdaterHook, self).__init__(**kwargs)

    def get_momentum(self, runner, base_momentum):
        if self.by_epoch:
            progress, max_progress = runner.epoch, runner.max_epochs
        else:
            progress, max_progress = runner.iter, runner.max_iters

        if self.min_momentum_ratio is None:
            target_momentum = self.min_momentum
        else:
            target_momentum = base_momentum * self.min_momentum_ratio

        return annealing_cos(base_momentum, target_momentum,
                             progress / max_progress)
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
@HOOKS.register_module()
class CyclicMomentumUpdaterHook(MomentumUpdaterHook):
    """Cyclic momentum Scheduler.

    Implement the cyclical momentum scheduler policy described in
    https://arxiv.org/pdf/1708.07120.pdf

    This momentum scheduler usually used together with the CyclicLRUpdater
    to improve the performance in the 3D detection area.

    Attributes:
        target_ratio (tuple[float]): Relative ratio of the lowest momentum and
            the highest momentum to the initial momentum.
        cyclic_times (int): Number of cycles during training
        step_ratio_up (float): The ratio of the increasing process of momentum
            in the total cycle.
        by_epoch (bool): Whether to update momentum by epoch.
    """

    def __init__(self,
                 by_epoch=False,
                 target_ratio=(0.85 / 0.95, 1),
                 cyclic_times=1,
                 step_ratio_up=0.4,
                 **kwargs):
        # Normalise ``target_ratio`` into a pair (up_ratio, down_ratio).
        if isinstance(target_ratio, float):
            target_ratio = (target_ratio, target_ratio / 1e5)
        elif isinstance(target_ratio, tuple):
            if len(target_ratio) == 1:
                target_ratio = (target_ratio[0], target_ratio[0] / 1e5)
        else:
            raise ValueError('target_ratio should be either float '
                             f'or tuple, got {type(target_ratio)}')

        assert len(target_ratio) == 2, \
            '"target_ratio" must be list or tuple of two floats'
        assert 0 <= step_ratio_up < 1.0, \
            '"step_ratio_up" must be in range [0,1)'

        self.target_ratio = target_ratio
        self.cyclic_times = cyclic_times
        self.step_ratio_up = step_ratio_up
        self.momentum_phases = []  # filled in ``before_run``
        # currently only support by_epoch=False
        assert not by_epoch, \
            'currently only support "by_epoch" = False'
        super(CyclicMomentumUpdaterHook, self).__init__(by_epoch, **kwargs)

    def before_run(self, runner):
        """Split each cycle into an "up" phase and a "down" phase."""
        super(CyclicMomentumUpdaterHook, self).before_run(runner)
        iters_per_cycle = runner.max_iters // self.cyclic_times
        up_iters = int(self.step_ratio_up * iters_per_cycle)
        # Each phase: [start, end, cycle_length, start_ratio, end_ratio].
        self.momentum_phases.append(
            [0, up_iters, iters_per_cycle, 1, self.target_ratio[0]])
        self.momentum_phases.append([
            up_iters, iters_per_cycle, iters_per_cycle,
            self.target_ratio[0], self.target_ratio[1]
        ])

    def get_momentum(self, runner, base_momentum):
        cur = runner.iter
        for (phase_start, phase_end, iters_per_cycle, start_ratio,
             end_ratio) in self.momentum_phases:
            # Fold the global iteration into the current cycle.
            cur %= iters_per_cycle
            if phase_start <= cur < phase_end:
                frac = (cur - phase_start) / (phase_end - phase_start)
                return annealing_cos(base_momentum * start_ratio,
                                     base_momentum * end_ratio, frac)
|
| 296 |
+
|
| 297 |
+
|
| 298 |
+
@HOOKS.register_module()
class OneCycleMomentumUpdaterHook(MomentumUpdaterHook):
    """OneCycle momentum Scheduler.

    This momentum scheduler usually used together with the OneCycleLrUpdater
    to improve the performance.

    Args:
        base_momentum (float or list): Lower momentum boundaries in the cycle
            for each parameter group. Note that momentum is cycled inversely
            to learning rate; at the peak of a cycle, momentum is
            'base_momentum' and learning rate is 'max_lr'.
            Default: 0.85
        max_momentum (float or list): Upper momentum boundaries in the cycle
            for each parameter group. Functionally,
            it defines the cycle amplitude (max_momentum - base_momentum).
            Note that momentum is cycled inversely
            to learning rate; at the start of a cycle, momentum is
            'max_momentum' and learning rate is 'base_lr'
            Default: 0.95
        pct_start (float): The percentage of the cycle (in number of steps)
            spent increasing the learning rate.
            Default: 0.3
        anneal_strategy (str): {'cos', 'linear'}
            Specifies the annealing strategy: 'cos' for cosine annealing,
            'linear' for linear annealing.
            Default: 'cos'
        three_phase (bool): If three_phase is True, use a third phase of the
            schedule to annihilate the learning rate according to
            final_div_factor instead of modifying the second phase (the first
            two phases will be symmetrical about the step indicated by
            pct_start).
            Default: False
    """

    def __init__(self,
                 base_momentum=0.85,
                 max_momentum=0.95,
                 pct_start=0.3,
                 anneal_strategy='cos',
                 three_phase=False,
                 **kwargs):
        # validate by_epoch, currently only support by_epoch=False
        if 'by_epoch' not in kwargs:
            kwargs['by_epoch'] = False
        else:
            assert not kwargs['by_epoch'], \
                'currently only support "by_epoch" = False'
        if not isinstance(base_momentum, (float, list, dict)):
            raise ValueError('base_momentum must be the type among of float,'
                             'list or dict.')
        self._base_momentum = base_momentum
        if not isinstance(max_momentum, (float, list, dict)):
            raise ValueError('max_momentum must be the type among of float,'
                             'list or dict.')
        self._max_momentum = max_momentum
        # validate pct_start
        if pct_start < 0 or pct_start > 1 or not isinstance(pct_start, float):
            raise ValueError('Expected float between 0 and 1 pct_start, but '
                             f'got {pct_start}')
        self.pct_start = pct_start
        # validate anneal_strategy
        if anneal_strategy not in ['cos', 'linear']:
            raise ValueError('anneal_strategy must by one of "cos" or '
                             f'"linear", instead got {anneal_strategy}')
        elif anneal_strategy == 'cos':
            self.anneal_func = annealing_cos
        elif anneal_strategy == 'linear':
            self.anneal_func = annealing_linear
        self.three_phase = three_phase
        self.momentum_phases = []  # init momentum_phases
        super(OneCycleMomentumUpdaterHook, self).__init__(**kwargs)

    def before_run(self, runner):
        """Record base/max momentum per param group and build the phases.

        Writes ``base_momentum`` / ``max_momentum`` into every param group
        and initialises momentum at ``max_momentum`` (momentum is cycled
        inversely to the learning rate).
        """
        if isinstance(runner.optimizer, dict):
            for k, optim in runner.optimizer.items():
                if ('momentum' not in optim.defaults
                        and 'betas' not in optim.defaults):
                    raise ValueError('optimizer must support momentum with'
                                     'option enabled')
                # Adam-style optimizers expose momentum as betas[0].
                self.use_beta1 = 'betas' in optim.defaults
                _base_momentum = format_param(k, optim, self._base_momentum)
                _max_momentum = format_param(k, optim, self._max_momentum)
                for group, b_momentum, m_momentum in zip(
                        optim.param_groups, _base_momentum, _max_momentum):
                    if self.use_beta1:
                        _, beta2 = group['betas']
                        group['betas'] = (m_momentum, beta2)
                    else:
                        group['momentum'] = m_momentum
                    group['base_momentum'] = b_momentum
                    group['max_momentum'] = m_momentum
        else:
            optim = runner.optimizer
            if ('momentum' not in optim.defaults
                    and 'betas' not in optim.defaults):
                raise ValueError('optimizer must support momentum with'
                                 'option enabled')
            self.use_beta1 = 'betas' in optim.defaults
            k = type(optim).__name__
            _base_momentum = format_param(k, optim, self._base_momentum)
            _max_momentum = format_param(k, optim, self._max_momentum)
            for group, b_momentum, m_momentum in zip(optim.param_groups,
                                                     _base_momentum,
                                                     _max_momentum):
                if self.use_beta1:
                    _, beta2 = group['betas']
                    group['betas'] = (m_momentum, beta2)
                else:
                    group['momentum'] = m_momentum
                group['base_momentum'] = b_momentum
                group['max_momentum'] = m_momentum

        # Phase boundaries are expressed as (fractional) iteration indices;
        # each phase anneals between the named per-group momentum keys.
        if self.three_phase:
            self.momentum_phases.append({
                'end_iter':
                float(self.pct_start * runner.max_iters) - 1,
                'start_momentum':
                'max_momentum',
                'end_momentum':
                'base_momentum'
            })
            self.momentum_phases.append({
                'end_iter':
                float(2 * self.pct_start * runner.max_iters) - 2,
                'start_momentum':
                'base_momentum',
                'end_momentum':
                'max_momentum'
            })
            self.momentum_phases.append({
                'end_iter': runner.max_iters - 1,
                'start_momentum': 'max_momentum',
                'end_momentum': 'max_momentum'
            })
        else:
            self.momentum_phases.append({
                'end_iter':
                float(self.pct_start * runner.max_iters) - 1,
                'start_momentum':
                'max_momentum',
                'end_momentum':
                'base_momentum'
            })
            self.momentum_phases.append({
                'end_iter': runner.max_iters - 1,
                'start_momentum': 'base_momentum',
                'end_momentum': 'max_momentum'
            })

    def _set_momentum(self, runner, momentum_groups):
        # NOTE(review): this override appears identical to the base-class
        # implementation in MomentumUpdaterHook — confirm before removing.
        if isinstance(runner.optimizer, dict):
            for k, optim in runner.optimizer.items():
                for param_group, mom in zip(optim.param_groups,
                                            momentum_groups[k]):
                    if 'momentum' in param_group.keys():
                        param_group['momentum'] = mom
                    elif 'betas' in param_group.keys():
                        param_group['betas'] = (mom, param_group['betas'][1])
        else:
            for param_group, mom in zip(runner.optimizer.param_groups,
                                        momentum_groups):
                if 'momentum' in param_group.keys():
                    param_group['momentum'] = mom
                elif 'betas' in param_group.keys():
                    param_group['betas'] = (mom, param_group['betas'][1])

    def get_momentum(self, runner, param_group):
        """Anneal within the phase containing the current iteration.

        Note: unlike the other updaters, ``get_momentum`` here receives the
        whole param group (it needs the group's stored ``base_momentum`` /
        ``max_momentum`` keys), not a bare momentum value.
        """
        curr_iter = runner.iter
        start_iter = 0
        for i, phase in enumerate(self.momentum_phases):
            end_iter = phase['end_iter']
            # Fall through to the last phase for any iteration beyond the
            # final boundary, so ``momentum`` is always assigned.
            if curr_iter <= end_iter or i == len(self.momentum_phases) - 1:
                pct = (curr_iter - start_iter) / (end_iter - start_iter)
                momentum = self.anneal_func(
                    param_group[phase['start_momentum']],
                    param_group[phase['end_momentum']], pct)
                break
            start_iter = end_iter
        return momentum

    def get_regular_momentum(self, runner):
        """Compute the scheduled momentum for every param group."""
        if isinstance(runner.optimizer, dict):
            momentum_groups = {}
            for k, optim in runner.optimizer.items():
                _momentum_group = [
                    self.get_momentum(runner, param_group)
                    for param_group in optim.param_groups
                ]
                momentum_groups.update({k: _momentum_group})
            return momentum_groups
        else:
            momentum_groups = []
            for param_group in runner.optimizer.param_groups:
                momentum_groups.append(self.get_momentum(runner, param_group))
            return momentum_groups
|
RAVE-main/annotator/mmpkg/mmcv/runner/hooks/sampler_seed.py
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from .hook import HOOKS, Hook
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
@HOOKS.register_module()
class DistSamplerSeedHook(Hook):
    """Data-loading sampler for distributed training.

    When distributed training, it is only useful in conjunction with
    :obj:`EpochBasedRunner`, while :obj:`IterBasedRunner` achieves the same
    purpose with :obj:`IterLoader`.
    """

    def before_epoch(self, runner):
        loader = runner.data_loader
        epoch = runner.epoch
        if hasattr(loader.sampler, 'set_epoch'):
            # in case the data loader uses `SequentialSampler` in Pytorch
            loader.sampler.set_epoch(epoch)
        elif hasattr(loader.batch_sampler.sampler, 'set_epoch'):
            # batch sampler in pytorch warps the sampler as its attributes.
            loader.batch_sampler.sampler.set_epoch(epoch)
|
RAVE-main/annotator/mmpkg/mmcv/runner/hooks/sync_buffer.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from ..dist_utils import allreduce_params
|
| 3 |
+
from .hook import HOOKS, Hook
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
@HOOKS.register_module()
class SyncBuffersHook(Hook):
    """Synchronize model buffers such as running_mean and running_var in BN at
    the end of each epoch.

    Args:
        distributed (bool): Whether distributed training is used. It is
            effective only for distributed training. Defaults to True.
    """

    def __init__(self, distributed=True):
        self.distributed = distributed

    def after_epoch(self, runner):
        """All-reduce model buffers at the end of each epoch."""
        if not self.distributed:
            return
        allreduce_params(runner.model.buffers())
|
RAVE-main/annotator/mmpkg/mmseg/models/__init__.py
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .backbones import * # noqa: F401,F403
|
| 2 |
+
from .builder import (BACKBONES, HEADS, LOSSES, SEGMENTORS, build_backbone,
|
| 3 |
+
build_head, build_loss, build_segmentor)
|
| 4 |
+
from .decode_heads import * # noqa: F401,F403
|
| 5 |
+
from .losses import * # noqa: F401,F403
|
| 6 |
+
from .necks import * # noqa: F401,F403
|
| 7 |
+
from .segmentors import * # noqa: F401,F403
|
| 8 |
+
|
| 9 |
+
__all__ = [
|
| 10 |
+
'BACKBONES', 'HEADS', 'LOSSES', 'SEGMENTORS', 'build_backbone',
|
| 11 |
+
'build_head', 'build_loss', 'build_segmentor'
|
| 12 |
+
]
|
RAVE-main/annotator/mmpkg/mmseg/models/backbones/fast_scnn.py
ADDED
|
@@ -0,0 +1,375 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
from annotator.mmpkg.mmcv.cnn import (ConvModule, DepthwiseSeparableConvModule, constant_init,
|
| 4 |
+
kaiming_init)
|
| 5 |
+
from torch.nn.modules.batchnorm import _BatchNorm
|
| 6 |
+
|
| 7 |
+
from annotator.mmpkg.mmseg.models.decode_heads.psp_head import PPM
|
| 8 |
+
from annotator.mmpkg.mmseg.ops import resize
|
| 9 |
+
from ..builder import BACKBONES
|
| 10 |
+
from ..utils.inverted_residual import InvertedResidual
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class LearningToDownsample(nn.Module):
    """Learning to downsample module.

    Args:
        in_channels (int): Number of input channels.
        dw_channels (tuple[int]): Number of output channels of the first and
            the second depthwise conv (dwconv) layers.
        out_channels (int): Number of output channels of the whole
            'learning to downsample' module.
        conv_cfg (dict | None): Config of conv layers. Default: None
        norm_cfg (dict | None): Config of norm layers. Default:
            dict(type='BN')
        act_cfg (dict): Config of activation layers. Default:
            dict(type='ReLU')
    """

    def __init__(self,
                 in_channels,
                 dw_channels,
                 out_channels,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 act_cfg=dict(type='ReLU')):
        super(LearningToDownsample, self).__init__()
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        first_dw, second_dw = dw_channels[0], dw_channels[1]

        # Stem: a regular strided conv followed by two depthwise-separable
        # convs, each halving the spatial resolution (overall stride 8).
        self.conv = ConvModule(
            in_channels,
            first_dw,
            3,
            stride=2,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        self.dsconv1 = DepthwiseSeparableConvModule(
            first_dw,
            second_dw,
            kernel_size=3,
            stride=2,
            padding=1,
            norm_cfg=self.norm_cfg)
        self.dsconv2 = DepthwiseSeparableConvModule(
            second_dw,
            out_channels,
            kernel_size=3,
            stride=2,
            padding=1,
            norm_cfg=self.norm_cfg)

    def forward(self, x):
        """Downsample ``x`` through the three convolution stages."""
        return self.dsconv2(self.dsconv1(self.conv(x)))
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
class GlobalFeatureExtractor(nn.Module):
|
| 74 |
+
"""Global feature extractor module.
|
| 75 |
+
|
| 76 |
+
Args:
|
| 77 |
+
in_channels (int): Number of input channels of the GFE module.
|
| 78 |
+
Default: 64
|
| 79 |
+
block_channels (tuple[int]): Tuple of ints. Each int specifies the
|
| 80 |
+
number of output channels of each Inverted Residual module.
|
| 81 |
+
Default: (64, 96, 128)
|
| 82 |
+
out_channels(int): Number of output channels of the GFE module.
|
| 83 |
+
Default: 128
|
| 84 |
+
expand_ratio (int): Adjusts number of channels of the hidden layer
|
| 85 |
+
in InvertedResidual by this amount.
|
| 86 |
+
Default: 6
|
| 87 |
+
num_blocks (tuple[int]): Tuple of ints. Each int specifies the
|
| 88 |
+
number of times each Inverted Residual module is repeated.
|
| 89 |
+
The repeated Inverted Residual modules are called a 'group'.
|
| 90 |
+
Default: (3, 3, 3)
|
| 91 |
+
strides (tuple[int]): Tuple of ints. Each int specifies
|
| 92 |
+
the downsampling factor of each 'group'.
|
| 93 |
+
Default: (2, 2, 1)
|
| 94 |
+
pool_scales (tuple[int]): Tuple of ints. Each int specifies
|
| 95 |
+
the parameter required in 'global average pooling' within PPM.
|
| 96 |
+
Default: (1, 2, 3, 6)
|
| 97 |
+
conv_cfg (dict | None): Config of conv layers. Default: None
|
| 98 |
+
norm_cfg (dict | None): Config of norm layers. Default:
|
| 99 |
+
dict(type='BN')
|
| 100 |
+
act_cfg (dict): Config of activation layers. Default:
|
| 101 |
+
dict(type='ReLU')
|
| 102 |
+
align_corners (bool): align_corners argument of F.interpolate.
|
| 103 |
+
Default: False
|
| 104 |
+
"""
|
| 105 |
+
|
| 106 |
+
def __init__(self,
|
| 107 |
+
in_channels=64,
|
| 108 |
+
block_channels=(64, 96, 128),
|
| 109 |
+
out_channels=128,
|
| 110 |
+
expand_ratio=6,
|
| 111 |
+
num_blocks=(3, 3, 3),
|
| 112 |
+
strides=(2, 2, 1),
|
| 113 |
+
pool_scales=(1, 2, 3, 6),
|
| 114 |
+
conv_cfg=None,
|
| 115 |
+
norm_cfg=dict(type='BN'),
|
| 116 |
+
act_cfg=dict(type='ReLU'),
|
| 117 |
+
align_corners=False):
|
| 118 |
+
super(GlobalFeatureExtractor, self).__init__()
|
| 119 |
+
self.conv_cfg = conv_cfg
|
| 120 |
+
self.norm_cfg = norm_cfg
|
| 121 |
+
self.act_cfg = act_cfg
|
| 122 |
+
assert len(block_channels) == len(num_blocks) == 3
|
| 123 |
+
self.bottleneck1 = self._make_layer(in_channels, block_channels[0],
|
| 124 |
+
num_blocks[0], strides[0],
|
| 125 |
+
expand_ratio)
|
| 126 |
+
self.bottleneck2 = self._make_layer(block_channels[0],
|
| 127 |
+
block_channels[1], num_blocks[1],
|
| 128 |
+
strides[1], expand_ratio)
|
| 129 |
+
self.bottleneck3 = self._make_layer(block_channels[1],
|
| 130 |
+
block_channels[2], num_blocks[2],
|
| 131 |
+
strides[2], expand_ratio)
|
| 132 |
+
self.ppm = PPM(
|
| 133 |
+
pool_scales,
|
| 134 |
+
block_channels[2],
|
| 135 |
+
block_channels[2] // 4,
|
| 136 |
+
conv_cfg=self.conv_cfg,
|
| 137 |
+
norm_cfg=self.norm_cfg,
|
| 138 |
+
act_cfg=self.act_cfg,
|
| 139 |
+
align_corners=align_corners)
|
| 140 |
+
self.out = ConvModule(
|
| 141 |
+
block_channels[2] * 2,
|
| 142 |
+
out_channels,
|
| 143 |
+
1,
|
| 144 |
+
conv_cfg=self.conv_cfg,
|
| 145 |
+
norm_cfg=self.norm_cfg,
|
| 146 |
+
act_cfg=self.act_cfg)
|
| 147 |
+
|
| 148 |
+
def _make_layer(self,
                in_channels,
                out_channels,
                blocks,
                stride=1,
                expand_ratio=6):
    """Stack ``blocks`` inverted residuals; only the first may downsample."""
    modules = []
    for block_idx in range(blocks):
        first = block_idx == 0
        modules.append(
            InvertedResidual(
                in_channels if first else out_channels,
                out_channels,
                stride if first else 1,
                expand_ratio,
                norm_cfg=self.norm_cfg))
    return nn.Sequential(*modules)
|
| 171 |
+
|
| 172 |
+
def forward(self, x):
    """Run the three bottleneck stages, append PPM context and fuse."""
    for stage in (self.bottleneck1, self.bottleneck2, self.bottleneck3):
        x = stage(x)
    # Concatenate the stage output with every pooled-context map.
    x = torch.cat([x] + list(self.ppm(x)), dim=1)
    return self.out(x)
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
class FeatureFusionModule(nn.Module):
    """Feature fusion module.

    Fuses the higher-resolution branch with the upsampled lower-resolution
    branch by projecting both to ``out_channels``, adding them, and applying
    a ReLU.

    Args:
        higher_in_channels (int): Number of input channels of the
            higher-resolution branch.
        lower_in_channels (int): Number of input channels of the
            lower-resolution branch.
        out_channels (int): Number of output channels.
        conv_cfg (dict | None): Config of conv layers. Default: None
        norm_cfg (dict | None): Config of norm layers. Default:
            dict(type='BN')
        act_cfg (dict): Config of activation layers. Default:
            dict(type='ReLU')
        align_corners (bool): align_corners argument of F.interpolate.
            Default: False
    """

    def __init__(self,
                 higher_in_channels,
                 lower_in_channels,
                 out_channels,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 act_cfg=dict(type='ReLU'),
                 align_corners=False):
        super(FeatureFusionModule, self).__init__()
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.align_corners = align_corners
        # 1x1 conv (with activation) applied to the upsampled low-res input.
        self.dwconv = ConvModule(
            lower_in_channels,
            out_channels,
            1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        # Branch projections without activation; ReLU is applied after the sum.
        self.conv_lower_res = ConvModule(
            out_channels,
            out_channels,
            1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=None)
        self.conv_higher_res = ConvModule(
            higher_in_channels,
            out_channels,
            1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=None)
        self.relu = nn.ReLU(True)

    def forward(self, higher_res_feature, lower_res_feature):
        """Upsample the low-res branch, project both branches and add them."""
        target_size = higher_res_feature.size()[2:]
        upsampled = resize(
            lower_res_feature,
            size=target_size,
            mode='bilinear',
            align_corners=self.align_corners)
        lower_branch = self.conv_lower_res(self.dwconv(upsampled))
        higher_branch = self.conv_higher_res(higher_res_feature)
        return self.relu(higher_branch + lower_branch)
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
@BACKBONES.register_module()
class FastSCNN(nn.Module):
    """Fast-SCNN Backbone.

    Args:
        in_channels (int): Number of input image channels. Default: 3.
        downsample_dw_channels (tuple[int]): Number of output channels after
            the first conv layer & the second conv layer in
            Learning-To-Downsample (LTD) module.
            Default: (32, 48).
        global_in_channels (int): Number of input channels of
            Global Feature Extractor(GFE).
            Equal to number of output channels of LTD.
            Default: 64.
        global_block_channels (tuple[int]): Tuple of integers that describe
            the output channels for each of the MobileNet-v2 bottleneck
            residual blocks in GFE.
            Default: (64, 96, 128).
        global_block_strides (tuple[int]): Tuple of integers
            that describe the strides (downsampling factors) for each of the
            MobileNet-v2 bottleneck residual blocks in GFE.
            Default: (2, 2, 1).
        global_out_channels (int): Number of output channels of GFE.
            Default: 128.
        higher_in_channels (int): Number of input channels of the higher
            resolution branch in FFM.
            Equal to global_in_channels.
            Default: 64.
        lower_in_channels (int): Number of input channels of the lower
            resolution branch in FFM.
            Equal to global_out_channels.
            Default: 128.
        fusion_out_channels (int): Number of output channels of FFM.
            Default: 128.
        out_indices (tuple): Tuple of indices of list
            [higher_res_features, lower_res_features, fusion_output].
            Often set to (0,1,2) to enable aux. heads.
            Default: (0, 1, 2).
        conv_cfg (dict | None): Config of conv layers. Default: None
        norm_cfg (dict | None): Config of norm layers. Default:
            dict(type='BN')
        act_cfg (dict): Config of activation layers. Default:
            dict(type='ReLU')
        align_corners (bool): align_corners argument of F.interpolate.
            Default: False
    """

    def __init__(self,
                 in_channels=3,
                 downsample_dw_channels=(32, 48),
                 global_in_channels=64,
                 global_block_channels=(64, 96, 128),
                 global_block_strides=(2, 2, 1),
                 global_out_channels=128,
                 higher_in_channels=64,
                 lower_in_channels=128,
                 fusion_out_channels=128,
                 out_indices=(0, 1, 2),
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 act_cfg=dict(type='ReLU'),
                 align_corners=False):

        super(FastSCNN, self).__init__()
        # The three sub-modules are chained, so their channel counts must
        # line up; fail fast with a readable message otherwise.
        # FIX: the original messages used a backslash continuation *inside*
        # the string literal, embedding a raw newline and indentation into
        # the exception text; use implicit string concatenation instead.
        if global_in_channels != higher_in_channels:
            raise AssertionError('Global Input Channels must be the same '
                                 'with Higher Input Channels!')
        elif global_out_channels != lower_in_channels:
            raise AssertionError('Global Output Channels must be the same '
                                 'with Lower Input Channels!')

        self.in_channels = in_channels
        self.downsample_dw_channels1 = downsample_dw_channels[0]
        self.downsample_dw_channels2 = downsample_dw_channels[1]
        self.global_in_channels = global_in_channels
        self.global_block_channels = global_block_channels
        self.global_block_strides = global_block_strides
        self.global_out_channels = global_out_channels
        self.higher_in_channels = higher_in_channels
        self.lower_in_channels = lower_in_channels
        self.fusion_out_channels = fusion_out_channels
        self.out_indices = out_indices
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.align_corners = align_corners
        self.learning_to_downsample = LearningToDownsample(
            in_channels,
            downsample_dw_channels,
            global_in_channels,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        self.global_feature_extractor = GlobalFeatureExtractor(
            global_in_channels,
            global_block_channels,
            global_out_channels,
            strides=self.global_block_strides,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg,
            align_corners=self.align_corners)
        self.feature_fusion = FeatureFusionModule(
            higher_in_channels,
            lower_in_channels,
            fusion_out_channels,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg,
            align_corners=self.align_corners)

    def init_weights(self, pretrained=None):
        """Initialize conv layers with Kaiming init, norm layers to 1.

        Args:
            pretrained (str | None): Unused here; kept for interface
                compatibility with other backbones' ``init_weights``.
        """
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                kaiming_init(m)
            elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                constant_init(m, 1)

    def forward(self, x):
        """Return the features selected by ``out_indices`` from the list
        [higher_res_features, lower_res_features, fusion_output]."""
        higher_res_features = self.learning_to_downsample(x)
        lower_res_features = self.global_feature_extractor(higher_res_features)
        fusion_output = self.feature_fusion(higher_res_features,
                                            lower_res_features)

        outs = [higher_res_features, lower_res_features, fusion_output]
        outs = [outs[i] for i in self.out_indices]
        return tuple(outs)
|
RAVE-main/annotator/mmpkg/mmseg/models/backbones/mobilenet_v3.py
ADDED
|
@@ -0,0 +1,255 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
|
| 3 |
+
import annotator.mmpkg.mmcv as mmcv
|
| 4 |
+
import torch.nn as nn
|
| 5 |
+
from annotator.mmpkg.mmcv.cnn import ConvModule, constant_init, kaiming_init
|
| 6 |
+
from annotator.mmpkg.mmcv.cnn.bricks import Conv2dAdaptivePadding
|
| 7 |
+
from annotator.mmpkg.mmcv.runner import load_checkpoint
|
| 8 |
+
from torch.nn.modules.batchnorm import _BatchNorm
|
| 9 |
+
|
| 10 |
+
from ..builder import BACKBONES
|
| 11 |
+
from ..utils import InvertedResidualV3 as InvertedResidual
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
@BACKBONES.register_module()
class MobileNetV3(nn.Module):
    """MobileNetV3 backbone.

    This backbone is the improved implementation of `Searching for MobileNetV3
    <https://ieeexplore.ieee.org/document/9008835>`_.

    Args:
        arch (str): Architecture of mobilnetv3, from {'small', 'large'}.
            Default: 'small'.
        conv_cfg (dict): Config dict for convolution layer.
            Default: None, which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='BN').
        out_indices (tuple[int]): Output from which layer.
            Default: (0, 1, 12).
        frozen_stages (int): Stages to be frozen (all param fixed).
            Default: -1, which means not freezing any parameters.
        reduction_factor (int): Divisor applied to the mid/out channels of
            the late blocks (index >= 12 for 'large', >= 8 for 'small').
            Default: 1.
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only. Default: False.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save
            some memory while slowing down the training speed.
            Default: False.
    """
    # Parameters to build each block:
    # [kernel size, mid channels, out channels, with_se, act type, stride]
    arch_settings = {
        'small': [[3, 16, 16, True, 'ReLU', 2],  # block0 layer1 os=4
                  [3, 72, 24, False, 'ReLU', 2],  # block1 layer2 os=8
                  [3, 88, 24, False, 'ReLU', 1],
                  [5, 96, 40, True, 'HSwish', 2],  # block2 layer4 os=16
                  [5, 240, 40, True, 'HSwish', 1],
                  [5, 240, 40, True, 'HSwish', 1],
                  [5, 120, 48, True, 'HSwish', 1],  # block3 layer7 os=16
                  [5, 144, 48, True, 'HSwish', 1],
                  [5, 288, 96, True, 'HSwish', 2],  # block4 layer9 os=32
                  [5, 576, 96, True, 'HSwish', 1],
                  [5, 576, 96, True, 'HSwish', 1]],
        'large': [[3, 16, 16, False, 'ReLU', 1],  # block0 layer1 os=2
                  [3, 64, 24, False, 'ReLU', 2],  # block1 layer2 os=4
                  [3, 72, 24, False, 'ReLU', 1],
                  [5, 72, 40, True, 'ReLU', 2],  # block2 layer4 os=8
                  [5, 120, 40, True, 'ReLU', 1],
                  [5, 120, 40, True, 'ReLU', 1],
                  [3, 240, 80, False, 'HSwish', 2],  # block3 layer7 os=16
                  [3, 200, 80, False, 'HSwish', 1],
                  [3, 184, 80, False, 'HSwish', 1],
                  [3, 184, 80, False, 'HSwish', 1],
                  [3, 480, 112, True, 'HSwish', 1],  # block4 layer11 os=16
                  [3, 672, 112, True, 'HSwish', 1],
                  [5, 672, 160, True, 'HSwish', 2],  # block5 layer13 os=32
                  [5, 960, 160, True, 'HSwish', 1],
                  [5, 960, 160, True, 'HSwish', 1]]
    }  # yapf: disable

    def __init__(self,
                 arch='small',
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 out_indices=(0, 1, 12),
                 frozen_stages=-1,
                 reduction_factor=1,
                 norm_eval=False,
                 with_cp=False):
        super(MobileNetV3, self).__init__()
        assert arch in self.arch_settings
        assert isinstance(reduction_factor, int) and reduction_factor > 0
        assert mmcv.is_tuple_of(out_indices, int)
        # Valid layer indices span layer0 (stem) .. layer{N+1} (last conv),
        # i.e. N blocks plus two extra layers.
        for index in out_indices:
            if index not in range(0, len(self.arch_settings[arch]) + 2):
                raise ValueError(
                    'the item in out_indices must in '
                    f'range(0, {len(self.arch_settings[arch])+2}). '
                    f'But received {index}')

        if frozen_stages not in range(-1, len(self.arch_settings[arch]) + 2):
            raise ValueError('frozen_stages must be in range(-1, '
                             f'{len(self.arch_settings[arch])+2}). '
                             f'But received {frozen_stages}')
        self.arch = arch
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.out_indices = out_indices
        self.frozen_stages = frozen_stages
        self.reduction_factor = reduction_factor
        self.norm_eval = norm_eval
        self.with_cp = with_cp
        # ``self.layers`` holds layer *names*; modules are registered via
        # ``add_module`` inside ``_make_layer``.
        self.layers = self._make_layer()

    def _make_layer(self):
        """Build stem, inverted-residual blocks and the final conv.

        Returns:
            list[str]: Names of the registered layers, in forward order.
        """
        layers = []

        # build the first layer (layer0)
        in_channels = 16
        layer = ConvModule(
            in_channels=3,
            out_channels=in_channels,
            kernel_size=3,
            stride=2,
            padding=1,
            conv_cfg=dict(type='Conv2dAdaptivePadding'),
            norm_cfg=self.norm_cfg,
            act_cfg=dict(type='HSwish'))
        self.add_module('layer0', layer)
        layers.append('layer0')

        layer_setting = self.arch_settings[self.arch]
        for i, params in enumerate(layer_setting):
            (kernel_size, mid_channels, out_channels, with_se, act,
             stride) = params

            # Shrink the late (high-channel) blocks by ``reduction_factor``.
            if self.arch == 'large' and i >= 12 or self.arch == 'small' and \
                    i >= 8:
                mid_channels = mid_channels // self.reduction_factor
                out_channels = out_channels // self.reduction_factor

            if with_se:
                se_cfg = dict(
                    channels=mid_channels,
                    ratio=4,
                    act_cfg=(dict(type='ReLU'),
                             dict(type='HSigmoid', bias=3.0, divisor=6.0)))
            else:
                se_cfg = None

            layer = InvertedResidual(
                in_channels=in_channels,
                out_channels=out_channels,
                mid_channels=mid_channels,
                kernel_size=kernel_size,
                stride=stride,
                se_cfg=se_cfg,
                # No expansion conv needed when the block does not widen.
                with_expand_conv=(in_channels != mid_channels),
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                act_cfg=dict(type=act),
                with_cp=self.with_cp)
            in_channels = out_channels
            layer_name = 'layer{}'.format(i + 1)
            self.add_module(layer_name, layer)
            layers.append(layer_name)

        # build the last layer
        # block5 layer12 os=32 for small model
        # block6 layer16 os=32 for large model
        layer = ConvModule(
            in_channels=in_channels,
            out_channels=576 if self.arch == 'small' else 960,
            kernel_size=1,
            stride=1,
            dilation=4,
            padding=0,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=dict(type='HSwish'))
        layer_name = 'layer{}'.format(len(layer_setting) + 1)
        self.add_module(layer_name, layer)
        layers.append(layer_name)

        # next, convert backbone MobileNetV3 to a semantic segmentation version
        # Replace late strides with dilation so the output stride stays at 8,
        # patching conv attributes of the already-registered modules in place.
        if self.arch == 'small':
            self.layer4.depthwise_conv.conv.stride = (1, 1)
            self.layer9.depthwise_conv.conv.stride = (1, 1)
            for i in range(4, len(layers)):
                layer = getattr(self, layers[i])
                if isinstance(layer, InvertedResidual):
                    modified_module = layer.depthwise_conv.conv
                else:
                    modified_module = layer.conv

                if i < 9:
                    modified_module.dilation = (2, 2)
                    pad = 2
                else:
                    modified_module.dilation = (4, 4)
                    pad = 4

                if not isinstance(modified_module, Conv2dAdaptivePadding):
                    # Adjust padding
                    pad *= (modified_module.kernel_size[0] - 1) // 2
                    modified_module.padding = (pad, pad)
        else:
            self.layer7.depthwise_conv.conv.stride = (1, 1)
            self.layer13.depthwise_conv.conv.stride = (1, 1)
            for i in range(7, len(layers)):
                layer = getattr(self, layers[i])
                if isinstance(layer, InvertedResidual):
                    modified_module = layer.depthwise_conv.conv
                else:
                    modified_module = layer.conv

                if i < 13:
                    modified_module.dilation = (2, 2)
                    pad = 2
                else:
                    modified_module.dilation = (4, 4)
                    pad = 4

                if not isinstance(modified_module, Conv2dAdaptivePadding):
                    # Adjust padding
                    pad *= (modified_module.kernel_size[0] - 1) // 2
                    modified_module.padding = (pad, pad)

        return layers

    def init_weights(self, pretrained=None):
        """Load a checkpoint if ``pretrained`` is a path, else Kaiming/constant
        init.

        Args:
            pretrained (str | None): Checkpoint path, or None for random init.

        Raises:
            TypeError: If ``pretrained`` is neither a str nor None.
        """
        if isinstance(pretrained, str):
            logger = logging.getLogger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, nn.BatchNorm2d):
                    constant_init(m, 1)
        else:
            raise TypeError('pretrained must be a str or None')

    def forward(self, x):
        """Run all layers; collect outputs at ``out_indices``.

        Returns:
            list[Tensor]: Selected intermediate feature maps.
        """
        outs = []
        for i, layer_name in enumerate(self.layers):
            layer = getattr(self, layer_name)
            x = layer(x)
            if i in self.out_indices:
                outs.append(x)
        return outs

    def _freeze_stages(self):
        # Freeze layer0 .. layer{frozen_stages}; a no-op when
        # ``frozen_stages`` is -1.
        for i in range(self.frozen_stages + 1):
            layer = getattr(self, f'layer{i}')
            layer.eval()
            for param in layer.parameters():
                param.requires_grad = False

    def train(self, mode=True):
        """Switch train/eval mode, re-freeze stages and (optionally) keep
        BatchNorm layers in eval mode."""
        super(MobileNetV3, self).train(mode)
        self._freeze_stages()
        if mode and self.norm_eval:
            for m in self.modules():
                if isinstance(m, _BatchNorm):
                    m.eval()
|
RAVE-main/annotator/mmpkg/mmseg/models/backbones/resnest.py
ADDED
|
@@ -0,0 +1,314 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
import torch.nn as nn
|
| 5 |
+
import torch.nn.functional as F
|
| 6 |
+
import torch.utils.checkpoint as cp
|
| 7 |
+
from annotator.mmpkg.mmcv.cnn import build_conv_layer, build_norm_layer
|
| 8 |
+
|
| 9 |
+
from ..builder import BACKBONES
|
| 10 |
+
from ..utils import ResLayer
|
| 11 |
+
from .resnet import Bottleneck as _Bottleneck
|
| 12 |
+
from .resnet import ResNetV1d
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class RSoftmax(nn.Module):
    """Radix Softmax module in ``SplitAttentionConv2d``.

    Applies a softmax across the radix dimension when ``radix > 1``,
    otherwise a plain sigmoid.

    Args:
        radix (int): Radix of input.
        groups (int): Groups of input.
    """

    def __init__(self, radix, groups):
        super().__init__()
        self.radix = radix
        self.groups = groups

    def forward(self, x):
        batch_size = x.size(0)
        if self.radix > 1:
            # (B, groups, radix, rest) -> softmax over the radix axis
            # (dim=1 after the transpose), then flatten back.
            grouped = x.view(batch_size, self.groups, self.radix,
                             -1).transpose(1, 2)
            normalized = F.softmax(grouped, dim=1)
            return normalized.reshape(batch_size, -1)
        return torch.sigmoid(x)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
class SplitAttentionConv2d(nn.Module):
    """Split-Attention Conv2d in ResNeSt.

    Args:
        in_channels (int): Same as nn.Conv2d.
        channels (int): Output channels per radix split.
        kernel_size (int | tuple[int]): Same as nn.Conv2d.
        stride (int | tuple[int]): Same as nn.Conv2d.
        padding (int | tuple[int]): Same as nn.Conv2d.
        dilation (int | tuple[int]): Same as nn.Conv2d.
        groups (int): Same as nn.Conv2d.
        radix (int): Radix of SpltAtConv2d. Default: 2
        reduction_factor (int): Reduction factor of inter_channels. Default: 4.
        conv_cfg (dict): Config dict for convolution layer. Default: None,
            which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer. Default: None.
        dcn (dict): Config dict for DCN. Default: None.
    """

    def __init__(self,
                 in_channels,
                 channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 radix=2,
                 reduction_factor=4,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 dcn=None):
        super(SplitAttentionConv2d, self).__init__()
        # Attention bottleneck width, floored at 32 channels.
        inter_channels = max(in_channels * radix // reduction_factor, 32)
        self.radix = radix
        self.groups = groups
        self.channels = channels
        self.with_dcn = dcn is not None
        self.dcn = dcn
        fallback_on_stride = False
        if self.with_dcn:
            fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
        if self.with_dcn and not fallback_on_stride:
            assert conv_cfg is None, 'conv_cfg must be None for DCN'
            conv_cfg = dcn
        # Produces ``radix`` splits in one grouped convolution.
        self.conv = build_conv_layer(
            conv_cfg,
            in_channels,
            channels * radix,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups * radix,
            bias=False)
        self.norm0_name, norm0 = build_norm_layer(
            norm_cfg, channels * radix, postfix=0)
        self.add_module(self.norm0_name, norm0)
        self.relu = nn.ReLU(inplace=True)
        # fc1/fc2 form the squeeze-excite style attention over the splits.
        self.fc1 = build_conv_layer(
            None, channels, inter_channels, 1, groups=self.groups)
        self.norm1_name, norm1 = build_norm_layer(
            norm_cfg, inter_channels, postfix=1)
        self.add_module(self.norm1_name, norm1)
        self.fc2 = build_conv_layer(
            None, inter_channels, channels * radix, 1, groups=self.groups)
        self.rsoftmax = RSoftmax(radix, groups)

    @property
    def norm0(self):
        """nn.Module: the normalization layer named "norm0" """
        return getattr(self, self.norm0_name)

    @property
    def norm1(self):
        """nn.Module: the normalization layer named "norm1" """
        return getattr(self, self.norm1_name)

    def forward(self, x):
        """Apply the grouped conv, compute split attention, and recombine."""
        x = self.conv(x)
        x = self.norm0(x)
        x = self.relu(x)

        # FIX: the original unpacked ``batch, rchannel = x.shape[:2]`` and
        # then immediately overwrote ``batch`` with ``x.size(0)``; the
        # ``rchannel`` name was never used, so the redundant unpacking is
        # removed.
        batch = x.size(0)
        if self.radix > 1:
            # Split the radix groups out of the channel dimension and pool
            # their sum for the attention branch.
            splits = x.view(batch, self.radix, -1, *x.shape[2:])
            gap = splits.sum(dim=1)
        else:
            gap = x
        gap = F.adaptive_avg_pool2d(gap, 1)
        gap = self.fc1(gap)

        gap = self.norm1(gap)
        gap = self.relu(gap)

        atten = self.fc2(gap)
        atten = self.rsoftmax(atten).view(batch, -1, 1, 1)

        if self.radix > 1:
            # Weight each split by its attention map and sum them.
            attens = atten.view(batch, self.radix, -1, *atten.shape[2:])
            out = torch.sum(attens * splits, dim=1)
        else:
            out = atten * x
        return out.contiguous()
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
class Bottleneck(_Bottleneck):
    """Bottleneck block for ResNeSt.

    Replaces the parent's conv2 with a ``SplitAttentionConv2d`` and optionally
    moves the conv2 stride into an average-pooling layer.

    Args:
        inplanes (int): Input planes of this block.
        planes (int): Middle planes of this block.
        groups (int): Groups of conv2.
        base_width (int): Width per group of conv2. 64x4d indicates
            ``groups=64, width_per_group=4`` and 32x8d indicates
            ``groups=32, width_per_group=8``.
        base_channels (int): Reference channel count used to scale the
            grouped width. Default: 64.
        radix (int): Radix of SpltAtConv2d. Default: 2
        reduction_factor (int): Reduction factor of inter_channels in
            SplitAttentionConv2d. Default: 4.
        avg_down_stride (bool): Whether to use average pool for stride in
            Bottleneck. Default: True.
        kwargs (dict): Key word arguments for base class.
    """
    expansion = 4

    def __init__(self,
                 inplanes,
                 planes,
                 groups=1,
                 base_width=4,
                 base_channels=64,
                 radix=2,
                 reduction_factor=4,
                 avg_down_stride=True,
                 **kwargs):
        """Bottleneck block for ResNeSt."""
        super(Bottleneck, self).__init__(inplanes, planes, **kwargs)

        # Grouped width scaling, same scheme as ResNeXt.
        if groups == 1:
            width = self.planes
        else:
            width = math.floor(self.planes *
                               (base_width / base_channels)) * groups

        # Only meaningful when conv2 would otherwise downsample.
        self.avg_down_stride = avg_down_stride and self.conv2_stride > 1

        self.norm1_name, norm1 = build_norm_layer(
            self.norm_cfg, width, postfix=1)
        self.norm3_name, norm3 = build_norm_layer(
            self.norm_cfg, self.planes * self.expansion, postfix=3)

        # Rebuild conv1 with the (possibly widened) channel count, replacing
        # the parent's version.
        self.conv1 = build_conv_layer(
            self.conv_cfg,
            self.inplanes,
            width,
            kernel_size=1,
            stride=self.conv1_stride,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        self.with_modulated_dcn = False
        # Split-attention conv replaces the parent's conv2; when
        # ``avg_down_stride`` the stride is handled by ``avd_layer`` instead.
        self.conv2 = SplitAttentionConv2d(
            width,
            width,
            kernel_size=3,
            stride=1 if self.avg_down_stride else self.conv2_stride,
            padding=self.dilation,
            dilation=self.dilation,
            groups=groups,
            radix=radix,
            reduction_factor=reduction_factor,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            dcn=self.dcn)
        # conv2's normalization lives inside SplitAttentionConv2d, so drop
        # the parent's now-unused norm2 module.
        delattr(self, self.norm2_name)

        if self.avg_down_stride:
            self.avd_layer = nn.AvgPool2d(3, self.conv2_stride, padding=1)

        self.conv3 = build_conv_layer(
            self.conv_cfg,
            width,
            self.planes * self.expansion,
            kernel_size=1,
            bias=False)
        self.add_module(self.norm3_name, norm3)

    def forward(self, x):
        """Residual forward pass, optionally gradient-checkpointed."""

        def _inner_forward(x):
            identity = x

            out = self.conv1(x)
            out = self.norm1(out)
            out = self.relu(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv1_plugin_names)

            out = self.conv2(out)

            # Average-pool downsampling in place of a strided conv2.
            if self.avg_down_stride:
                out = self.avd_layer(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv2_plugin_names)

            out = self.conv3(out)
            out = self.norm3(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv3_plugin_names)

            if self.downsample is not None:
                identity = self.downsample(x)

            out += identity

            return out

        # Checkpointing trades compute for memory; only valid when grads flow.
        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)

        out = self.relu(out)

        return out
|
| 267 |
+
|
| 268 |
+
|
| 269 |
+
@BACKBONES.register_module()
class ResNeSt(ResNetV1d):
    """ResNeSt backbone (Split-Attention Networks).

    Same overall layout as :class:`ResNetV1d`, but every stage is built from
    split-attention bottlenecks.

    Args:
        groups (int): Number of groups of Bottleneck. Default: 1
        base_width (int): Base width of Bottleneck. Default: 4
        radix (int): Radix of SpltAtConv2d. Default: 2
        reduction_factor (int): Reduction factor of inter_channels in
            SplitAttentionConv2d. Default: 4.
        avg_down_stride (bool): Whether to use average pool for stride in
            Bottleneck. Default: True.
        kwargs (dict): Keyword arguments forwarded to the ResNet constructor.
    """

    # depth -> (block class, number of blocks per stage)
    arch_settings = {
        50: (Bottleneck, (3, 4, 6, 3)),
        101: (Bottleneck, (3, 4, 23, 3)),
        152: (Bottleneck, (3, 8, 36, 3)),
        200: (Bottleneck, (3, 24, 36, 3))
    }

    def __init__(self,
                 groups=1,
                 base_width=4,
                 radix=2,
                 reduction_factor=4,
                 avg_down_stride=True,
                 **kwargs):
        # These attributes must exist before the parent constructor runs,
        # because ResNet.__init__ invokes make_res_layer(), which reads them.
        self.groups = groups
        self.base_width = base_width
        self.radix = radix
        self.reduction_factor = reduction_factor
        self.avg_down_stride = avg_down_stride
        super().__init__(**kwargs)

    def make_res_layer(self, **kwargs):
        """Pack one stage of split-attention blocks into a ``ResLayer``."""
        return ResLayer(
            groups=self.groups,
            base_width=self.base_width,
            base_channels=self.base_channels,
            radix=self.radix,
            reduction_factor=self.reduction_factor,
            avg_down_stride=self.avg_down_stride,
            **kwargs)
|
RAVE-main/annotator/mmpkg/mmseg/models/backbones/resnet.py
ADDED
|
@@ -0,0 +1,688 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch.nn as nn
|
| 2 |
+
import torch.utils.checkpoint as cp
|
| 3 |
+
from annotator.mmpkg.mmcv.cnn import (build_conv_layer, build_norm_layer, build_plugin_layer,
|
| 4 |
+
constant_init, kaiming_init)
|
| 5 |
+
from annotator.mmpkg.mmcv.runner import load_checkpoint
|
| 6 |
+
from annotator.mmpkg.mmcv.utils.parrots_wrapper import _BatchNorm
|
| 7 |
+
|
| 8 |
+
from annotator.mmpkg.mmseg.utils import get_root_logger
|
| 9 |
+
from ..builder import BACKBONES
|
| 10 |
+
from ..utils import ResLayer
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class BasicBlock(nn.Module):
    """Two-3x3-conv residual block used by ResNet-18/34.

    Args:
        inplanes (int): Input channels.
        planes (int): Output channels of both convolutions
            (``expansion == 1``, so also the block's output width).
        stride (int): Stride of the first conv. Default: 1.
        dilation (int): Dilation (and matching padding) of the first conv.
            Default: 1.
        downsample (nn.Module | None): Projection applied to the identity
            path when its shape differs from the branch output.
        style (str): Kept for interface parity with ``Bottleneck``; the
            basic block has no 1x1 convs so it is unused here.
        with_cp (bool): Use gradient checkpointing in ``forward``.
            Default: False.
        conv_cfg (dict | None): Config dict for convolution layers.
        norm_cfg (dict): Config dict for normalization layers. Default: BN.
        dcn (None): Deformable conv config; unsupported for BasicBlock.
        plugins (None): Block plugins; unsupported for BasicBlock.
    """

    expansion = 1

    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 dcn=None,
                 plugins=None):
        super(BasicBlock, self).__init__()
        assert dcn is None, 'Not implemented yet.'
        assert plugins is None, 'Not implemented yet.'

        # Plain bookkeeping first; layer construction below keeps the same
        # creation order as before (conv init draws from the global RNG).
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        self.with_cp = with_cp

        self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)

        # First 3x3 conv carries both the stride and the dilation.
        self.conv1 = build_conv_layer(
            conv_cfg,
            inplanes,
            planes,
            3,
            stride=stride,
            padding=dilation,
            dilation=dilation,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        # Second 3x3 conv is always stride-1, dilation-1.
        self.conv2 = build_conv_layer(
            conv_cfg, planes, planes, 3, padding=1, bias=False)
        self.add_module(self.norm2_name, norm2)

        self.relu = nn.ReLU(inplace=True)

    @property
    def norm1(self):
        """nn.Module: normalization layer after the first convolution layer"""
        return getattr(self, self.norm1_name)

    @property
    def norm2(self):
        """nn.Module: normalization layer after the second convolution layer"""
        return getattr(self, self.norm2_name)

    def forward(self, x):
        """Forward function."""

        def _residual(feat):
            shortcut = feat if self.downsample is None else self.downsample(feat)
            y = self.relu(self.norm1(self.conv1(feat)))
            y = self.norm2(self.conv2(y))
            y += shortcut
            return y

        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_residual, x)
        else:
            out = _residual(x)

        return self.relu(out)
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
class Bottleneck(nn.Module):
    """Bottleneck block for ResNet.

    If style is "pytorch", the stride-two layer is the 3x3 conv layer, if it is
    "caffe", the stride-two layer is the first 1x1 conv layer.

    Args:
        inplanes (int): Input channels.
        planes (int): Bottleneck width; the block outputs
            ``planes * expansion`` channels.
        stride (int): Stride of the stride-carrying conv. Default: 1.
        dilation (int): Dilation of the 3x3 conv. Default: 1.
        downsample (nn.Module | None): Projection applied to the identity
            path when its shape differs from the branch output.
        style (str): 'pytorch' or 'caffe' (see class docstring).
        with_cp (bool): Use gradient checkpointing in ``forward``.
            Default: False.
        conv_cfg (dict | None): Config dict for convolution layers.
        norm_cfg (dict): Config dict for normalization layers. Default: BN.
        dcn (dict | None): Deformable-conv config for the 3x3 conv.
        plugins (list[dict] | None): Plugin cfgs inserted after
            conv1/conv2/conv3.
    """

    expansion = 4

    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 dcn=None,
                 plugins=None):
        super(Bottleneck, self).__init__()
        assert style in ['pytorch', 'caffe']
        assert dcn is None or isinstance(dcn, dict)
        assert plugins is None or isinstance(plugins, list)
        if plugins is not None:
            allowed_position = ['after_conv1', 'after_conv2', 'after_conv3']
            assert all(p['position'] in allowed_position for p in plugins)

        self.inplanes = inplanes
        self.planes = planes
        self.stride = stride
        self.dilation = dilation
        self.style = style
        self.with_cp = with_cp
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.dcn = dcn
        self.with_dcn = dcn is not None
        self.plugins = plugins
        self.with_plugins = plugins is not None

        if self.with_plugins:
            # collect plugins for conv1/conv2/conv3
            self.after_conv1_plugins = [
                plugin['cfg'] for plugin in plugins
                if plugin['position'] == 'after_conv1'
            ]
            self.after_conv2_plugins = [
                plugin['cfg'] for plugin in plugins
                if plugin['position'] == 'after_conv2'
            ]
            self.after_conv3_plugins = [
                plugin['cfg'] for plugin in plugins
                if plugin['position'] == 'after_conv3'
            ]

        # Which conv carries the stride depends on the style (see docstring).
        if self.style == 'pytorch':
            self.conv1_stride = 1
            self.conv2_stride = stride
        else:
            self.conv1_stride = stride
            self.conv2_stride = 1

        self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
        self.norm3_name, norm3 = build_norm_layer(
            norm_cfg, planes * self.expansion, postfix=3)

        self.conv1 = build_conv_layer(
            conv_cfg,
            inplanes,
            planes,
            kernel_size=1,
            stride=self.conv1_stride,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        fallback_on_stride = False
        if self.with_dcn:
            # NOTE: pop() mutates the caller-supplied dcn dict so that the
            # remaining keys can be passed straight to build_conv_layer.
            fallback_on_stride = dcn.pop('fallback_on_stride', False)
        if not self.with_dcn or fallback_on_stride:
            self.conv2 = build_conv_layer(
                conv_cfg,
                planes,
                planes,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=dilation,
                dilation=dilation,
                bias=False)
        else:
            assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
            self.conv2 = build_conv_layer(
                dcn,
                planes,
                planes,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=dilation,
                dilation=dilation,
                bias=False)

        self.add_module(self.norm2_name, norm2)
        self.conv3 = build_conv_layer(
            conv_cfg,
            planes,
            planes * self.expansion,
            kernel_size=1,
            bias=False)
        self.add_module(self.norm3_name, norm3)

        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample

        if self.with_plugins:
            self.after_conv1_plugin_names = self.make_block_plugins(
                planes, self.after_conv1_plugins)
            self.after_conv2_plugin_names = self.make_block_plugins(
                planes, self.after_conv2_plugins)
            self.after_conv3_plugin_names = self.make_block_plugins(
                planes * self.expansion, self.after_conv3_plugins)

    def make_block_plugins(self, in_channels, plugins):
        """make plugins for block.

        Args:
            in_channels (int): Input channels of plugin.
            plugins (list[dict]): List of plugins cfg to build.

        Returns:
            list[str]: List of the names of plugin.
        """
        assert isinstance(plugins, list)
        plugin_names = []
        for plugin in plugins:
            plugin = plugin.copy()
            name, layer = build_plugin_layer(
                plugin,
                in_channels=in_channels,
                postfix=plugin.pop('postfix', ''))
            assert not hasattr(self, name), f'duplicate plugin {name}'
            self.add_module(name, layer)
            plugin_names.append(name)
        return plugin_names

    def forward_plugin(self, x, plugin_names):
        """Apply the named plugins sequentially (chained).

        Each plugin consumes the previous plugin's output.  Previously every
        plugin was fed the raw input ``x``, which silently discarded the
        effect of all but the last plugin at a given position.
        """
        out = x
        for name in plugin_names:
            out = getattr(self, name)(out)
        return out

    @property
    def norm1(self):
        """nn.Module: normalization layer after the first convolution layer"""
        return getattr(self, self.norm1_name)

    @property
    def norm2(self):
        """nn.Module: normalization layer after the second convolution layer"""
        return getattr(self, self.norm2_name)

    @property
    def norm3(self):
        """nn.Module: normalization layer after the third convolution layer"""
        return getattr(self, self.norm3_name)

    def forward(self, x):
        """Forward function."""

        def _inner_forward(x):
            identity = x

            out = self.conv1(x)
            out = self.norm1(out)
            out = self.relu(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv1_plugin_names)

            out = self.conv2(out)
            out = self.norm2(out)
            out = self.relu(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv2_plugin_names)

            out = self.conv3(out)
            out = self.norm3(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv3_plugin_names)

            if self.downsample is not None:
                identity = self.downsample(x)

            out += identity

            return out

        if self.with_cp and x.requires_grad:
            # Gradient checkpointing: trade compute for activation memory.
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)

        out = self.relu(out)

        return out
|
| 305 |
+
|
| 306 |
+
|
| 307 |
+
@BACKBONES.register_module()
class ResNet(nn.Module):
    """ResNet backbone.

    Args:
        depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
        in_channels (int): Number of input image channels. Default: 3.
        stem_channels (int): Number of stem channels. Default: 64.
        base_channels (int): Number of base channels of res layer. Default: 64.
        num_stages (int): Resnet stages, normally 4.
        strides (Sequence[int]): Strides of the first block of each stage.
        dilations (Sequence[int]): Dilation of each stage.
        out_indices (Sequence[int]): Output from which stages.
        style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
            layer is the 3x3 conv layer, otherwise the stride-two layer is
            the first 1x1 conv layer.
        deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv
        avg_down (bool): Use AvgPool instead of stride conv when
            downsampling in the bottleneck.
        frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
            -1 means not freezing any parameters.
        conv_cfg (dict | None): Dictionary to construct and config conv layer.
        norm_cfg (dict): Dictionary to construct and config norm layer.
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only.
        dcn (dict | None): Config for deformable convolution, applied to the
            stages selected by ``stage_with_dcn``.
        stage_with_dcn (tuple[bool]): Which stages use dcn; length must equal
            ``num_stages`` when ``dcn`` is given.
        plugins (list[dict]): List of plugins for stages, each dict contains:

            - cfg (dict, required): Cfg dict to build plugin.

            - position (str, required): Position inside block to insert plugin,
            options: 'after_conv1', 'after_conv2', 'after_conv3'.

            - stages (tuple[bool], optional): Stages to apply plugin, length
            should be same as 'num_stages'
        multi_grid (Sequence[int]|None): Multi grid dilation rates of last
            stage. Default: None
        contract_dilation (bool): Whether contract first dilation of each layer
            Default: False
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed.
        zero_init_residual (bool): Whether to use zero init for last norm layer
            in resblocks to let them behave as identity.

    Example:
        >>> from annotator.mmpkg.mmseg.models import ResNet
        >>> import torch
        >>> self = ResNet(depth=18)
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 32, 32)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        (1, 64, 8, 8)
        (1, 128, 4, 4)
        (1, 256, 2, 2)
        (1, 512, 1, 1)
    """

    # depth -> (block class, number of blocks per stage)
    arch_settings = {
        18: (BasicBlock, (2, 2, 2, 2)),
        34: (BasicBlock, (3, 4, 6, 3)),
        50: (Bottleneck, (3, 4, 6, 3)),
        101: (Bottleneck, (3, 4, 23, 3)),
        152: (Bottleneck, (3, 8, 36, 3))
    }

    def __init__(self,
                 depth,
                 in_channels=3,
                 stem_channels=64,
                 base_channels=64,
                 num_stages=4,
                 strides=(1, 2, 2, 2),
                 dilations=(1, 1, 1, 1),
                 out_indices=(0, 1, 2, 3),
                 style='pytorch',
                 deep_stem=False,
                 avg_down=False,
                 frozen_stages=-1,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', requires_grad=True),
                 norm_eval=False,
                 dcn=None,
                 stage_with_dcn=(False, False, False, False),
                 plugins=None,
                 multi_grid=None,
                 contract_dilation=False,
                 with_cp=False,
                 zero_init_residual=True):
        super(ResNet, self).__init__()
        if depth not in self.arch_settings:
            raise KeyError(f'invalid depth {depth} for resnet')
        self.depth = depth
        self.stem_channels = stem_channels
        self.base_channels = base_channels
        self.num_stages = num_stages
        assert num_stages >= 1 and num_stages <= 4
        self.strides = strides
        self.dilations = dilations
        assert len(strides) == len(dilations) == num_stages
        self.out_indices = out_indices
        assert max(out_indices) < num_stages
        self.style = style
        self.deep_stem = deep_stem
        self.avg_down = avg_down
        self.frozen_stages = frozen_stages
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.with_cp = with_cp
        self.norm_eval = norm_eval
        self.dcn = dcn
        self.stage_with_dcn = stage_with_dcn
        if dcn is not None:
            assert len(stage_with_dcn) == num_stages
        self.plugins = plugins
        self.multi_grid = multi_grid
        self.contract_dilation = contract_dilation
        self.zero_init_residual = zero_init_residual
        self.block, stage_blocks = self.arch_settings[depth]
        self.stage_blocks = stage_blocks[:num_stages]
        self.inplanes = stem_channels

        self._make_stem_layer(in_channels, stem_channels)

        # Build the residual stages; each one is registered as 'layer{i+1}'
        # and its name recorded so forward() can iterate them in order.
        self.res_layers = []
        for i, num_blocks in enumerate(self.stage_blocks):
            stride = strides[i]
            dilation = dilations[i]
            # Per-stage dcn: only enabled where stage_with_dcn says so.
            dcn = self.dcn if self.stage_with_dcn[i] else None
            if plugins is not None:
                stage_plugins = self.make_stage_plugins(plugins, i)
            else:
                stage_plugins = None
            # multi grid is applied to last layer only
            stage_multi_grid = multi_grid if i == len(
                self.stage_blocks) - 1 else None
            # Channel width doubles each stage: 64, 128, 256, 512 by default.
            planes = base_channels * 2**i
            res_layer = self.make_res_layer(
                block=self.block,
                inplanes=self.inplanes,
                planes=planes,
                num_blocks=num_blocks,
                stride=stride,
                dilation=dilation,
                style=self.style,
                avg_down=self.avg_down,
                with_cp=with_cp,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                dcn=dcn,
                plugins=stage_plugins,
                multi_grid=stage_multi_grid,
                contract_dilation=contract_dilation)
            self.inplanes = planes * self.block.expansion
            layer_name = f'layer{i+1}'
            self.add_module(layer_name, res_layer)
            self.res_layers.append(layer_name)

        self._freeze_stages()

        # Channel count of the last stage's output.
        self.feat_dim = self.block.expansion * base_channels * 2**(
            len(self.stage_blocks) - 1)

    def make_stage_plugins(self, plugins, stage_idx):
        """make plugins for ResNet 'stage_idx'th stage .

        Currently we support to insert 'context_block',
        'empirical_attention_block', 'nonlocal_block' into the backbone like
        ResNet/ResNeXt. They could be inserted after conv1/conv2/conv3 of
        Bottleneck.

        An example of plugins format could be :
        >>> plugins=[
        ...     dict(cfg=dict(type='xxx', arg1='xxx'),
        ...          stages=(False, True, True, True),
        ...          position='after_conv2'),
        ...     dict(cfg=dict(type='yyy'),
        ...          stages=(True, True, True, True),
        ...          position='after_conv3'),
        ...     dict(cfg=dict(type='zzz', postfix='1'),
        ...          stages=(True, True, True, True),
        ...          position='after_conv3'),
        ...     dict(cfg=dict(type='zzz', postfix='2'),
        ...          stages=(True, True, True, True),
        ...          position='after_conv3')
        ... ]
        >>> self = ResNet(depth=18)
        >>> stage_plugins = self.make_stage_plugins(plugins, 0)
        >>> assert len(stage_plugins) == 3

        Suppose 'stage_idx=0', the structure of blocks in the stage would be:
            conv1-> conv2->conv3->yyy->zzz1->zzz2
        Suppose 'stage_idx=1', the structure of blocks in the stage would be:
            conv1-> conv2->xxx->conv3->yyy->zzz1->zzz2

        If stages is missing, the plugin would be applied to all stages.

        Args:
            plugins (list[dict]): List of plugins cfg to build. The postfix is
                required if multiple same type plugins are inserted.
            stage_idx (int): Index of stage to build

        Returns:
            list[dict]: Plugins for current stage
        """
        stage_plugins = []
        for plugin in plugins:
            # Copy so popping 'stages' does not mutate the caller's cfg.
            plugin = plugin.copy()
            stages = plugin.pop('stages', None)
            assert stages is None or len(stages) == self.num_stages
            # whether to insert plugin into current stage
            if stages is None or stages[stage_idx]:
                stage_plugins.append(plugin)

        return stage_plugins

    def make_res_layer(self, **kwargs):
        """Pack all blocks in a stage into a ``ResLayer``."""
        return ResLayer(**kwargs)

    @property
    def norm1(self):
        """nn.Module: the normalization layer named "norm1" """
        return getattr(self, self.norm1_name)

    def _make_stem_layer(self, in_channels, stem_channels):
        """Make stem layer for ResNet.

        Either three stacked 3x3 convs (``deep_stem``) or a single 7x7 conv;
        both downsample by 2x and are followed by a stride-2 max pool.
        """
        if self.deep_stem:
            self.stem = nn.Sequential(
                build_conv_layer(
                    self.conv_cfg,
                    in_channels,
                    stem_channels // 2,
                    kernel_size=3,
                    stride=2,
                    padding=1,
                    bias=False),
                build_norm_layer(self.norm_cfg, stem_channels // 2)[1],
                nn.ReLU(inplace=True),
                build_conv_layer(
                    self.conv_cfg,
                    stem_channels // 2,
                    stem_channels // 2,
                    kernel_size=3,
                    stride=1,
                    padding=1,
                    bias=False),
                build_norm_layer(self.norm_cfg, stem_channels // 2)[1],
                nn.ReLU(inplace=True),
                build_conv_layer(
                    self.conv_cfg,
                    stem_channels // 2,
                    stem_channels,
                    kernel_size=3,
                    stride=1,
                    padding=1,
                    bias=False),
                build_norm_layer(self.norm_cfg, stem_channels)[1],
                nn.ReLU(inplace=True))
        else:
            self.conv1 = build_conv_layer(
                self.conv_cfg,
                in_channels,
                stem_channels,
                kernel_size=7,
                stride=2,
                padding=3,
                bias=False)
            self.norm1_name, norm1 = build_norm_layer(
                self.norm_cfg, stem_channels, postfix=1)
            self.add_module(self.norm1_name, norm1)
            self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

    def _freeze_stages(self):
        """Freeze stages param and norm stats."""
        if self.frozen_stages >= 0:
            # Stage 0 == the stem (conv/norm before layer1).
            if self.deep_stem:
                self.stem.eval()
                for param in self.stem.parameters():
                    param.requires_grad = False
            else:
                self.norm1.eval()
                for m in [self.conv1, self.norm1]:
                    for param in m.parameters():
                        param.requires_grad = False

        for i in range(1, self.frozen_stages + 1):
            m = getattr(self, f'layer{i}')
            m.eval()
            for param in m.parameters():
                param.requires_grad = False

    def init_weights(self, pretrained=None):
        """Initialize the weights in backbone.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.

        Raises:
            TypeError: If ``pretrained`` is neither a str nor None.
        """
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)

            if self.dcn is not None:
                # DCN offset convs start at zero so the deformable conv
                # initially behaves like a regular conv.
                for m in self.modules():
                    if isinstance(m, Bottleneck) and hasattr(
                            m, 'conv2_offset'):
                        constant_init(m.conv2_offset, 0)

            if self.zero_init_residual:
                # Zero the last norm of each block so the residual branch
                # starts as an identity mapping.
                for m in self.modules():
                    if isinstance(m, Bottleneck):
                        constant_init(m.norm3, 0)
                    elif isinstance(m, BasicBlock):
                        constant_init(m.norm2, 0)
        else:
            raise TypeError('pretrained must be a str or None')

    def forward(self, x):
        """Forward function.

        Returns:
            tuple: Feature maps of the stages listed in ``out_indices``.
        """
        if self.deep_stem:
            x = self.stem(x)
        else:
            x = self.conv1(x)
            x = self.norm1(x)
            x = self.relu(x)
        x = self.maxpool(x)
        outs = []
        for i, layer_name in enumerate(self.res_layers):
            res_layer = getattr(self, layer_name)
            x = res_layer(x)
            if i in self.out_indices:
                outs.append(x)
        return tuple(outs)

    def train(self, mode=True):
        """Convert the model into training mode while keep normalization layer
        freezed."""
        super(ResNet, self).train(mode)
        self._freeze_stages()
        if mode and self.norm_eval:
            for m in self.modules():
                # trick: eval have effect on BatchNorm only
                if isinstance(m, _BatchNorm):
                    m.eval()
|
| 659 |
+
|
| 660 |
+
|
| 661 |
+
@BACKBONES.register_module()
class ResNetV1c(ResNet):
    """ResNetV1c backbone variant.

    Identical to the default ResNet (V1b) except that the stem's single 7x7
    convolution is replaced by three stacked 3x3 convolutions
    (``deep_stem=True``); downsampling shortcuts are unchanged.

    References:
        .. [1] https://arxiv.org/pdf/1812.01187.pdf
    """

    def __init__(self, **kwargs):
        super(ResNetV1c, self).__init__(deep_stem=True, avg_down=False, **kwargs)
|
| 675 |
+
|
| 676 |
+
|
| 677 |
+
@BACKBONES.register_module()
class ResNetV1d(ResNet):
    """ResNetV1d backbone variant.

    Like V1c it swaps the 7x7 stem conv for three 3x3 convs
    (``deep_stem=True``); additionally, each downsampling shortcut applies a
    stride-2 2x2 average pool before a now-stride-1 conv (``avg_down=True``).

    References:
        .. [1] https://arxiv.org/pdf/1812.01187.pdf
    """

    def __init__(self, **kwargs):
        super(ResNetV1d, self).__init__(deep_stem=True, avg_down=True, **kwargs)
|
RAVE-main/annotator/mmpkg/mmseg/models/backbones/resnext.py
ADDED
|
@@ -0,0 +1,145 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
|
| 3 |
+
from annotator.mmpkg.mmcv.cnn import build_conv_layer, build_norm_layer
|
| 4 |
+
|
| 5 |
+
from ..builder import BACKBONES
|
| 6 |
+
from ..utils import ResLayer
|
| 7 |
+
from .resnet import Bottleneck as _Bottleneck
|
| 8 |
+
from .resnet import ResNet
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class Bottleneck(_Bottleneck):
    """Bottleneck block for ResNeXt.

    If style is "pytorch", the stride-two layer is the 3x3 conv layer, if it is
    "caffe", the stride-two layer is the first 1x1 conv layer.

    Args:
        inplanes (int): Input channels of the block.
        planes (int): Base channels of the block (before expansion).
        groups (int): Number of groups in the 3x3 convolution.
        base_width (int): Base width per group.
        base_channels (int): Reference channel count used to scale ``width``.
    """

    def __init__(self,
                 inplanes,
                 planes,
                 groups=1,
                 base_width=4,
                 base_channels=64,
                 **kwargs):
        super(Bottleneck, self).__init__(inplanes, planes, **kwargs)

        # ResNeXt scales the inner (grouped) width by base_width/base_channels.
        if groups == 1:
            width = self.planes
        else:
            width = math.floor(self.planes *
                               (base_width / base_channels)) * groups

        # Rebuild the norm layers for the grouped width; these override the
        # attributes presumably set up by the parent __init__ -- TODO confirm.
        self.norm1_name, norm1 = build_norm_layer(
            self.norm_cfg, width, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(
            self.norm_cfg, width, postfix=2)
        self.norm3_name, norm3 = build_norm_layer(
            self.norm_cfg, self.planes * self.expansion, postfix=3)

        self.conv1 = build_conv_layer(
            self.conv_cfg,
            self.inplanes,
            width,
            kernel_size=1,
            stride=self.conv1_stride,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        fallback_on_stride = False
        self.with_modulated_dcn = False
        if self.with_dcn:
            # NOTE: pop() mutates self.dcn so the flag is not forwarded below.
            fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
        if not self.with_dcn or fallback_on_stride:
            # Plain grouped 3x3 convolution.
            self.conv2 = build_conv_layer(
                self.conv_cfg,
                width,
                width,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=self.dilation,
                dilation=self.dilation,
                groups=groups,
                bias=False)
        else:
            # Deformable convolution branch: built from the DCN config.
            assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
            self.conv2 = build_conv_layer(
                self.dcn,
                width,
                width,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=self.dilation,
                dilation=self.dilation,
                groups=groups,
                bias=False)

        self.add_module(self.norm2_name, norm2)
        self.conv3 = build_conv_layer(
            self.conv_cfg,
            width,
            self.planes * self.expansion,
            kernel_size=1,
            bias=False)
        self.add_module(self.norm3_name, norm3)
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
@BACKBONES.register_module()
class ResNeXt(ResNet):
    """ResNeXt backbone.

    Args:
        depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
        in_channels (int): Number of input image channels. Normally 3.
        num_stages (int): Resnet stages, normally 4.
        groups (int): Group of resnext.
        base_width (int): Base width of resnext.
        strides (Sequence[int]): Strides of the first block of each stage.
        dilations (Sequence[int]): Dilation of each stage.
        out_indices (Sequence[int]): Output from which stages.
        style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
            layer is the 3x3 conv layer, otherwise the stride-two layer is
            the first 1x1 conv layer.
        frozen_stages (int): Stages to be frozen (all param fixed). -1 means
            not freezing any parameters.
        norm_cfg (dict): dictionary to construct and config norm layer.
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed.
        zero_init_residual (bool): whether to use zero init for last norm layer
            in resblocks to let them behave as identity.

    Example:
        >>> from annotator.mmpkg.mmseg.models import ResNeXt
        >>> import torch
        >>> self = ResNeXt(depth=50)
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 32, 32)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        (1, 256, 8, 8)
        (1, 512, 4, 4)
        (1, 1024, 2, 2)
        (1, 2048, 1, 1)
    """

    # depth -> (block class, blocks per stage)
    arch_settings = {
        50: (Bottleneck, (3, 4, 6, 3)),
        101: (Bottleneck, (3, 4, 23, 3)),
        152: (Bottleneck, (3, 8, 36, 3))
    }

    def __init__(self, groups=1, base_width=4, **kwargs):
        # NOTE: set before super().__init__ -- the parent constructor
        # presumably builds the residual stages via make_res_layer().
        self.groups = groups
        self.base_width = base_width
        super(ResNeXt, self).__init__(**kwargs)

    def make_res_layer(self, **kwargs):
        """Pack all blocks in a stage into a ``ResLayer``"""
        return ResLayer(
            groups=self.groups,
            base_width=self.base_width,
            base_channels=self.base_channels,
            **kwargs)
|
RAVE-main/annotator/mmpkg/mmseg/models/builder.py
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import warnings
|
| 2 |
+
|
| 3 |
+
from annotator.mmpkg.mmcv.cnn import MODELS as MMCV_MODELS
|
| 4 |
+
from annotator.mmpkg.mmcv.utils import Registry
|
| 5 |
+
|
| 6 |
+
MODELS = Registry('models', parent=MMCV_MODELS)
|
| 7 |
+
|
| 8 |
+
BACKBONES = MODELS
|
| 9 |
+
NECKS = MODELS
|
| 10 |
+
HEADS = MODELS
|
| 11 |
+
LOSSES = MODELS
|
| 12 |
+
SEGMENTORS = MODELS
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def build_backbone(cfg):
    """Build a backbone module from ``cfg`` via the BACKBONES registry."""
    return BACKBONES.build(cfg)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def build_neck(cfg):
    """Build a neck module from ``cfg`` via the NECKS registry."""
    return NECKS.build(cfg)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def build_head(cfg):
    """Build a head module from ``cfg`` via the HEADS registry."""
    return HEADS.build(cfg)
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def build_loss(cfg):
    """Build a loss module from ``cfg`` via the LOSSES registry."""
    return LOSSES.build(cfg)
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def build_segmentor(cfg, train_cfg=None, test_cfg=None):
    """Build a segmentor from ``cfg`` via the SEGMENTORS registry.

    Args:
        cfg (dict): Model config; may itself carry ``train_cfg``/``test_cfg``.
        train_cfg (dict | None): Deprecated; specify inside ``cfg`` instead.
        test_cfg (dict | None): Deprecated; specify inside ``cfg`` instead.

    Raises:
        AssertionError: If ``train_cfg`` or ``test_cfg`` is given both as an
            outer argument and inside ``cfg``.
    """
    if train_cfg is not None or test_cfg is not None:
        warnings.warn(
            'train_cfg and test_cfg are deprecated, '
            'please specify them in model', UserWarning)
    assert cfg.get('train_cfg') is None or train_cfg is None, \
        'train_cfg specified in both outer field and model field '
    assert cfg.get('test_cfg') is None or test_cfg is None, \
        'test_cfg specified in both outer field and model field '
    return SEGMENTORS.build(
        cfg, default_args=dict(train_cfg=train_cfg, test_cfg=test_cfg))
|
RAVE-main/annotator/mmpkg/mmseg/models/decode_heads/__init__.py
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .ann_head import ANNHead
|
| 2 |
+
from .apc_head import APCHead
|
| 3 |
+
from .aspp_head import ASPPHead
|
| 4 |
+
from .cc_head import CCHead
|
| 5 |
+
from .da_head import DAHead
|
| 6 |
+
from .dm_head import DMHead
|
| 7 |
+
from .dnl_head import DNLHead
|
| 8 |
+
from .ema_head import EMAHead
|
| 9 |
+
from .enc_head import EncHead
|
| 10 |
+
from .fcn_head import FCNHead
|
| 11 |
+
from .fpn_head import FPNHead
|
| 12 |
+
from .gc_head import GCHead
|
| 13 |
+
from .lraspp_head import LRASPPHead
|
| 14 |
+
from .nl_head import NLHead
|
| 15 |
+
from .ocr_head import OCRHead
|
| 16 |
+
# from .point_head import PointHead
|
| 17 |
+
from .psa_head import PSAHead
|
| 18 |
+
from .psp_head import PSPHead
|
| 19 |
+
from .sep_aspp_head import DepthwiseSeparableASPPHead
|
| 20 |
+
from .sep_fcn_head import DepthwiseSeparableFCNHead
|
| 21 |
+
from .uper_head import UPerHead
|
| 22 |
+
|
| 23 |
+
__all__ = [
|
| 24 |
+
'FCNHead', 'PSPHead', 'ASPPHead', 'PSAHead', 'NLHead', 'GCHead', 'CCHead',
|
| 25 |
+
'UPerHead', 'DepthwiseSeparableASPPHead', 'ANNHead', 'DAHead', 'OCRHead',
|
| 26 |
+
'EncHead', 'DepthwiseSeparableFCNHead', 'FPNHead', 'EMAHead', 'DNLHead',
|
| 27 |
+
'APCHead', 'DMHead', 'LRASPPHead'
|
| 28 |
+
]
|
RAVE-main/annotator/mmpkg/mmseg/models/decode_heads/ann_head.py
ADDED
|
@@ -0,0 +1,245 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
from annotator.mmpkg.mmcv.cnn import ConvModule
|
| 4 |
+
|
| 5 |
+
from ..builder import HEADS
|
| 6 |
+
from ..utils import SelfAttentionBlock as _SelfAttentionBlock
|
| 7 |
+
from .decode_head import BaseDecodeHead
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class PPMConcat(nn.ModuleList):
    """Pyramid Pooling Module that only concatenates per-scale features.

    Args:
        pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
            Module.
    """

    def __init__(self, pool_scales=(1, 3, 6, 8)):
        pools = [nn.AdaptiveAvgPool2d(scale) for scale in pool_scales]
        super(PPMConcat, self).__init__(pools)

    def forward(self, feats):
        """Pool at every scale, flatten spatially, concat along that axis."""
        batch, channels = feats.shape[:2]
        flattened = [pool(feats).view(batch, channels, -1) for pool in self]
        return torch.cat(flattened, dim=2)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
class SelfAttentionBlock(_SelfAttentionBlock):
    """ANN-flavoured self-attention block.

    Args:
        low_in_channels (int): Input channels of lower level feature,
            which is the key feature for self-attention.
        high_in_channels (int): Input channels of higher level feature,
            which is the query feature for self-attention.
        channels (int): Output channels of key/query transform.
        out_channels (int): Output channels.
        share_key_query (bool): Whether share projection weight between key
            and query projection.
        query_scale (int): The scale of query feature map.
        key_pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
            Module of key feature.
        conv_cfg (dict|None): Config of conv layers.
        norm_cfg (dict|None): Config of norm layers.
        act_cfg (dict|None): Config of activation layers.
    """

    def __init__(self, low_in_channels, high_in_channels, channels,
                 out_channels, share_key_query, query_scale, key_pool_scales,
                 conv_cfg, norm_cfg, act_cfg):
        # Keys are pyramid-pooled; queries are max-pooled only when scale > 1.
        query_downsample = (
            nn.MaxPool2d(kernel_size=query_scale) if query_scale > 1 else None)
        super(SelfAttentionBlock, self).__init__(
            key_in_channels=low_in_channels,
            query_in_channels=high_in_channels,
            channels=channels,
            out_channels=out_channels,
            share_key_query=share_key_query,
            query_downsample=query_downsample,
            key_downsample=PPMConcat(key_pool_scales),
            key_query_num_convs=1,
            key_query_norm=True,
            value_out_num_convs=1,
            value_out_norm=False,
            matmul_norm=True,
            with_out=True,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
class AFNB(nn.Module):
    """Asymmetric Fusion Non-local Block (AFNB).

    Fuses a low-level feature (key/value) into a high-level feature (query)
    with one self-attention stage per query scale, then compresses the
    concatenation of context and query with a 1x1 conv (no activation).

    Args:
        low_in_channels (int): Input channels of lower level feature,
            which is the key feature for self-attention.
        high_in_channels (int): Input channels of higher level feature,
            which is the query feature for self-attention.
        channels (int): Output channels of key/query transform.
        out_channels (int): Output channels.
        query_scales (tuple[int]): The scales of query feature map.
            Default: (1,)
        key_pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
            Module of key feature.
        conv_cfg (dict|None): Config of conv layers.
        norm_cfg (dict|None): Config of norm layers.
        act_cfg (dict|None): Config of activation layers.
    """

    def __init__(self, low_in_channels, high_in_channels, channels,
                 out_channels, query_scales, key_pool_scales, conv_cfg,
                 norm_cfg, act_cfg):
        super(AFNB, self).__init__()
        attention_blocks = [
            SelfAttentionBlock(
                low_in_channels=low_in_channels,
                high_in_channels=high_in_channels,
                channels=channels,
                out_channels=out_channels,
                share_key_query=False,
                query_scale=scale,
                key_pool_scales=key_pool_scales,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg) for scale in query_scales
        ]
        self.stages = nn.ModuleList(attention_blocks)
        # 1x1 fusion conv; act_cfg=None keeps the output linear.
        self.bottleneck = ConvModule(
            out_channels + high_in_channels,
            out_channels,
            1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg if False else None)

    def forward(self, low_feats, high_feats):
        """Sum per-scale contexts and fuse them with ``high_feats``."""
        stacked = torch.stack(
            [stage(high_feats, low_feats) for stage in self.stages])
        context = stacked.sum(dim=0)
        return self.bottleneck(torch.cat([context, high_feats], dim=1))
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
class APNB(nn.Module):
    """Asymmetric Pyramid Non-local Block (APNB).

    Applies shared-projection self-attention over a single feature map, one
    stage per query scale, then fuses context and input with a 1x1 conv.

    Args:
        in_channels (int): Input channels of key/query feature,
            which is the key feature for self-attention.
        channels (int): Output channels of key/query transform.
        out_channels (int): Output channels.
        query_scales (tuple[int]): The scales of query feature map.
            Default: (1,)
        key_pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
            Module of key feature.
        conv_cfg (dict|None): Config of conv layers.
        norm_cfg (dict|None): Config of norm layers.
        act_cfg (dict|None): Config of activation layers.
    """

    def __init__(self, in_channels, channels, out_channels, query_scales,
                 key_pool_scales, conv_cfg, norm_cfg, act_cfg):
        super(APNB, self).__init__()
        attention_blocks = [
            SelfAttentionBlock(
                low_in_channels=in_channels,
                high_in_channels=in_channels,
                channels=channels,
                out_channels=out_channels,
                share_key_query=True,
                query_scale=scale,
                key_pool_scales=key_pool_scales,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg) for scale in query_scales
        ]
        self.stages = nn.ModuleList(attention_blocks)
        self.bottleneck = ConvModule(
            2 * in_channels,
            out_channels,
            1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)

    def forward(self, feats):
        """Sum per-scale self-attention contexts and fuse with ``feats``."""
        stacked = torch.stack([stage(feats, feats) for stage in self.stages])
        context = stacked.sum(dim=0)
        return self.bottleneck(torch.cat([context, feats], dim=1))
|
| 181 |
+
|
| 182 |
+
|
| 183 |
+
@HEADS.register_module()
class ANNHead(BaseDecodeHead):
    """Asymmetric Non-local Neural Networks for Semantic Segmentation.

    This head is the implementation of `ANNNet
    <https://arxiv.org/abs/1908.07678>`_.

    Args:
        project_channels (int): Projection channels for Nonlocal.
        query_scales (tuple[int]): The scales of query feature map.
            Default: (1,)
        key_pool_scales (tuple[int]): The pooling scales of key feature map.
            Default: (1, 3, 6, 8).
    """

    def __init__(self,
                 project_channels,
                 query_scales=(1, ),
                 key_pool_scales=(1, 3, 6, 8),
                 **kwargs):
        super(ANNHead, self).__init__(
            input_transform='multiple_select', **kwargs)
        # Exactly two input levels are fused: (low-level, high-level).
        assert len(self.in_channels) == 2
        low_in_channels, high_in_channels = self.in_channels
        self.project_channels = project_channels
        # AFNB fuses the low-level (key/value) into the high-level (query).
        self.fusion = AFNB(
            low_in_channels=low_in_channels,
            high_in_channels=high_in_channels,
            out_channels=high_in_channels,
            channels=project_channels,
            query_scales=query_scales,
            key_pool_scales=key_pool_scales,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        # 3x3 conv to reduce the fused feature to self.channels.
        self.bottleneck = ConvModule(
            high_in_channels,
            self.channels,
            3,
            padding=1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        # APNB applies pyramid-pooled self-attention on the reduced feature.
        self.context = APNB(
            in_channels=self.channels,
            out_channels=self.channels,
            channels=project_channels,
            query_scales=query_scales,
            key_pool_scales=key_pool_scales,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

    def forward(self, inputs):
        """Forward: fusion -> dropout -> bottleneck -> context -> cls_seg."""
        low_feats, high_feats = self._transform_inputs(inputs)
        output = self.fusion(low_feats, high_feats)
        output = self.dropout(output)
        output = self.bottleneck(output)
        output = self.context(output)
        output = self.cls_seg(output)

        return output
|
RAVE-main/annotator/mmpkg/mmseg/models/decode_heads/apc_head.py
ADDED
|
@@ -0,0 +1,158 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
import torch.nn.functional as F
|
| 4 |
+
from annotator.mmpkg.mmcv.cnn import ConvModule
|
| 5 |
+
|
| 6 |
+
from annotator.mmpkg.mmseg.ops import resize
|
| 7 |
+
from ..builder import HEADS
|
| 8 |
+
from .decode_head import BaseDecodeHead
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class ACM(nn.Module):
    """Adaptive Context Module used in APCNet.

    Computes an affinity matrix between every spatial position and the
    pooled region features, aggregates region features accordingly, and adds
    the result back to the input as a residual.

    Args:
        pool_scale (int): Pooling scale used in Adaptive Context
            Module to extract region features.
        fusion (bool): Add one conv to fuse residual feature.
        in_channels (int): Input channels.
        channels (int): Channels after modules, before conv_seg.
        conv_cfg (dict | None): Config of conv layers.
        norm_cfg (dict | None): Config of norm layers.
        act_cfg (dict): Config of activation layers.
    """

    def __init__(self, pool_scale, fusion, in_channels, channels, conv_cfg,
                 norm_cfg, act_cfg):
        super(ACM, self).__init__()
        self.pool_scale = pool_scale
        self.fusion = fusion
        self.in_channels = in_channels
        self.channels = channels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        # 1x1 reduction for the pooled (region) branch.
        self.pooled_redu_conv = ConvModule(
            self.in_channels,
            self.channels,
            1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

        # 1x1 reduction for the full-resolution branch.
        self.input_redu_conv = ConvModule(
            self.in_channels,
            self.channels,
            1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

        # Global-context projection added before computing affinities.
        self.global_info = ConvModule(
            self.channels,
            self.channels,
            1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

        # Produces pool_scale**2 affinity logits per spatial position.
        self.gla = nn.Conv2d(self.channels, self.pool_scale**2, 1, 1, 0)

        self.residual_conv = ConvModule(
            self.channels,
            self.channels,
            1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

        if self.fusion:
            self.fusion_conv = ConvModule(
                self.channels,
                self.channels,
                1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)

    def forward(self, x):
        """Forward function."""
        pooled_x = F.adaptive_avg_pool2d(x, self.pool_scale)
        # [batch_size, channels, h, w]
        x = self.input_redu_conv(x)
        # [batch_size, channels, pool_scale, pool_scale]
        pooled_x = self.pooled_redu_conv(pooled_x)
        batch_size = x.size(0)
        # [batch_size, pool_scale * pool_scale, channels]
        pooled_x = pooled_x.view(batch_size, self.channels,
                                 -1).permute(0, 2, 1).contiguous()
        # [batch_size, h * w, pool_scale * pool_scale]
        affinity_matrix = self.gla(x + resize(
            self.global_info(F.adaptive_avg_pool2d(x, 1)), size=x.shape[2:])
                                   ).permute(0, 2, 3, 1).reshape(
                                       batch_size, -1, self.pool_scale**2)
        # torch.sigmoid replaces the deprecated F.sigmoid (same math,
        # no deprecation warning on modern PyTorch).
        affinity_matrix = torch.sigmoid(affinity_matrix)
        # [batch_size, h * w, channels]
        z_out = torch.matmul(affinity_matrix, pooled_x)
        # [batch_size, channels, h * w]
        z_out = z_out.permute(0, 2, 1).contiguous()
        # [batch_size, channels, h, w]
        z_out = z_out.view(batch_size, self.channels, x.size(2), x.size(3))
        z_out = self.residual_conv(z_out)
        z_out = F.relu(z_out + x)
        if self.fusion:
            z_out = self.fusion_conv(z_out)

        return z_out
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
@HEADS.register_module()
class APCHead(BaseDecodeHead):
    """Adaptive Pyramid Context Network for Semantic Segmentation.

    This head is the implementation of
    `APCNet <https://openaccess.thecvf.com/content_CVPR_2019/papers/\
He_Adaptive_Pyramid_Context_Network_for_Semantic_Segmentation_\
CVPR_2019_paper.pdf>`_.

    Args:
        pool_scales (tuple[int]): Pooling scales used in Adaptive Context
            Module. Default: (1, 2, 3, 6).
        fusion (bool): Add one conv to fuse residual feature.
    """

    def __init__(self, pool_scales=(1, 2, 3, 6), fusion=True, **kwargs):
        super(APCHead, self).__init__(**kwargs)
        assert isinstance(pool_scales, (list, tuple))
        self.pool_scales = pool_scales
        self.fusion = fusion
        # One Adaptive Context Module per pooling scale.
        acm_modules = []
        for pool_scale in self.pool_scales:
            acm_modules.append(
                ACM(pool_scale,
                    self.fusion,
                    self.in_channels,
                    self.channels,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg))
        self.acm_modules = nn.ModuleList(acm_modules)
        # Fuses the raw input plus one ACM output per scale.
        self.bottleneck = ConvModule(
            self.in_channels + len(pool_scales) * self.channels,
            self.channels,
            3,
            padding=1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

    def forward(self, inputs):
        """Forward function."""
        x = self._transform_inputs(inputs)
        # Keep the untouched input as the first branch.
        acm_outs = [x]
        for acm_module in self.acm_modules:
            acm_outs.append(acm_module(x))
        acm_outs = torch.cat(acm_outs, dim=1)
        output = self.bottleneck(acm_outs)
        output = self.cls_seg(output)
        return output
|
RAVE-main/annotator/mmpkg/mmseg/models/decode_heads/aspp_head.py
ADDED
|
@@ -0,0 +1,107 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
from annotator.mmpkg.mmcv.cnn import ConvModule
|
| 4 |
+
|
| 5 |
+
from annotator.mmpkg.mmseg.ops import resize
|
| 6 |
+
from ..builder import HEADS
|
| 7 |
+
from .decode_head import BaseDecodeHead
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class ASPPModule(nn.ModuleList):
    """Atrous Spatial Pyramid Pooling (ASPP) Module.

    Holds one ``ConvModule`` per dilation rate: a 1x1 conv for rate 1 and a
    padded 3x3 dilated conv for every other rate.

    Args:
        dilations (tuple[int]): Dilation rate of each layer.
        in_channels (int): Input channels.
        channels (int): Channels after modules, before conv_seg.
        conv_cfg (dict|None): Config of conv layers.
        norm_cfg (dict|None): Config of norm layers.
        act_cfg (dict): Config of activation layers.
    """

    def __init__(self, dilations, in_channels, channels, conv_cfg, norm_cfg,
                 act_cfg):
        super(ASPPModule, self).__init__()
        self.dilations = dilations
        self.in_channels = in_channels
        self.channels = channels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        for rate in dilations:
            is_plain = rate == 1
            self.append(
                ConvModule(
                    self.in_channels,
                    self.channels,
                    1 if is_plain else 3,
                    dilation=rate,
                    padding=0 if is_plain else rate,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg))

    def forward(self, x):
        """Apply every branch to ``x`` and return the list of outputs."""
        return [branch(x) for branch in self]
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
@HEADS.register_module()
class ASPPHead(BaseDecodeHead):
    """Rethinking Atrous Convolution for Semantic Image Segmentation.

    This head is the implementation of `DeepLabV3
    <https://arxiv.org/abs/1706.05587>`_.

    Args:
        dilations (tuple[int]): Dilation rates for ASPP module.
            Default: (1, 6, 12, 18).
    """

    def __init__(self, dilations=(1, 6, 12, 18), **kwargs):
        super(ASPPHead, self).__init__(**kwargs)
        assert isinstance(dilations, (list, tuple))
        self.dilations = dilations
        # Image-level context branch: global average pool -> 1x1 conv.
        self.image_pool = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            ConvModule(
                self.in_channels,
                self.channels,
                1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg))
        self.aspp_modules = ASPPModule(
            dilations,
            self.in_channels,
            self.channels,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        # Fuses the image-pool branch plus one branch per dilation rate.
        self.bottleneck = ConvModule(
            (len(dilations) + 1) * self.channels,
            self.channels,
            3,
            padding=1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

    def forward(self, inputs):
        """Forward function."""
        x = self._transform_inputs(inputs)
        # Upsample the pooled global context back to the feature size.
        aspp_outs = [
            resize(
                self.image_pool(x),
                size=x.size()[2:],
                mode='bilinear',
                align_corners=self.align_corners)
        ]
        aspp_outs.extend(self.aspp_modules(x))
        aspp_outs = torch.cat(aspp_outs, dim=1)
        output = self.bottleneck(aspp_outs)
        output = self.cls_seg(output)
        return output
|
RAVE-main/annotator/mmpkg/mmseg/models/decode_heads/cascade_decode_head.py
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from abc import ABCMeta, abstractmethod
|
| 2 |
+
|
| 3 |
+
from .decode_head import BaseDecodeHead
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class BaseCascadeDecodeHead(BaseDecodeHead, metaclass=ABCMeta):
    """Base class for cascade decode head used in
    :class:`CascadeEncoderDecoder."""

    def __init__(self, *args, **kwargs):
        super(BaseCascadeDecodeHead, self).__init__(*args, **kwargs)

    @abstractmethod
    def forward(self, inputs, prev_output):
        """Placeholder of forward function."""
        pass

    def forward_train(self, inputs, prev_output, img_metas, gt_semantic_seg,
                      train_cfg):
        """Run one training forward pass and return the losses.

        Args:
            inputs (list[Tensor]): Multi-level image features.
            prev_output (Tensor): Output of the previous decode head in the
                cascade.
            img_metas (list[dict]): Per-image meta info dicts containing
                'img_shape', 'scale_factor', 'flip' and possibly 'filename',
                'ori_shape', 'pad_shape', 'img_norm_cfg'; see
                `mmseg/datasets/pipelines/formatting.py:Collect` for details.
            gt_semantic_seg (Tensor): Ground-truth segmentation masks.
            train_cfg (dict): The training config.

        Returns:
            dict[str, Tensor]: a dictionary of loss components
        """
        logits = self.forward(inputs, prev_output)
        return self.losses(logits, gt_semantic_seg)

    def forward_test(self, inputs, prev_output, img_metas, test_cfg):
        """Run one inference forward pass.

        Args:
            inputs (list[Tensor]): Multi-level image features.
            prev_output (Tensor): Output of the previous decode head in the
                cascade.
            img_metas (list[dict]): Per-image meta info dicts (same keys as
                in :meth:`forward_train`).
            test_cfg (dict): The testing config.

        Returns:
            Tensor: Output segmentation map.
        """
        return self.forward(inputs, prev_output)
|
RAVE-main/annotator/mmpkg/mmseg/models/decode_heads/cc_head.py
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
|
| 3 |
+
from ..builder import HEADS
|
| 4 |
+
from .fcn_head import FCNHead
|
| 5 |
+
|
| 6 |
+
try:
|
| 7 |
+
try:
|
| 8 |
+
from mmcv.ops import CrissCrossAttention
|
| 9 |
+
except ImportError:
|
| 10 |
+
from annotator.mmpkg.mmcv.ops import CrissCrossAttention
|
| 11 |
+
except ModuleNotFoundError:
|
| 12 |
+
CrissCrossAttention = None
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
@HEADS.register_module()
class CCHead(FCNHead):
    """CCNet: Criss-Cross Attention for Semantic Segmentation.

    This head is the implementation of `CCNet
    <https://arxiv.org/abs/1811.11721>`_.

    Args:
        recurrence (int): Number of recurrence of Criss Cross Attention
            module. Default: 2.
    """

    def __init__(self, recurrence=2, **kwargs):
        # The attention op is an optional compiled extension; fail fast if
        # the module-level import fallback could not resolve it.
        if CrissCrossAttention is None:
            raise RuntimeError('Please install mmcv-full for '
                               'CrissCrossAttention ops')
        super(CCHead, self).__init__(num_convs=2, **kwargs)
        self.recurrence = recurrence
        self.cca = CrissCrossAttention(self.channels)

    def forward(self, inputs):
        """Forward function."""
        feats = self._transform_inputs(inputs)
        out = self.convs[0](feats)
        # Apply criss-cross attention repeatedly between the two convs.
        for _ in range(self.recurrence):
            out = self.cca(out)
        out = self.convs[1](out)
        if self.concat_input:
            out = self.conv_cat(torch.cat([feats, out], dim=1))
        return self.cls_seg(out)
|
RAVE-main/annotator/mmpkg/mmseg/models/decode_heads/da_head.py
ADDED
|
@@ -0,0 +1,178 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn.functional as F
|
| 3 |
+
from annotator.mmpkg.mmcv.cnn import ConvModule, Scale
|
| 4 |
+
from torch import nn
|
| 5 |
+
|
| 6 |
+
from annotator.mmpkg.mmseg.core import add_prefix
|
| 7 |
+
from ..builder import HEADS
|
| 8 |
+
from ..utils import SelfAttentionBlock as _SelfAttentionBlock
|
| 9 |
+
from .decode_head import BaseDecodeHead
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class PAM(_SelfAttentionBlock):
    """Position Attention Module (PAM)

    Args:
        in_channels (int): Input channels of key/query feature.
        channels (int): Output channels of key/query transform.
    """

    def __init__(self, in_channels, channels):
        # Configure the generic self-attention block as plain spatial
        # attention: separate 1-conv key/query transforms, no downsampling,
        # no output conv, no normalization, no matmul scaling.
        super(PAM, self).__init__(
            key_in_channels=in_channels,
            query_in_channels=in_channels,
            channels=channels,
            out_channels=in_channels,
            share_key_query=False,
            query_downsample=None,
            key_downsample=None,
            key_query_num_convs=1,
            key_query_norm=False,
            value_out_num_convs=1,
            value_out_norm=False,
            matmul_norm=False,
            with_out=False,
            conv_cfg=None,
            norm_cfg=None,
            act_cfg=None)

        # Learnable residual scale initialized to 0, so the module starts
        # out as an identity mapping.
        self.gamma = Scale(0)

    def forward(self, x):
        """Forward function."""
        # Query and key are both ``x`` (self-attention over positions).
        out = super(PAM, self).forward(x, x)

        # Scaled residual connection.
        out = self.gamma(out) + x
        return out
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
class CAM(nn.Module):
    """Channel Attention Module (CAM)"""

    def __init__(self):
        super(CAM, self).__init__()
        # Learnable residual scale initialized to 0 (identity at start).
        self.gamma = Scale(0)

    def forward(self, x):
        """Forward function."""
        n, c, h, w = x.size()
        # Flatten spatial dims: each channel becomes one row vector.
        query = x.view(n, c, -1)
        key = query.permute(0, 2, 1)
        # Channel-to-channel affinity matrix, shape [n, c, c].
        energy = torch.bmm(query, key)
        # Subtract each row's max before softmax (numerical-stability trick
        # used by the original DANet implementation).
        shifted = torch.max(
            energy, -1, keepdim=True)[0].expand_as(energy) - energy
        attention = F.softmax(shifted, dim=-1)
        value = x.view(n, c, -1)

        # Re-weight channels by the attention map and restore the shape.
        weighted = torch.bmm(attention, value).view(n, c, h, w)

        # Scaled residual connection.
        return self.gamma(weighted) + x
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
@HEADS.register_module()
class DAHead(BaseDecodeHead):
    """Dual Attention Network for Scene Segmentation.

    This head is the implementation of `DANet
    <https://arxiv.org/abs/1809.02983>`_.

    Args:
        pam_channels (int): The channels of Position Attention Module(PAM).
    """

    def __init__(self, pam_channels, **kwargs):
        super(DAHead, self).__init__(**kwargs)
        self.pam_channels = pam_channels
        # Position-attention branch: 3x3 conv -> PAM -> 3x3 conv.
        self.pam_in_conv = ConvModule(
            self.in_channels,
            self.channels,
            3,
            padding=1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        self.pam = PAM(self.channels, pam_channels)
        self.pam_out_conv = ConvModule(
            self.channels,
            self.channels,
            3,
            padding=1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        # Auxiliary 1x1 classifier used for the PAM branch's own loss.
        self.pam_conv_seg = nn.Conv2d(
            self.channels, self.num_classes, kernel_size=1)

        # Channel-attention branch mirrors the PAM branch, with CAM instead.
        self.cam_in_conv = ConvModule(
            self.in_channels,
            self.channels,
            3,
            padding=1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        self.cam = CAM()
        self.cam_out_conv = ConvModule(
            self.channels,
            self.channels,
            3,
            padding=1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        # Auxiliary 1x1 classifier used for the CAM branch's own loss.
        self.cam_conv_seg = nn.Conv2d(
            self.channels, self.num_classes, kernel_size=1)

    def pam_cls_seg(self, feat):
        """PAM feature classification."""
        if self.dropout is not None:
            feat = self.dropout(feat)
        output = self.pam_conv_seg(feat)
        return output

    def cam_cls_seg(self, feat):
        """CAM feature classification."""
        if self.dropout is not None:
            feat = self.dropout(feat)
        output = self.cam_conv_seg(feat)
        return output

    def forward(self, inputs):
        """Forward function.

        Returns:
            tuple[Tensor]: ``(pam_cam_out, pam_out, cam_out)`` — logits of
            the fused branch and of each individual attention branch.
        """
        x = self._transform_inputs(inputs)
        pam_feat = self.pam_in_conv(x)
        pam_feat = self.pam(pam_feat)
        pam_feat = self.pam_out_conv(pam_feat)
        pam_out = self.pam_cls_seg(pam_feat)

        cam_feat = self.cam_in_conv(x)
        cam_feat = self.cam(cam_feat)
        cam_feat = self.cam_out_conv(cam_feat)
        cam_out = self.cam_cls_seg(cam_feat)

        # Fuse both branches by element-wise sum, then classify with the
        # shared conv_seg inherited from BaseDecodeHead.
        feat_sum = pam_feat + cam_feat
        pam_cam_out = self.cls_seg(feat_sum)

        return pam_cam_out, pam_out, cam_out

    def forward_test(self, inputs, img_metas, test_cfg):
        """Forward function for testing, only ``pam_cam`` is used."""
        return self.forward(inputs)[0]

    def losses(self, seg_logit, seg_label):
        """Compute ``pam_cam``, ``pam``, ``cam`` loss."""
        pam_cam_seg_logit, pam_seg_logit, cam_seg_logit = seg_logit
        loss = dict()
        # Each branch contributes its own prefixed copy of the base loss.
        loss.update(
            add_prefix(
                super(DAHead, self).losses(pam_cam_seg_logit, seg_label),
                'pam_cam'))
        loss.update(
            add_prefix(
                super(DAHead, self).losses(pam_seg_logit, seg_label), 'pam'))
        loss.update(
            add_prefix(
                super(DAHead, self).losses(cam_seg_logit, seg_label), 'cam'))
        return loss
|
RAVE-main/annotator/mmpkg/mmseg/models/decode_heads/decode_head.py
ADDED
|
@@ -0,0 +1,234 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from abc import ABCMeta, abstractmethod
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
import torch.nn as nn
|
| 5 |
+
from annotator.mmpkg.mmcv.cnn import normal_init
|
| 6 |
+
from annotator.mmpkg.mmcv.runner import auto_fp16, force_fp32
|
| 7 |
+
|
| 8 |
+
from annotator.mmpkg.mmseg.core import build_pixel_sampler
|
| 9 |
+
from annotator.mmpkg.mmseg.ops import resize
|
| 10 |
+
from ..builder import build_loss
|
| 11 |
+
from ..losses import accuracy
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class BaseDecodeHead(nn.Module, metaclass=ABCMeta):
    """Base class for BaseDecodeHead.

    Args:
        in_channels (int|Sequence[int]): Input channels.
        channels (int): Channels after modules, before conv_seg.
        num_classes (int): Number of classes.
        dropout_ratio (float): Ratio of dropout layer. Default: 0.1.
        conv_cfg (dict|None): Config of conv layers. Default: None.
        norm_cfg (dict|None): Config of norm layers. Default: None.
        act_cfg (dict): Config of activation layers.
            Default: dict(type='ReLU')
        in_index (int|Sequence[int]): Input feature index. Default: -1
        input_transform (str|None): Transformation type of input features.
            Options: 'resize_concat', 'multiple_select', None.
            'resize_concat': Multiple feature maps will be resize to the
                same size as first one and than concat together.
                Usually used in FCN head of HRNet.
            'multiple_select': Multiple feature maps will be bundle into
                a list and passed into decode head.
            None: Only one select feature map is allowed.
            Default: None.
        loss_decode (dict): Config of decode loss.
            Default: dict(type='CrossEntropyLoss').
        ignore_index (int | None): The label index to be ignored. When using
            masked BCE loss, ignore_index should be set to None. Default: 255
        sampler (dict|None): The config of segmentation map sampler.
            Default: None.
        align_corners (bool): align_corners argument of F.interpolate.
            Default: False.
    """

    def __init__(self,
                 in_channels,
                 channels,
                 *,
                 num_classes,
                 dropout_ratio=0.1,
                 conv_cfg=None,
                 norm_cfg=None,
                 act_cfg=dict(type='ReLU'),
                 in_index=-1,
                 input_transform=None,
                 loss_decode=dict(
                     type='CrossEntropyLoss',
                     use_sigmoid=False,
                     loss_weight=1.0),
                 ignore_index=255,
                 sampler=None,
                 align_corners=False):
        super(BaseDecodeHead, self).__init__()
        # Validates in_channels/in_index/input_transform and sets
        # ``self.in_channels`` and ``self.input_transform``.
        self._init_inputs(in_channels, in_index, input_transform)
        self.channels = channels
        self.num_classes = num_classes
        self.dropout_ratio = dropout_ratio
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.in_index = in_index
        self.loss_decode = build_loss(loss_decode)
        self.ignore_index = ignore_index
        self.align_corners = align_corners
        if sampler is not None:
            self.sampler = build_pixel_sampler(sampler, context=self)
        else:
            self.sampler = None

        # Final 1x1 per-pixel classifier shared by all subclasses via
        # ``cls_seg``.
        self.conv_seg = nn.Conv2d(channels, num_classes, kernel_size=1)
        if dropout_ratio > 0:
            self.dropout = nn.Dropout2d(dropout_ratio)
        else:
            self.dropout = None
        self.fp16_enabled = False

    def extra_repr(self):
        """Extra repr."""
        s = f'input_transform={self.input_transform}, ' \
            f'ignore_index={self.ignore_index}, ' \
            f'align_corners={self.align_corners}'
        return s

    def _init_inputs(self, in_channels, in_index, input_transform):
        """Check and initialize input transforms.

        The in_channels, in_index and input_transform must match.
        Specifically, when input_transform is None, only single feature map
        will be selected. So in_channels and in_index must be of type int.
        When input_transform

        Args:
            in_channels (int|Sequence[int]): Input channels.
            in_index (int|Sequence[int]): Input feature index.
            input_transform (str|None): Transformation type of input features.
                Options: 'resize_concat', 'multiple_select', None.
                'resize_concat': Multiple feature maps will be resize to the
                    same size as first one and than concat together.
                    Usually used in FCN head of HRNet.
                'multiple_select': Multiple feature maps will be bundle into
                    a list and passed into decode head.
                None: Only one select feature map is allowed.
        """

        if input_transform is not None:
            assert input_transform in ['resize_concat', 'multiple_select']
        self.input_transform = input_transform
        self.in_index = in_index
        if input_transform is not None:
            # Multi-level mode: channels/indices must be parallel sequences.
            assert isinstance(in_channels, (list, tuple))
            assert isinstance(in_index, (list, tuple))
            assert len(in_channels) == len(in_index)
            if input_transform == 'resize_concat':
                # All levels are concatenated, so channels add up.
                self.in_channels = sum(in_channels)
            else:
                self.in_channels = in_channels
        else:
            assert isinstance(in_channels, int)
            assert isinstance(in_index, int)
            self.in_channels = in_channels

    def init_weights(self):
        """Initialize weights of classification layer."""
        normal_init(self.conv_seg, mean=0, std=0.01)

    def _transform_inputs(self, inputs):
        """Transform inputs for decoder.

        Args:
            inputs (list[Tensor]): List of multi-level img features.

        Returns:
            Tensor: The transformed inputs
        """

        if self.input_transform == 'resize_concat':
            inputs = [inputs[i] for i in self.in_index]
            # Upsample every selected level to the first level's size, then
            # concatenate along the channel dim.
            upsampled_inputs = [
                resize(
                    input=x,
                    size=inputs[0].shape[2:],
                    mode='bilinear',
                    align_corners=self.align_corners) for x in inputs
            ]
            inputs = torch.cat(upsampled_inputs, dim=1)
        elif self.input_transform == 'multiple_select':
            inputs = [inputs[i] for i in self.in_index]
        else:
            # Single-level mode: pick exactly one feature map.
            inputs = inputs[self.in_index]

        return inputs

    @auto_fp16()
    @abstractmethod
    def forward(self, inputs):
        """Placeholder of forward function."""
        pass

    def forward_train(self, inputs, img_metas, gt_semantic_seg, train_cfg):
        """Forward function for training.
        Args:
            inputs (list[Tensor]): List of multi-level img features.
            img_metas (list[dict]): List of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                `mmseg/datasets/pipelines/formatting.py:Collect`.
            gt_semantic_seg (Tensor): Semantic segmentation masks
                used if the architecture supports semantic segmentation task.
            train_cfg (dict): The training config.

        Returns:
            dict[str, Tensor]: a dictionary of loss components
        """
        seg_logits = self.forward(inputs)
        losses = self.losses(seg_logits, gt_semantic_seg)
        return losses

    def forward_test(self, inputs, img_metas, test_cfg):
        """Forward function for testing.

        Args:
            inputs (list[Tensor]): List of multi-level img features.
            img_metas (list[dict]): List of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                `mmseg/datasets/pipelines/formatting.py:Collect`.
            test_cfg (dict): The testing config.

        Returns:
            Tensor: Output segmentation map.
        """
        return self.forward(inputs)

    def cls_seg(self, feat):
        """Classify each pixel."""
        if self.dropout is not None:
            feat = self.dropout(feat)
        output = self.conv_seg(feat)
        return output

    @force_fp32(apply_to=('seg_logit', ))
    def losses(self, seg_logit, seg_label):
        """Compute segmentation loss."""
        loss = dict()
        # Upsample logits to the label's spatial size before the loss.
        seg_logit = resize(
            input=seg_logit,
            size=seg_label.shape[2:],
            mode='bilinear',
            align_corners=self.align_corners)
        if self.sampler is not None:
            seg_weight = self.sampler.sample(seg_logit, seg_label)
        else:
            seg_weight = None
        # Drop the singleton channel dim of the label map.
        seg_label = seg_label.squeeze(1)
        loss['loss_seg'] = self.loss_decode(
            seg_logit,
            seg_label,
            weight=seg_weight,
            ignore_index=self.ignore_index)
        loss['acc_seg'] = accuracy(seg_logit, seg_label)
        return loss
|
RAVE-main/annotator/mmpkg/mmseg/models/decode_heads/dm_head.py
ADDED
|
@@ -0,0 +1,140 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
import torch.nn.functional as F
|
| 4 |
+
from annotator.mmpkg.mmcv.cnn import ConvModule, build_activation_layer, build_norm_layer
|
| 5 |
+
|
| 6 |
+
from ..builder import HEADS
|
| 7 |
+
from .decode_head import BaseDecodeHead
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class DCM(nn.Module):
    """Dynamic Convolutional Module used in DMNet.

    Args:
        filter_size (int): The filter size of generated convolution kernel
            used in Dynamic Convolutional Module.
        fusion (bool): Add one conv to fuse DCM output feature.
        in_channels (int): Input channels.
        channels (int): Channels after modules, before conv_seg.
        conv_cfg (dict | None): Config of conv layers.
        norm_cfg (dict | None): Config of norm layers.
        act_cfg (dict): Config of activation layers.
    """

    def __init__(self, filter_size, fusion, in_channels, channels, conv_cfg,
                 norm_cfg, act_cfg):
        super(DCM, self).__init__()
        self.filter_size = filter_size
        self.fusion = fusion
        self.in_channels = in_channels
        self.channels = channels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        # Produces the per-channel dynamic kernels from the pooled input.
        self.filter_gen_conv = nn.Conv2d(self.in_channels, self.channels, 1, 1,
                                         0)

        # Reduces the input to ``channels`` before the dynamic convolution.
        self.input_redu_conv = ConvModule(
            self.in_channels,
            self.channels,
            1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

        if self.norm_cfg is not None:
            self.norm = build_norm_layer(self.norm_cfg, self.channels)[1]
        else:
            self.norm = None
        self.activate = build_activation_layer(self.act_cfg)

        if self.fusion:
            self.fusion_conv = ConvModule(
                self.channels,
                self.channels,
                1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)

    def forward(self, x):
        """Forward function."""
        # Generate one filter_size x filter_size kernel per (batch, channel)
        # from the adaptively pooled input.
        generated_filter = self.filter_gen_conv(
            F.adaptive_avg_pool2d(x, self.filter_size))
        x = self.input_redu_conv(x)
        b, c, h, w = x.shape
        # [1, b * c, h, w], c = self.channels
        x = x.view(1, b * c, h, w)
        # [b * c, 1, filter_size, filter_size]
        generated_filter = generated_filter.view(b * c, 1, self.filter_size,
                                                 self.filter_size)
        # 'same'-style padding; even kernels get one extra pixel on one side.
        pad = (self.filter_size - 1) // 2
        if (self.filter_size - 1) % 2 == 0:
            p2d = (pad, pad, pad, pad)
        else:
            p2d = (pad + 1, pad, pad + 1, pad)
        x = F.pad(input=x, pad=p2d, mode='constant', value=0)
        # [1, b * c, h, w]
        # groups=b*c applies each generated kernel to its own channel only.
        output = F.conv2d(input=x, weight=generated_filter, groups=b * c)
        # [b, c, h, w]
        output = output.view(b, c, h, w)
        if self.norm is not None:
            output = self.norm(output)
        output = self.activate(output)

        if self.fusion:
            output = self.fusion_conv(output)

        return output
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
@HEADS.register_module()
class DMHead(BaseDecodeHead):
    """Dynamic Multi-scale Filters for Semantic Segmentation.

    This head is the implementation of
    `DMNet <https://openaccess.thecvf.com/content_ICCV_2019/papers/\
He_Dynamic_Multi-Scale_Filters_for_Semantic_Segmentation_\
ICCV_2019_paper.pdf>`_.

    Args:
        filter_sizes (tuple[int]): The size of generated convolutional filters
            used in Dynamic Convolutional Module. Default: (1, 3, 5, 7).
        fusion (bool): Add one conv to fuse DCM output feature.
    """

    def __init__(self, filter_sizes=(1, 3, 5, 7), fusion=False, **kwargs):
        super(DMHead, self).__init__(**kwargs)
        assert isinstance(filter_sizes, (list, tuple))
        self.filter_sizes = filter_sizes
        self.fusion = fusion
        # One dynamic convolutional module per filter size.
        dcm_modules = []
        for filter_size in self.filter_sizes:
            dcm_modules.append(
                DCM(filter_size,
                    self.fusion,
                    self.in_channels,
                    self.channels,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg))
        self.dcm_modules = nn.ModuleList(dcm_modules)
        # Fuses the raw input concatenated with all DCM outputs.
        self.bottleneck = ConvModule(
            self.in_channels + len(filter_sizes) * self.channels,
            self.channels,
            3,
            padding=1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

    def forward(self, inputs):
        """Forward function."""
        x = self._transform_inputs(inputs)
        # Keep the untouched feature map as the first entry (skip path).
        dcm_outs = [x]
        for dcm_module in self.dcm_modules:
            dcm_outs.append(dcm_module(x))
        dcm_outs = torch.cat(dcm_outs, dim=1)
        output = self.bottleneck(dcm_outs)
        output = self.cls_seg(output)
        return output
|
RAVE-main/annotator/mmpkg/mmseg/models/decode_heads/dnl_head.py
ADDED
|
@@ -0,0 +1,131 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
from annotator.mmpkg.mmcv.cnn import NonLocal2d
|
| 3 |
+
from torch import nn
|
| 4 |
+
|
| 5 |
+
from ..builder import HEADS
|
| 6 |
+
from .fcn_head import FCNHead
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class DisentangledNonLocal2d(NonLocal2d):
|
| 10 |
+
"""Disentangled Non-Local Blocks.
|
| 11 |
+
|
| 12 |
+
Args:
|
| 13 |
+
temperature (float): Temperature to adjust attention. Default: 0.05
|
| 14 |
+
"""
|
| 15 |
+
|
| 16 |
+
def __init__(self, *arg, temperature, **kwargs):
    # Delegate all standard non-local configuration to NonLocal2d.
    super().__init__(*arg, **kwargs)
    # Softmax temperature applied in ``embedded_gaussian``.
    self.temperature = temperature
    # 1x1 conv producing a single-channel mask (the unary attention term of
    # the disentangled formulation).
    self.conv_mask = nn.Conv2d(self.in_channels, 1, kernel_size=1)
|
| 20 |
+
|
| 21 |
+
def embedded_gaussian(self, theta_x, phi_x):
|
| 22 |
+
"""Embedded gaussian with temperature."""
|
| 23 |
+
|
| 24 |
+
# NonLocal2d pairwise_weight: [N, HxW, HxW]
|
| 25 |
+
pairwise_weight = torch.matmul(theta_x, phi_x)
|
| 26 |
+
if self.use_scale:
|
| 27 |
+
# theta_x.shape[-1] is `self.inter_channels`
|
| 28 |
+
pairwise_weight /= theta_x.shape[-1]**0.5
|
| 29 |
+
pairwise_weight /= self.temperature
|
| 30 |
+
pairwise_weight = pairwise_weight.softmax(dim=-1)
|
| 31 |
+
return pairwise_weight
|
| 32 |
+
|
| 33 |
+
def forward(self, x):
|
| 34 |
+
# x: [N, C, H, W]
|
| 35 |
+
n = x.size(0)
|
| 36 |
+
|
| 37 |
+
# g_x: [N, HxW, C]
|
| 38 |
+
g_x = self.g(x).view(n, self.inter_channels, -1)
|
| 39 |
+
g_x = g_x.permute(0, 2, 1)
|
| 40 |
+
|
| 41 |
+
# theta_x: [N, HxW, C], phi_x: [N, C, HxW]
|
| 42 |
+
if self.mode == 'gaussian':
|
| 43 |
+
theta_x = x.view(n, self.in_channels, -1)
|
| 44 |
+
theta_x = theta_x.permute(0, 2, 1)
|
| 45 |
+
if self.sub_sample:
|
| 46 |
+
phi_x = self.phi(x).view(n, self.in_channels, -1)
|
| 47 |
+
else:
|
| 48 |
+
phi_x = x.view(n, self.in_channels, -1)
|
| 49 |
+
elif self.mode == 'concatenation':
|
| 50 |
+
theta_x = self.theta(x).view(n, self.inter_channels, -1, 1)
|
| 51 |
+
phi_x = self.phi(x).view(n, self.inter_channels, 1, -1)
|
| 52 |
+
else:
|
| 53 |
+
theta_x = self.theta(x).view(n, self.inter_channels, -1)
|
| 54 |
+
theta_x = theta_x.permute(0, 2, 1)
|
| 55 |
+
phi_x = self.phi(x).view(n, self.inter_channels, -1)
|
| 56 |
+
|
| 57 |
+
# subtract mean
|
| 58 |
+
theta_x -= theta_x.mean(dim=-2, keepdim=True)
|
| 59 |
+
phi_x -= phi_x.mean(dim=-1, keepdim=True)
|
| 60 |
+
|
| 61 |
+
pairwise_func = getattr(self, self.mode)
|
| 62 |
+
# pairwise_weight: [N, HxW, HxW]
|
| 63 |
+
pairwise_weight = pairwise_func(theta_x, phi_x)
|
| 64 |
+
|
| 65 |
+
# y: [N, HxW, C]
|
| 66 |
+
y = torch.matmul(pairwise_weight, g_x)
|
| 67 |
+
# y: [N, C, H, W]
|
| 68 |
+
y = y.permute(0, 2, 1).contiguous().reshape(n, self.inter_channels,
|
| 69 |
+
*x.size()[2:])
|
| 70 |
+
|
| 71 |
+
# unary_mask: [N, 1, HxW]
|
| 72 |
+
unary_mask = self.conv_mask(x)
|
| 73 |
+
unary_mask = unary_mask.view(n, 1, -1)
|
| 74 |
+
unary_mask = unary_mask.softmax(dim=-1)
|
| 75 |
+
# unary_x: [N, 1, C]
|
| 76 |
+
unary_x = torch.matmul(unary_mask, g_x)
|
| 77 |
+
# unary_x: [N, C, 1, 1]
|
| 78 |
+
unary_x = unary_x.permute(0, 2, 1).contiguous().reshape(
|
| 79 |
+
n, self.inter_channels, 1, 1)
|
| 80 |
+
|
| 81 |
+
output = x + self.conv_out(y + unary_x)
|
| 82 |
+
|
| 83 |
+
return output
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
@HEADS.register_module()
class DNLHead(FCNHead):
    """Disentangled Non-Local Neural Networks.

    This head is the implementation of `DNLNet
    <https://arxiv.org/abs/2006.06668>`_.

    Args:
        reduction (int): Reduction factor of projection transform. Default: 2.
        use_scale (bool): Whether to scale pairwise_weight by
            sqrt(1/inter_channels). Default: True.
        mode (str): The nonlocal mode. Options are 'embedded_gaussian',
            'dot_product'. Default: 'embedded_gaussian'.
        temperature (float): Temperature to adjust attention. Default: 0.05
    """

    def __init__(self,
                 reduction=2,
                 use_scale=True,
                 mode='embedded_gaussian',
                 temperature=0.05,
                 **kwargs):
        # The FCN parent builds two convs; the DNL block sits between them.
        super(DNLHead, self).__init__(num_convs=2, **kwargs)
        self.reduction = reduction
        self.use_scale = use_scale
        self.mode = mode
        self.temperature = temperature
        # Operates on `self.channels` features produced by the first conv.
        self.dnl_block = DisentangledNonLocal2d(
            in_channels=self.channels,
            reduction=self.reduction,
            use_scale=self.use_scale,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            mode=self.mode,
            temperature=self.temperature)

    def forward(self, inputs):
        """Forward: conv -> DNL block -> conv -> (optional concat) -> cls."""
        x = self._transform_inputs(inputs)
        output = self.convs[0](x)
        output = self.dnl_block(output)
        output = self.convs[1](output)
        if self.concat_input:
            # Fuse the head input with the refined features before scoring.
            output = self.conv_cat(torch.cat([x, output], dim=1))
        output = self.cls_seg(output)
        return output
|
RAVE-main/annotator/mmpkg/mmseg/models/decode_heads/ema_head.py
ADDED
|
@@ -0,0 +1,168 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
import torch.distributed as dist
|
| 5 |
+
import torch.nn as nn
|
| 6 |
+
import torch.nn.functional as F
|
| 7 |
+
from annotator.mmpkg.mmcv.cnn import ConvModule
|
| 8 |
+
|
| 9 |
+
from ..builder import HEADS
|
| 10 |
+
from .decode_head import BaseDecodeHead
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def reduce_mean(tensor):
    """Average *tensor* across all processes when running distributed.

    Outside of an initialized process group the input is returned
    unchanged (same object, no copy).
    """
    if dist.is_available() and dist.is_initialized():
        averaged = tensor.clone().div_(dist.get_world_size())
        dist.all_reduce(averaged, op=dist.ReduceOp.SUM)
        return averaged
    return tensor
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class EMAModule(nn.Module):
    """Expectation Maximization Attention Module used in EMANet.

    Args:
        channels (int): Channels of the whole module.
        num_bases (int): Number of bases.
        num_stages (int): Number of the EM iterations.
        momentum (float): Momentum used when updating the running bases
            during training.
    """

    def __init__(self, channels, num_bases, num_stages, momentum):
        super(EMAModule, self).__init__()
        assert num_stages >= 1, 'num_stages must be at least 1!'
        self.num_bases = num_bases
        self.num_stages = num_stages
        self.momentum = momentum

        # Random init, then l2-normalized along the channel dimension.
        bases = torch.zeros(1, channels, self.num_bases)
        bases.normal_(0, math.sqrt(2. / self.num_bases))
        # [1, channels, num_bases]
        bases = F.normalize(bases, dim=1, p=2)
        # Buffer: carried in state_dict but not a learnable parameter.
        self.register_buffer('bases', bases)

    def forward(self, feats):
        """Run ``num_stages`` EM iterations and reconstruct the features.

        Args:
            feats (Tensor): Input feature map of shape (N, C, H, W).

        Returns:
            Tensor: Reconstructed features of shape (N, C, H, W).
        """
        batch_size, channels, height, width = feats.size()
        # [batch_size, channels, height*width]
        feats = feats.view(batch_size, channels, height * width)
        # [batch_size, channels, num_bases]
        bases = self.bases.repeat(batch_size, 1, 1)

        # EM iterations run without gradients; only the reconstruction
        # below participates in backprop.
        with torch.no_grad():
            for i in range(self.num_stages):
                # E-step: soft-assign each position to each base.
                # [batch_size, height*width, num_bases]
                attention = torch.einsum('bcn,bck->bnk', feats, bases)
                attention = F.softmax(attention, dim=2)
                # l1 norm
                attention_normed = F.normalize(attention, dim=1, p=1)
                # M-step: re-estimate bases as weighted feature means.
                # [batch_size, channels, num_bases]
                bases = torch.einsum('bcn,bnk->bck', feats, attention_normed)
                # l2 norm
                bases = F.normalize(bases, dim=1, p=2)

        feats_recon = torch.einsum('bck,bnk->bcn', bases, attention)
        feats_recon = feats_recon.view(batch_size, channels, height, width)

        if self.training:
            # EMA update of the running bases: batch-averaged, synced
            # across workers via reduce_mean, re-normalized, then blended.
            bases = bases.mean(dim=0, keepdim=True)
            bases = reduce_mean(bases)
            # l2 norm
            bases = F.normalize(bases, dim=1, p=2)
            self.bases = (1 -
                          self.momentum) * self.bases + self.momentum * bases

        return feats_recon
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
@HEADS.register_module()
class EMAHead(BaseDecodeHead):
    """Expectation Maximization Attention Networks for Semantic Segmentation.

    This head is the implementation of `EMANet
    <https://arxiv.org/abs/1907.13426>`_.

    Args:
        ema_channels (int): EMA module channels
        num_bases (int): Number of bases.
        num_stages (int): Number of the EM iterations.
        concat_input (bool): Whether concat the input and output of convs
            before classification layer. Default: True
        momentum (float): Momentum to update the base. Default: 0.1.
    """

    def __init__(self,
                 ema_channels,
                 num_bases,
                 num_stages,
                 concat_input=True,
                 momentum=0.1,
                 **kwargs):
        super(EMAHead, self).__init__(**kwargs)
        self.ema_channels = ema_channels
        self.num_bases = num_bases
        self.num_stages = num_stages
        self.concat_input = concat_input
        self.momentum = momentum
        self.ema_module = EMAModule(self.ema_channels, self.num_bases,
                                    self.num_stages, self.momentum)

        # Projects the input features to the EMA working width.
        self.ema_in_conv = ConvModule(
            self.in_channels,
            self.ema_channels,
            3,
            padding=1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        # project (0, inf) -> (-inf, inf)
        self.ema_mid_conv = ConvModule(
            self.ema_channels,
            self.ema_channels,
            1,
            conv_cfg=self.conv_cfg,
            norm_cfg=None,
            act_cfg=None)
        # The mid conv is frozen: its parameters never receive gradients.
        for param in self.ema_mid_conv.parameters():
            param.requires_grad = False

        self.ema_out_conv = ConvModule(
            self.ema_channels,
            self.ema_channels,
            1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=None)
        self.bottleneck = ConvModule(
            self.ema_channels,
            self.channels,
            3,
            padding=1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        if self.concat_input:
            # Fuses the head input with the bottleneck output.
            self.conv_cat = ConvModule(
                self.in_channels + self.channels,
                self.channels,
                kernel_size=3,
                padding=1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)

    def forward(self, inputs):
        """Forward function.

        Pipeline: in-conv -> (frozen) mid-conv -> EMA reconstruction with a
        residual connection -> bottleneck -> optional input concat -> cls.
        """
        x = self._transform_inputs(inputs)
        feats = self.ema_in_conv(x)
        identity = feats
        feats = self.ema_mid_conv(feats)
        recon = self.ema_module(feats)
        recon = F.relu(recon, inplace=True)
        recon = self.ema_out_conv(recon)
        # Residual around the EMA branch.
        output = F.relu(identity + recon, inplace=True)
        output = self.bottleneck(output)
        if self.concat_input:
            output = self.conv_cat(torch.cat([x, output], dim=1))
        output = self.cls_seg(output)
        return output
|
RAVE-main/annotator/mmpkg/mmseg/models/decode_heads/enc_head.py
ADDED
|
@@ -0,0 +1,187 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
import torch.nn.functional as F
|
| 4 |
+
from annotator.mmpkg.mmcv.cnn import ConvModule, build_norm_layer
|
| 5 |
+
|
| 6 |
+
from annotator.mmpkg.mmseg.ops import Encoding, resize
|
| 7 |
+
from ..builder import HEADS, build_loss
|
| 8 |
+
from .decode_head import BaseDecodeHead
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class EncModule(nn.Module):
    """Encoding Module used in EncNet.

    Args:
        in_channels (int): Input channels.
        num_codes (int): Number of code words.
        conv_cfg (dict|None): Config of conv layers.
        norm_cfg (dict|None): Config of norm layers.
        act_cfg (dict): Config of activation layers.
    """

    def __init__(self, in_channels, num_codes, conv_cfg, norm_cfg, act_cfg):
        super(EncModule, self).__init__()
        self.encoding_project = ConvModule(
            in_channels,
            in_channels,
            1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)
        # TODO: resolve this hack
        # change to 1d
        # The Encoding layer output is not a 2d feature map, so a 2d norm
        # config has to be rewritten to its 1d counterpart here.
        if norm_cfg is not None:
            encoding_norm_cfg = norm_cfg.copy()
            if encoding_norm_cfg['type'] in ['BN', 'IN']:
                encoding_norm_cfg['type'] += '1d'
            else:
                encoding_norm_cfg['type'] = encoding_norm_cfg['type'].replace(
                    '2d', '1d')
        else:
            # fallback to BN1d
            encoding_norm_cfg = dict(type='BN1d')
        self.encoding = nn.Sequential(
            Encoding(channels=in_channels, num_codes=num_codes),
            build_norm_layer(encoding_norm_cfg, num_codes)[1],
            nn.ReLU(inplace=True))
        # Per-channel gating factors predicted from the encoded statistics.
        self.fc = nn.Sequential(
            nn.Linear(in_channels, in_channels), nn.Sigmoid())

    def forward(self, x):
        """Forward function.

        Returns:
            tuple: ``(encoding_feat, output)`` where ``encoding_feat`` is
            the codeword statistics averaged over dim 1 and ``output`` is
            the channel-reweighted feature map, same shape as ``x``.
        """
        encoding_projection = self.encoding_project(x)
        encoding_feat = self.encoding(encoding_projection).mean(dim=1)
        batch_size, channels, _, _ = x.size()
        gamma = self.fc(encoding_feat)
        y = gamma.view(batch_size, channels, 1, 1)
        # Residual channel attention: x * (1 + gamma), then ReLU in place.
        output = F.relu_(x + x * y)
        return encoding_feat, output
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
@HEADS.register_module()
class EncHead(BaseDecodeHead):
    """Context Encoding for Semantic Segmentation.

    This head is the implementation of `EncNet
    <https://arxiv.org/abs/1803.08904>`_.

    Args:
        num_codes (int): Number of code words. Default: 32.
        use_se_loss (bool): Whether use Semantic Encoding Loss (SE-loss) to
            regularize the training. Default: True.
        add_lateral (bool): Whether use lateral connection to fuse features.
            Default: False.
        loss_se_decode (dict): Config of decode loss.
            Default: dict(type='CrossEntropyLoss', use_sigmoid=True).
    """

    def __init__(self,
                 num_codes=32,
                 use_se_loss=True,
                 add_lateral=False,
                 loss_se_decode=dict(
                     type='CrossEntropyLoss',
                     use_sigmoid=True,
                     loss_weight=0.2),
                 **kwargs):
        super(EncHead, self).__init__(
            input_transform='multiple_select', **kwargs)
        self.use_se_loss = use_se_loss
        self.add_lateral = add_lateral
        self.num_codes = num_codes
        # Reduces the deepest input level to the head's working width.
        self.bottleneck = ConvModule(
            self.in_channels[-1],
            self.channels,
            3,
            padding=1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        if add_lateral:
            self.lateral_convs = nn.ModuleList()
            for in_channels in self.in_channels[:-1]:  # skip the last one
                self.lateral_convs.append(
                    ConvModule(
                        in_channels,
                        self.channels,
                        1,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg,
                        act_cfg=self.act_cfg))
            # Fuses the bottleneck output with all resized laterals.
            self.fusion = ConvModule(
                len(self.in_channels) * self.channels,
                self.channels,
                3,
                padding=1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)
        self.enc_module = EncModule(
            self.channels,
            num_codes=num_codes,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        if self.use_se_loss:
            self.loss_se_decode = build_loss(loss_se_decode)
            # Predicts per-image class presence from the encoded statistics.
            self.se_layer = nn.Linear(self.channels, self.num_classes)

    def forward(self, inputs):
        """Forward function.

        Returns ``(seg_logits, se_logits)`` when ``use_se_loss`` is on,
        otherwise just ``seg_logits``.
        """
        inputs = self._transform_inputs(inputs)
        feat = self.bottleneck(inputs[-1])
        if self.add_lateral:
            laterals = [
                resize(
                    lateral_conv(inputs[i]),
                    size=feat.shape[2:],
                    mode='bilinear',
                    align_corners=self.align_corners)
                for i, lateral_conv in enumerate(self.lateral_convs)
            ]
            feat = self.fusion(torch.cat([feat, *laterals], 1))
        encode_feat, output = self.enc_module(feat)
        output = self.cls_seg(output)
        if self.use_se_loss:
            se_output = self.se_layer(encode_feat)
            return output, se_output
        else:
            return output

    def forward_test(self, inputs, img_metas, test_cfg):
        """Forward function for testing, ignore se_loss."""
        if self.use_se_loss:
            # Drop the auxiliary SE logits at inference time.
            return self.forward(inputs)[0]
        else:
            return self.forward(inputs)

    @staticmethod
    def _convert_to_onehot_labels(seg_label, num_classes):
        """Convert segmentation label to onehot.

        Args:
            seg_label (Tensor): Segmentation label of shape (N, H, W).
            num_classes (int): Number of classes.

        Returns:
            Tensor: Onehot labels of shape (N, num_classes).
        """

        batch_size = seg_label.size(0)
        onehot_labels = seg_label.new_zeros((batch_size, num_classes))
        for i in range(batch_size):
            # A class is "present" if any pixel in the image carries it.
            hist = seg_label[i].float().histc(
                bins=num_classes, min=0, max=num_classes - 1)
            onehot_labels[i] = hist > 0
        return onehot_labels

    def losses(self, seg_logit, seg_label):
        """Compute segmentation and semantic encoding loss."""
        seg_logit, se_seg_logit = seg_logit
        loss = dict()
        loss.update(super(EncHead, self).losses(seg_logit, seg_label))
        se_loss = self.loss_se_decode(
            se_seg_logit,
            self._convert_to_onehot_labels(seg_label, self.num_classes))
        loss['loss_se'] = se_loss
        return loss
|
RAVE-main/annotator/mmpkg/mmseg/models/decode_heads/fcn_head.py
ADDED
|
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
from annotator.mmpkg.mmcv.cnn import ConvModule
|
| 4 |
+
|
| 5 |
+
from ..builder import HEADS
|
| 6 |
+
from .decode_head import BaseDecodeHead
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
@HEADS.register_module()
class FCNHead(BaseDecodeHead):
    """Fully Convolution Networks for Semantic Segmentation.

    This head is implemented of `FCNNet <https://arxiv.org/abs/1411.4038>`_.

    Args:
        num_convs (int): Number of convs in the head. Default: 2.
        kernel_size (int): The kernel size for convs in the head. Default: 3.
        concat_input (bool): Whether concat the input and output of convs
            before classification layer.
        dilation (int): The dilation rate for convs in the head. Default: 1.
    """

    def __init__(self,
                 num_convs=2,
                 kernel_size=3,
                 concat_input=True,
                 dilation=1,
                 **kwargs):
        assert num_convs >= 0 and dilation > 0 and isinstance(dilation, int)
        self.num_convs = num_convs
        self.concat_input = concat_input
        self.kernel_size = kernel_size
        super(FCNHead, self).__init__(**kwargs)
        if num_convs == 0:
            # With no convs the head is a pure classifier, so the input
            # features must already have the expected channel count.
            # Fix: the original built (and then discarded) a ConvModule
            # stack even in this case, wastefully allocating modules.
            assert self.in_channels == self.channels
            self.convs = nn.Identity()
        else:
            conv_padding = (kernel_size // 2) * dilation
            # First conv maps in_channels -> channels; the rest keep the
            # channel count fixed.
            convs = [
                ConvModule(
                    self.in_channels,
                    self.channels,
                    kernel_size=kernel_size,
                    padding=conv_padding,
                    dilation=dilation,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg)
            ]
            convs.extend(
                ConvModule(
                    self.channels,
                    self.channels,
                    kernel_size=kernel_size,
                    padding=conv_padding,
                    dilation=dilation,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg) for _ in range(num_convs - 1))
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            # Fuses the raw head input with the conv output before scoring.
            self.conv_cat = ConvModule(
                self.in_channels + self.channels,
                self.channels,
                kernel_size=kernel_size,
                padding=kernel_size // 2,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)

    def forward(self, inputs):
        """Forward function."""
        x = self._transform_inputs(inputs)
        output = self.convs(x)
        if self.concat_input:
            output = self.conv_cat(torch.cat([x, output], dim=1))
        output = self.cls_seg(output)
        return output
|
RAVE-main/annotator/mmpkg/mmseg/models/decode_heads/fpn_head.py
ADDED
|
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
from annotator.mmpkg.mmcv.cnn import ConvModule
|
| 4 |
+
|
| 5 |
+
from annotator.mmpkg.mmseg.ops import resize
|
| 6 |
+
from ..builder import HEADS
|
| 7 |
+
from .decode_head import BaseDecodeHead
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
@HEADS.register_module()
class FPNHead(BaseDecodeHead):
    """Panoptic Feature Pyramid Networks.

    This head is the implementation of `Semantic FPN
    <https://arxiv.org/abs/1901.02446>`_.

    Args:
        feature_strides (tuple[int]): The strides for input feature maps.
            stack_lateral. All strides suppose to be power of 2. The first
            one is of largest resolution.
    """

    def __init__(self, feature_strides, **kwargs):
        super(FPNHead, self).__init__(
            input_transform='multiple_select', **kwargs)
        assert len(feature_strides) == len(self.in_channels)
        assert min(feature_strides) == feature_strides[0]
        self.feature_strides = feature_strides

        # One "scale head" per level: log2(stride ratio) conv(+upsample)
        # steps to bring the level to the finest level's resolution.
        self.scale_heads = nn.ModuleList()
        for i in range(len(feature_strides)):
            head_length = max(
                1,
                int(np.log2(feature_strides[i]) - np.log2(feature_strides[0])))
            scale_head = []
            for k in range(head_length):
                scale_head.append(
                    ConvModule(
                        self.in_channels[i] if k == 0 else self.channels,
                        self.channels,
                        3,
                        padding=1,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg,
                        act_cfg=self.act_cfg))
                if feature_strides[i] != feature_strides[0]:
                    scale_head.append(
                        nn.Upsample(
                            scale_factor=2,
                            mode='bilinear',
                            align_corners=self.align_corners))
            self.scale_heads.append(nn.Sequential(*scale_head))

    def forward(self, inputs):
        """Sum all per-level scale-head outputs, then classify."""

        x = self._transform_inputs(inputs)

        output = self.scale_heads[0](x[0])
        for i in range(1, len(self.feature_strides)):
            # non inplace
            output = output + resize(
                self.scale_heads[i](x[i]),
                size=output.shape[2:],
                mode='bilinear',
                align_corners=self.align_corners)

        output = self.cls_seg(output)
        return output
|
RAVE-main/annotator/mmpkg/mmseg/models/decode_heads/gc_head.py
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
from annotator.mmpkg.mmcv.cnn import ContextBlock
|
| 3 |
+
|
| 4 |
+
from ..builder import HEADS
|
| 5 |
+
from .fcn_head import FCNHead
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
@HEADS.register_module()
class GCHead(FCNHead):
    """GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond.

    This head is the implementation of `GCNet
    <https://arxiv.org/abs/1904.11492>`_.

    Args:
        ratio (float): Multiplier of channels ratio. Default: 1/4.
        pooling_type (str): The pooling type of context aggregation.
            Options are 'att', 'avg'. Default: 'att'.
        fusion_types (tuple[str]): The fusion type for feature fusion.
            Options are 'channel_add', 'channel_mul'. Default: ('channel_add',)
    """

    def __init__(self,
                 ratio=1 / 4.,
                 pooling_type='att',
                 fusion_types=('channel_add', ),
                 **kwargs):
        # The FCN parent builds two convs; the GC block sits between them.
        super(GCHead, self).__init__(num_convs=2, **kwargs)
        self.ratio = ratio
        self.pooling_type = pooling_type
        self.fusion_types = fusion_types
        self.gc_block = ContextBlock(
            in_channels=self.channels,
            ratio=self.ratio,
            pooling_type=self.pooling_type,
            fusion_types=self.fusion_types)

    def forward(self, inputs):
        """conv -> global-context block -> conv -> (optional concat) -> cls."""
        feats = self._transform_inputs(inputs)
        refined = self.convs[1](self.gc_block(self.convs[0](feats)))
        if self.concat_input:
            refined = self.conv_cat(torch.cat([feats, refined], dim=1))
        return self.cls_seg(refined)
|
RAVE-main/annotator/mmpkg/mmseg/models/decode_heads/lraspp_head.py
ADDED
|
@@ -0,0 +1,90 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
from annotator.mmpkg.mmcv import is_tuple_of
|
| 4 |
+
from annotator.mmpkg.mmcv.cnn import ConvModule
|
| 5 |
+
|
| 6 |
+
from annotator.mmpkg.mmseg.ops import resize
|
| 7 |
+
from ..builder import HEADS
|
| 8 |
+
from .decode_head import BaseDecodeHead
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
@HEADS.register_module()
class LRASPPHead(BaseDecodeHead):
    """Lite R-ASPP (LRASPP) head is proposed in Searching for MobileNetV3.

    This head is the improved implementation of `Searching for MobileNetV3
    <https://ieeexplore.ieee.org/document/9008835>`_.

    Args:
        branch_channels (tuple[int]): The number of output channels in every
            each branch. Default: (32, 64).
    """

    def __init__(self, branch_channels=(32, 64), **kwargs):
        super(LRASPPHead, self).__init__(**kwargs)
        if self.input_transform != 'multiple_select':
            raise ValueError('in Lite R-ASPP (LRASPP) head, input_transform '
                             f'must be \'multiple_select\'. But received '
                             f'\'{self.input_transform}\'')
        assert is_tuple_of(branch_channels, int)
        # One skip branch per non-final input level.
        assert len(branch_channels) == len(self.in_channels) - 1
        self.branch_channels = branch_channels

        # 1x1 projections for the skip connections (`convs`) and the fusion
        # convs (`conv_ups`) applied after each upsample+concat in forward().
        self.convs = nn.Sequential()
        self.conv_ups = nn.Sequential()
        for i in range(len(branch_channels)):
            self.convs.add_module(
                f'conv{i}',
                nn.Conv2d(
                    self.in_channels[i], branch_channels[i], 1, bias=False))
            self.conv_ups.add_module(
                f'conv_up{i}',
                ConvModule(
                    self.channels + branch_channels[i],
                    self.channels,
                    1,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg,
                    bias=False))

        self.conv_up_input = nn.Conv2d(self.channels, self.channels, 1)

        self.aspp_conv = ConvModule(
            self.in_channels[-1],
            self.channels,
            1,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg,
            bias=False)
        # Large-window pooling followed by a sigmoid gate, multiplied onto
        # the ASPP branch in forward(). NOTE(review): kernel 49 and stride
        # (16, 20) are hard-coded — presumably tuned for the MobileNetV3 /
        # Cityscapes setup; confirm before reusing with other input sizes.
        # Also assumes self.in_channels[2] is the deepest level — verify.
        self.image_pool = nn.Sequential(
            nn.AvgPool2d(kernel_size=49, stride=(16, 20)),
            ConvModule(
                self.in_channels[2],
                self.channels,
                1,
                act_cfg=dict(type='Sigmoid'),
                bias=False))

    def forward(self, inputs):
        """Forward function."""
        inputs = self._transform_inputs(inputs)

        # The deepest feature level feeds the ASPP + gating branches.
        x = inputs[-1]

        x = self.aspp_conv(x) * resize(
            self.image_pool(x),
            size=x.size()[2:],
            mode='bilinear',
            align_corners=self.align_corners)
        x = self.conv_up_input(x)

        # Walk back up the pyramid: upsample, concat the skip projection,
        # then fuse with a 1x1 conv.
        for i in range(len(self.branch_channels) - 1, -1, -1):
            x = resize(
                x,
                size=inputs[i].size()[2:],
                mode='bilinear',
                align_corners=self.align_corners)
            x = torch.cat([x, self.convs[i](inputs[i])], 1)
            x = self.conv_ups[i](x)

        return self.cls_seg(x)
|
RAVE-main/annotator/mmpkg/mmseg/models/decode_heads/nl_head.py
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
from annotator.mmpkg.mmcv.cnn import NonLocal2d
|
| 3 |
+
|
| 4 |
+
from ..builder import HEADS
|
| 5 |
+
from .fcn_head import FCNHead
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
@HEADS.register_module()
class NLHead(FCNHead):
    """Non-local Neural Networks.

    This head is the implementation of `NLNet
    <https://arxiv.org/abs/1711.07971>`_.

    Args:
        reduction (int): Reduction factor of projection transform.
            Default: 2.
        use_scale (bool): Whether to scale pairwise_weight by
            sqrt(1/inter_channels). Default: True.
        mode (str): The nonlocal mode. Options are 'embedded_gaussian',
            'dot_product'. Default: 'embedded_gaussian'.
    """

    def __init__(self,
                 reduction=2,
                 use_scale=True,
                 mode='embedded_gaussian',
                 **kwargs):
        # FCNHead with exactly two convs; the non-local block sits between
        # them in forward().
        super(NLHead, self).__init__(num_convs=2, **kwargs)
        self.reduction = reduction
        self.use_scale = use_scale
        self.mode = mode
        self.nl_block = NonLocal2d(
            in_channels=self.channels,
            reduction=self.reduction,
            use_scale=self.use_scale,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            mode=self.mode)

    def forward(self, inputs):
        """Run conv -> non-local block -> conv, then classify."""
        x = self._transform_inputs(inputs)
        feats = self.convs[0](x)
        feats = self.nl_block(feats)
        feats = self.convs[1](feats)
        if self.concat_input:
            # Optionally re-concatenate the raw input features before the
            # final fusion conv (FCNHead behaviour).
            feats = self.conv_cat(torch.cat([x, feats], dim=1))
        return self.cls_seg(feats)
|
RAVE-main/annotator/mmpkg/mmseg/models/decode_heads/ocr_head.py
ADDED
|
@@ -0,0 +1,127 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
import torch.nn.functional as F
|
| 4 |
+
from annotator.mmpkg.mmcv.cnn import ConvModule
|
| 5 |
+
|
| 6 |
+
from annotator.mmpkg.mmseg.ops import resize
|
| 7 |
+
from ..builder import HEADS
|
| 8 |
+
from ..utils import SelfAttentionBlock as _SelfAttentionBlock
|
| 9 |
+
from .cascade_decode_head import BaseCascadeDecodeHead
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class SpatialGatherModule(nn.Module):
    """Aggregate per-class context features from an initial prediction.

    The (softmax-normalised) class probability maps act as soft spatial
    weights that pool the feature map into one context vector per class.
    """

    def __init__(self, scale):
        super(SpatialGatherModule, self).__init__()
        # Temperature applied to the logits before the spatial softmax.
        self.scale = scale

    def forward(self, feats, probs):
        """Return soft class-wise context of shape [N, C, num_classes, 1]."""
        batch_size, num_classes = probs.shape[:2]
        channels = feats.shape[1]
        # [batch_size, num_classes, height*width]
        flat_probs = probs.reshape(batch_size, num_classes, -1)
        # [batch_size, height*width, channels]
        flat_feats = feats.reshape(batch_size, channels, -1).permute(0, 2, 1)
        # Softmax over the spatial axis turns each class map into a
        # distribution of attention weights.
        weights = F.softmax(self.scale * flat_probs, dim=2)
        # [batch_size, num_classes, channels]
        context = torch.matmul(weights, flat_feats)
        # [batch_size, channels, num_classes, 1]
        return context.permute(0, 2, 1).contiguous().unsqueeze(3)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
class ObjectAttentionBlock(_SelfAttentionBlock):
    """Self-attention block used by OCRHead: query is the pixel feature
    map, key/value is the gathered per-class object context."""

    def __init__(self, in_channels, channels, scale, conv_cfg, norm_cfg,
                 act_cfg):
        # Downsample the query path when scale > 1 to reduce attention cost.
        if scale > 1:
            query_downsample = nn.MaxPool2d(kernel_size=scale)
        else:
            query_downsample = None
        super(ObjectAttentionBlock, self).__init__(
            key_in_channels=in_channels,
            query_in_channels=in_channels,
            channels=channels,
            out_channels=in_channels,
            share_key_query=False,
            query_downsample=query_downsample,
            key_downsample=None,
            key_query_num_convs=2,
            key_query_norm=True,
            value_out_num_convs=1,
            value_out_norm=True,
            matmul_norm=True,
            with_out=True,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)
        # Fuses the attended context with the original query features
        # (hence in_channels * 2 on the input side).
        self.bottleneck = ConvModule(
            in_channels * 2,
            in_channels,
            1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

    def forward(self, query_feats, key_feats):
        """Attend `query_feats` over `key_feats`, then fuse with the query."""
        context = super(ObjectAttentionBlock,
                        self).forward(query_feats, key_feats)
        output = self.bottleneck(torch.cat([context, query_feats], dim=1))
        if self.query_downsample is not None:
            # NOTE(review): this branch discards the computed `output` and
            # resizes the raw query features instead, and `resize` is called
            # without a target size/scale. Looks like an upstream bug; it is
            # only reachable when scale > 1 — confirm before using scale > 1.
            output = resize(query_feats)

        return output
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
@HEADS.register_module()
class OCRHead(BaseCascadeDecodeHead):
    """Object-Contextual Representations for Semantic Segmentation.

    This head is the implementation of `OCRNet
    <https://arxiv.org/abs/1909.11065>`_.

    Args:
        ocr_channels (int): The intermediate channels of the OCR block.
        scale (int): The scale of the probability map in
            SpatialGatherModule. Default: 1.
    """

    def __init__(self, ocr_channels, scale=1, **kwargs):
        super(OCRHead, self).__init__(**kwargs)
        self.ocr_channels = ocr_channels
        self.scale = scale
        # Attention between pixel features and the class-context vectors.
        self.object_context_block = ObjectAttentionBlock(
            self.channels,
            self.ocr_channels,
            self.scale,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        # Soft-pools features into one context vector per class using the
        # previous stage's prediction.
        self.spatial_gather_module = SpatialGatherModule(self.scale)

        # 3x3 conv reducing the transformed inputs to self.channels.
        self.bottleneck = ConvModule(
            self.in_channels,
            self.channels,
            3,
            padding=1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

    def forward(self, inputs, prev_output):
        """Refine `prev_output` with object-contextual attention."""
        x = self._transform_inputs(inputs)
        pixel_feats = self.bottleneck(x)
        class_context = self.spatial_gather_module(pixel_feats, prev_output)
        refined = self.object_context_block(pixel_feats, class_context)
        return self.cls_seg(refined)
|