Add files using upload-large-folder tool
Browse files. This view is limited to 50 files because it contains too many changes.
See raw diff
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/cnn/bricks/conv2d_adaptive_padding.py +62 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/cnn/bricks/conv_module.py +206 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/cnn/bricks/conv_ws.py +148 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/cnn/bricks/depthwise_separable_conv_module.py +96 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/cnn/bricks/non_local.py +306 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/cnn/bricks/padding.py +36 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/cnn/bricks/registry.py +16 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/cnn/bricks/swish.py +25 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/cnn/bricks/transformer.py +595 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/cnn/bricks/upsample.py +84 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/cnn/bricks/wrappers.py +180 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/cnn/utils/flops_counter.py +599 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/base_module.py +195 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/checkpoint.py +707 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/hooks/__init__.py +29 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/hooks/checkpoint.py +167 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/hooks/closure.py +11 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/hooks/ema.py +89 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/hooks/evaluation.py +509 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/hooks/hook.py +92 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/hooks/iter_timer.py +18 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/hooks/logger/__init__.py +15 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/hooks/logger/base.py +166 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/hooks/logger/dvclive.py +58 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/hooks/logger/mlflow.py +78 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/hooks/logger/neptune.py +82 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/hooks/logger/pavi.py +117 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/hooks/logger/tensorboard.py +57 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/hooks/logger/text.py +256 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/hooks/logger/wandb.py +56 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/hooks/lr_updater.py +670 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/hooks/memory.py +25 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/hooks/momentum_updater.py +493 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/hooks/optimizer.py +508 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/hooks/profiler.py +180 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/hooks/sampler_seed.py +20 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/hooks/sync_buffer.py +22 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/priority.py +60 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmseg/models/__init__.py +12 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmseg/models/builder.py +46 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmseg/models/decode_heads/__init__.py +28 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmseg/models/decode_heads/ann_head.py +245 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmseg/models/decode_heads/apc_head.py +158 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmseg/models/decode_heads/aspp_head.py +107 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmseg/models/decode_heads/cascade_decode_head.py +57 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmseg/models/decode_heads/cc_head.py +42 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmseg/models/decode_heads/da_head.py +178 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmseg/models/decode_heads/decode_head.py +234 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmseg/models/decode_heads/dm_head.py +140 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmseg/models/decode_heads/dnl_head.py +131 -0
FRESCO/src/ControlNet/annotator/uniformer/mmcv/cnn/bricks/conv2d_adaptive_padding.py
ADDED
|
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import math
|
| 3 |
+
|
| 4 |
+
from torch import nn
|
| 5 |
+
from torch.nn import functional as F
|
| 6 |
+
|
| 7 |
+
from .registry import CONV_LAYERS
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
@CONV_LAYERS.register_module()
class Conv2dAdaptivePadding(nn.Conv2d):
    """2D convolution with TensorFlow-style "same" adaptive padding.

    Pads the input (only when needed) so that every spatial position is
    covered by the kernel for the given stride. With stride 1 the output
    spatial size equals the input size; with stride 2 it is halved, etc.

    Args:
        in_channels (int): Number of channels in the input image.
        out_channels (int): Number of channels produced by the convolution.
        kernel_size (int or tuple): Size of the convolving kernel.
        stride (int or tuple, optional): Stride of the convolution. Default: 1.
        padding (int or tuple, optional): Ignored; padding is computed
            adaptively at forward time. Default: 0.
        dilation (int or tuple, optional): Spacing between kernel elements.
            Default: 1.
        groups (int, optional): Number of blocked connections from input
            channels to output channels. Default: 1.
        bias (bool, optional): If ``True``, adds a learnable bias to the
            output. Default: ``True``.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True):
        # The parent conv is built with padding=0; all padding is applied
        # explicitly in ``forward`` based on the input size.
        super().__init__(in_channels, out_channels, kernel_size, stride, 0,
                         dilation, groups, bias)

    def forward(self, x):
        img_h, img_w = x.size()[-2:]
        kernel_h, kernel_w = self.weight.size()[-2:]
        stride_h, stride_w = self.stride
        # Target "same"-style output size for each spatial dim.
        output_h = math.ceil(img_h / stride_h)
        output_w = math.ceil(img_w / stride_w)
        # Total padding needed so the kernel covers the whole input.
        pad_h = max((output_h - 1) * self.stride[0] +
                    (kernel_h - 1) * self.dilation[0] + 1 - img_h, 0)
        pad_w = max((output_w - 1) * self.stride[1] +
                    (kernel_w - 1) * self.dilation[1] + 1 - img_w, 0)
        if pad_h > 0 or pad_w > 0:
            # F.pad order: (left, right, top, bottom); split as evenly as
            # possible, extra pixel goes to the right/bottom side.
            x = F.pad(x, [
                pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2
            ])
        return F.conv2d(x, self.weight, self.bias, self.stride, self.padding,
                        self.dilation, self.groups)
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/cnn/bricks/conv_module.py
ADDED
|
@@ -0,0 +1,206 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import warnings
|
| 3 |
+
|
| 4 |
+
import torch.nn as nn
|
| 5 |
+
|
| 6 |
+
from annotator.uniformer.mmcv.utils import _BatchNorm, _InstanceNorm
|
| 7 |
+
from ..utils import constant_init, kaiming_init
|
| 8 |
+
from .activation import build_activation_layer
|
| 9 |
+
from .conv import build_conv_layer
|
| 10 |
+
from .norm import build_norm_layer
|
| 11 |
+
from .padding import build_padding_layer
|
| 12 |
+
from .registry import PLUGIN_LAYERS
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
@PLUGIN_LAYERS.register_module()
class ConvModule(nn.Module):
    """A conv block that bundles conv/norm/activation layers.

    This block simplifies the usage of convolution layers, which are commonly
    used with a norm layer (e.g., BatchNorm) and activation layer (e.g., ReLU).
    It is based upon three build methods: `build_conv_layer()`,
    `build_norm_layer()` and `build_activation_layer()`.

    Besides, we add some additional features in this module.
    1. Automatically set `bias` of the conv layer.
    2. Spectral norm is supported.
    3. More padding modes are supported. Before PyTorch 1.5, nn.Conv2d only
    supports zero and circular padding, and we add "reflect" padding mode.

    Args:
        in_channels (int): Number of channels in the input feature map.
            Same as that in ``nn._ConvNd``.
        out_channels (int): Number of channels produced by the convolution.
            Same as that in ``nn._ConvNd``.
        kernel_size (int | tuple[int]): Size of the convolving kernel.
            Same as that in ``nn._ConvNd``.
        stride (int | tuple[int]): Stride of the convolution.
            Same as that in ``nn._ConvNd``.
        padding (int | tuple[int]): Zero-padding added to both sides of
            the input. Same as that in ``nn._ConvNd``.
        dilation (int | tuple[int]): Spacing between kernel elements.
            Same as that in ``nn._ConvNd``.
        groups (int): Number of blocked connections from input channels to
            output channels. Same as that in ``nn._ConvNd``.
        bias (bool | str): If specified as `auto`, it will be decided by the
            norm_cfg. Bias will be set as True if `norm_cfg` is None, otherwise
            False. Default: "auto".
        conv_cfg (dict): Config dict for convolution layer. Default: None,
            which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer. Default: None.
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='ReLU').
        inplace (bool): Whether to use inplace mode for activation.
            Default: True.
        with_spectral_norm (bool): Whether use spectral norm in conv module.
            Default: False.
        padding_mode (str): If the `padding_mode` has not been supported by
            current `Conv2d` in PyTorch, we will use our own padding layer
            instead. Currently, we support ['zeros', 'circular'] with official
            implementation and ['reflect'] with our own implementation.
            Default: 'zeros'.
        order (tuple[str]): The order of conv/norm/activation layers. It is a
            sequence of "conv", "norm" and "act". Common examples are
            ("conv", "norm", "act") and ("act", "conv", "norm").
            Default: ('conv', 'norm', 'act').
    """

    _abbr_ = 'conv_block'

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias='auto',
                 conv_cfg=None,
                 norm_cfg=None,
                 act_cfg=dict(type='ReLU'),
                 inplace=True,
                 with_spectral_norm=False,
                 padding_mode='zeros',
                 order=('conv', 'norm', 'act')):
        super(ConvModule, self).__init__()
        assert conv_cfg is None or isinstance(conv_cfg, dict)
        assert norm_cfg is None or isinstance(norm_cfg, dict)
        assert act_cfg is None or isinstance(act_cfg, dict)
        official_padding_mode = ['zeros', 'circular']
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.inplace = inplace
        self.with_spectral_norm = with_spectral_norm
        # Padding modes PyTorch's Conv2d does not support (e.g. 'reflect')
        # are handled by an explicit padding layer before the conv.
        self.with_explicit_padding = padding_mode not in official_padding_mode
        self.order = order
        assert isinstance(self.order, tuple) and len(self.order) == 3
        assert set(order) == set(['conv', 'norm', 'act'])

        self.with_norm = norm_cfg is not None
        self.with_activation = act_cfg is not None
        # if the conv layer is before a norm layer, bias is unnecessary.
        if bias == 'auto':
            bias = not self.with_norm
        self.with_bias = bias

        if self.with_explicit_padding:
            pad_cfg = dict(type=padding_mode)
            self.padding_layer = build_padding_layer(pad_cfg, padding)

        # reset padding to 0 for conv module
        conv_padding = 0 if self.with_explicit_padding else padding
        # build convolution layer
        self.conv = build_conv_layer(
            conv_cfg,
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=conv_padding,
            dilation=dilation,
            groups=groups,
            bias=bias)
        # export the attributes of self.conv to a higher level for convenience
        self.in_channels = self.conv.in_channels
        self.out_channels = self.conv.out_channels
        self.kernel_size = self.conv.kernel_size
        self.stride = self.conv.stride
        self.padding = padding
        self.dilation = self.conv.dilation
        self.transposed = self.conv.transposed
        self.output_padding = self.conv.output_padding
        self.groups = self.conv.groups

        if self.with_spectral_norm:
            self.conv = nn.utils.spectral_norm(self.conv)

        # build normalization layers
        if self.with_norm:
            # norm layer is after conv layer
            if order.index('norm') > order.index('conv'):
                norm_channels = out_channels
            else:
                norm_channels = in_channels
            self.norm_name, norm = build_norm_layer(norm_cfg, norm_channels)
            self.add_module(self.norm_name, norm)
            if self.with_bias:
                if isinstance(norm, (_BatchNorm, _InstanceNorm)):
                    warnings.warn(
                        'Unnecessary conv bias before batch/instance norm')
        else:
            self.norm_name = None

        # build activation layer
        if self.with_activation:
            act_cfg_ = act_cfg.copy()
            # nn.Tanh has no 'inplace' argument
            if act_cfg_['type'] not in [
                    'Tanh', 'PReLU', 'Sigmoid', 'HSigmoid', 'Swish'
            ]:
                act_cfg_.setdefault('inplace', inplace)
            self.activate = build_activation_layer(act_cfg_)

        # Use msra init by default
        self.init_weights()

    @property
    def norm(self):
        # Resolve the registered norm sub-module by its stored name;
        # None when this module was built without a norm layer.
        if self.norm_name:
            return getattr(self, self.norm_name)
        else:
            return None

    def init_weights(self):
        # 1. It is mainly for customized conv layers with their own
        #    initialization manners by calling their own ``init_weights()``,
        #    and we do not want ConvModule to override the initialization.
        # 2. For customized conv layers without their own initialization
        #    manners (that is, they don't have their own ``init_weights()``)
        #    and PyTorch's conv layers, they will be initialized by
        #    this method with default ``kaiming_init``.
        # Note: For PyTorch's conv layers, they will be overwritten by our
        #    initialization implementation using default ``kaiming_init``.
        if not hasattr(self.conv, 'init_weights'):
            if self.with_activation and self.act_cfg['type'] == 'LeakyReLU':
                nonlinearity = 'leaky_relu'
                a = self.act_cfg.get('negative_slope', 0.01)
            else:
                nonlinearity = 'relu'
                a = 0
            kaiming_init(self.conv, a=a, nonlinearity=nonlinearity)
        if self.with_norm:
            constant_init(self.norm, 1, bias=0)

    def forward(self, x, activate=True, norm=True):
        """Apply conv/norm/activation in ``self.order``; ``activate`` and
        ``norm`` allow skipping those stages for a single call."""
        for layer in self.order:
            if layer == 'conv':
                if self.with_explicit_padding:
                    x = self.padding_layer(x)
                x = self.conv(x)
            elif layer == 'norm' and norm and self.with_norm:
                x = self.norm(x)
            elif layer == 'act' and activate and self.with_activation:
                x = self.activate(x)
        return x
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/cnn/bricks/conv_ws.py
ADDED
|
@@ -0,0 +1,148 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import torch
|
| 3 |
+
import torch.nn as nn
|
| 4 |
+
import torch.nn.functional as F
|
| 5 |
+
|
| 6 |
+
from .registry import CONV_LAYERS
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def conv_ws_2d(input,
|
| 10 |
+
weight,
|
| 11 |
+
bias=None,
|
| 12 |
+
stride=1,
|
| 13 |
+
padding=0,
|
| 14 |
+
dilation=1,
|
| 15 |
+
groups=1,
|
| 16 |
+
eps=1e-5):
|
| 17 |
+
c_in = weight.size(0)
|
| 18 |
+
weight_flat = weight.view(c_in, -1)
|
| 19 |
+
mean = weight_flat.mean(dim=1, keepdim=True).view(c_in, 1, 1, 1)
|
| 20 |
+
std = weight_flat.std(dim=1, keepdim=True).view(c_in, 1, 1, 1)
|
| 21 |
+
weight = (weight - mean) / (std + eps)
|
| 22 |
+
return F.conv2d(input, weight, bias, stride, padding, dilation, groups)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
@CONV_LAYERS.register_module('ConvWS')
class ConvWS2d(nn.Conv2d):
    """Conv2d with Weight Standardization applied at every forward pass.

    See https://arxiv.org/pdf/1903.10520.pdf. Constructor arguments match
    ``nn.Conv2d``, with an extra ``eps`` (float) used to stabilize the
    per-channel weight standardization. Default: 1e-5.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True,
                 eps=1e-5):
        super(ConvWS2d, self).__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias)
        self.eps = eps

    def forward(self, x):
        # Standardize the weight on the fly, then run a regular conv2d.
        return conv_ws_2d(x, self.weight, self.bias, self.stride, self.padding,
                          self.dilation, self.groups, self.eps)
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
@CONV_LAYERS.register_module(name='ConvAWS')
class ConvAWS2d(nn.Conv2d):
    """AWS (Adaptive Weight Standardization)

    This is a variant of Weight Standardization
    (https://arxiv.org/pdf/1903.10520.pdf)
    It is used in DetectoRS to avoid NaN
    (https://arxiv.org/pdf/2006.02334.pdf)

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the conv kernel
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to both sides of
            the input. Default: 0
        dilation (int or tuple, optional): Spacing between kernel elements.
            Default: 1
        groups (int, optional): Number of blocked connections from input
            channels to output channels. Default: 1
        bias (bool, optional): If set True, adds a learnable bias to the
            output. Default: True
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True):
        super().__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias)
        # Learned-free affine parameters applied to the standardized weight;
        # registered as buffers so they travel with state_dict but are not
        # optimized.
        self.register_buffer('weight_gamma',
                             torch.ones(self.out_channels, 1, 1, 1))
        self.register_buffer('weight_beta',
                             torch.zeros(self.out_channels, 1, 1, 1))

    def _get_weight(self, weight):
        # Standardize each output-channel kernel, then rescale/shift with
        # the stored gamma/beta.
        weight_flat = weight.view(weight.size(0), -1)
        mean = weight_flat.mean(dim=1).view(-1, 1, 1, 1)
        std = torch.sqrt(weight_flat.var(dim=1) + 1e-5).view(-1, 1, 1, 1)
        weight = (weight - mean) / std
        weight = self.weight_gamma * weight + self.weight_beta
        return weight

    def forward(self, x):
        weight = self._get_weight(self.weight)
        return F.conv2d(x, weight, self.bias, self.stride, self.padding,
                        self.dilation, self.groups)

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        """Override default load function.

        AWS overrides the function _load_from_state_dict to recover
        weight_gamma and weight_beta if they are missing. If weight_gamma and
        weight_beta are found in the checkpoint, this function will return
        after super()._load_from_state_dict. Otherwise, it will compute the
        mean and std of the pretrained weights and store them in weight_beta
        and weight_gamma.
        """

        # Sentinel: a negative mean below means the checkpoint did NOT
        # contain weight_gamma (it stayed at -1 through loading).
        self.weight_gamma.data.fill_(-1)
        local_missing_keys = []
        super()._load_from_state_dict(state_dict, prefix, local_metadata,
                                      strict, local_missing_keys,
                                      unexpected_keys, error_msgs)
        if self.weight_gamma.data.mean() > 0:
            # gamma was loaded from the checkpoint; propagate missing keys.
            for k in local_missing_keys:
                missing_keys.append(k)
            return
        # Checkpoint lacks gamma/beta: derive them from the loaded weight
        # so that the standardized weight reproduces the pretrained one.
        weight = self.weight.data
        weight_flat = weight.view(weight.size(0), -1)
        mean = weight_flat.mean(dim=1).view(-1, 1, 1, 1)
        std = torch.sqrt(weight_flat.var(dim=1) + 1e-5).view(-1, 1, 1, 1)
        self.weight_beta.data.copy_(mean)
        self.weight_gamma.data.copy_(std)
        missing_gamma_beta = [
            k for k in local_missing_keys
            if k.endswith('weight_gamma') or k.endswith('weight_beta')
        ]
        for k in missing_gamma_beta:
            local_missing_keys.remove(k)
        for k in local_missing_keys:
            missing_keys.append(k)
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/cnn/bricks/depthwise_separable_conv_module.py
ADDED
|
@@ -0,0 +1,96 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
|
| 4 |
+
from .conv_module import ConvModule
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class DepthwiseSeparableConvModule(nn.Module):
    """Depthwise separable convolution module.

    See https://arxiv.org/pdf/1704.04861.pdf for details.

    This module can replace a ConvModule with the conv block replaced by two
    conv block: depthwise conv block and pointwise conv block. The depthwise
    conv block contains depthwise-conv/norm/activation layers. The pointwise
    conv block contains pointwise-conv/norm/activation layers. It should be
    noted that there will be norm/activation layer in the depthwise conv block
    if `norm_cfg` and `act_cfg` are specified.

    Args:
        in_channels (int): Number of channels in the input feature map.
            Same as that in ``nn._ConvNd``.
        out_channels (int): Number of channels produced by the convolution.
            Same as that in ``nn._ConvNd``.
        kernel_size (int | tuple[int]): Size of the convolving kernel.
            Same as that in ``nn._ConvNd``.
        stride (int | tuple[int]): Stride of the convolution.
            Same as that in ``nn._ConvNd``. Default: 1.
        padding (int | tuple[int]): Zero-padding added to both sides of
            the input. Same as that in ``nn._ConvNd``. Default: 0.
        dilation (int | tuple[int]): Spacing between kernel elements.
            Same as that in ``nn._ConvNd``. Default: 1.
        norm_cfg (dict): Default norm config for both depthwise ConvModule and
            pointwise ConvModule. Default: None.
        act_cfg (dict): Default activation config for both depthwise ConvModule
            and pointwise ConvModule. Default: dict(type='ReLU').
        dw_norm_cfg (dict): Norm config of depthwise ConvModule. If it is
            'default', it will be the same as `norm_cfg`. Default: 'default'.
        dw_act_cfg (dict): Activation config of depthwise ConvModule. If it is
            'default', it will be the same as `act_cfg`. Default: 'default'.
        pw_norm_cfg (dict): Norm config of pointwise ConvModule. If it is
            'default', it will be the same as `norm_cfg`. Default: 'default'.
        pw_act_cfg (dict): Activation config of pointwise ConvModule. If it is
            'default', it will be the same as `act_cfg`. Default: 'default'.
        kwargs (optional): Other shared arguments for depthwise and pointwise
            ConvModule. See ConvModule for ref.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 norm_cfg=None,
                 act_cfg=dict(type='ReLU'),
                 dw_norm_cfg='default',
                 dw_act_cfg='default',
                 pw_norm_cfg='default',
                 pw_act_cfg='default',
                 **kwargs):
        super(DepthwiseSeparableConvModule, self).__init__()
        # groups is fixed internally (depthwise uses groups=in_channels).
        assert 'groups' not in kwargs, 'groups should not be specified'

        # if norm/activation config of depthwise/pointwise ConvModule is not
        # specified, use default config.
        dw_norm_cfg = dw_norm_cfg if dw_norm_cfg != 'default' else norm_cfg
        dw_act_cfg = dw_act_cfg if dw_act_cfg != 'default' else act_cfg
        pw_norm_cfg = pw_norm_cfg if pw_norm_cfg != 'default' else norm_cfg
        pw_act_cfg = pw_act_cfg if pw_act_cfg != 'default' else act_cfg

        # depthwise convolution: one filter per input channel.
        self.depthwise_conv = ConvModule(
            in_channels,
            in_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=in_channels,
            norm_cfg=dw_norm_cfg,
            act_cfg=dw_act_cfg,
            **kwargs)

        # pointwise (1x1) convolution mixes channels.
        self.pointwise_conv = ConvModule(
            in_channels,
            out_channels,
            1,
            norm_cfg=pw_norm_cfg,
            act_cfg=pw_act_cfg,
            **kwargs)

    def forward(self, x):
        x = self.depthwise_conv(x)
        x = self.pointwise_conv(x)
        return x
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/cnn/bricks/non_local.py
ADDED
|
@@ -0,0 +1,306 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from abc import ABCMeta
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
import torch.nn as nn
|
| 6 |
+
|
| 7 |
+
from ..utils import constant_init, normal_init
|
| 8 |
+
from .conv_module import ConvModule
|
| 9 |
+
from .registry import PLUGIN_LAYERS
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class _NonLocalNd(nn.Module, metaclass=ABCMeta):
    """Basic Non-local module.

    This module is proposed in
    "Non-local Neural Networks"
    Paper reference: https://arxiv.org/abs/1711.07971
    Code reference: https://github.com/AlexHex7/Non-local_pytorch

    Args:
        in_channels (int): Channels of the input feature map.
        reduction (int): Channel reduction ratio. Default: 2.
        use_scale (bool): Whether to scale pairwise_weight by
            `1/sqrt(inter_channels)` when the mode is `embedded_gaussian`.
            Default: True.
        conv_cfg (None | dict): The config dict for convolution layers.
            If not specified, it will use `nn.Conv2d` for convolution layers.
            Default: None.
        norm_cfg (None | dict): The config dict for normalization layers.
            Default: None. (This parameter is only applicable to conv_out.)
        mode (str): Options are `gaussian`, `concatenation`,
            `embedded_gaussian` and `dot_product`. Default: embedded_gaussian.
    """

    def __init__(self,
                 in_channels,
                 reduction=2,
                 use_scale=True,
                 conv_cfg=None,
                 norm_cfg=None,
                 mode='embedded_gaussian',
                 **kwargs):
        super(_NonLocalNd, self).__init__()
        self.in_channels = in_channels
        self.reduction = reduction
        self.use_scale = use_scale
        # Reduced channel count used for the pairwise computation;
        # clamped so it never drops below 1.
        self.inter_channels = max(in_channels // reduction, 1)
        self.mode = mode

        if mode not in [
                'gaussian', 'embedded_gaussian', 'dot_product', 'concatenation'
        ]:
            raise ValueError("Mode should be in 'gaussian', 'concatenation', "
                             f"'embedded_gaussian' or 'dot_product', but got "
                             f'{mode} instead.')

        # g, theta, phi are defaulted as `nn.ConvNd`.
        # Here we use ConvModule for potential usage.
        self.g = ConvModule(
            self.in_channels,
            self.inter_channels,
            kernel_size=1,
            conv_cfg=conv_cfg,
            act_cfg=None)
        self.conv_out = ConvModule(
            self.inter_channels,
            self.in_channels,
            kernel_size=1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=None)

        # In gaussian mode the embedding is the identity, so theta/phi
        # projections are only created for the other three modes.
        if self.mode != 'gaussian':
            self.theta = ConvModule(
                self.in_channels,
                self.inter_channels,
                kernel_size=1,
                conv_cfg=conv_cfg,
                act_cfg=None)
            self.phi = ConvModule(
                self.in_channels,
                self.inter_channels,
                kernel_size=1,
                conv_cfg=conv_cfg,
                act_cfg=None)

        if self.mode == 'concatenation':
            # 1x1 conv that scores each concatenated (theta, phi) pair.
            self.concat_project = ConvModule(
                self.inter_channels * 2,
                1,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=False,
                act_cfg=dict(type='ReLU'))

        self.init_weights(**kwargs)

    def init_weights(self, std=0.01, zeros_init=True):
        """Initialize weights.

        Args:
            std (float): Std of the normal initialization. Default: 0.01.
            zeros_init (bool): Zero-initialize the last layer of `conv_out`
                so the block starts as an identity mapping. Default: True.
        """
        if self.mode != 'gaussian':
            for m in [self.g, self.theta, self.phi]:
                normal_init(m.conv, std=std)
        else:
            normal_init(self.g.conv, std=std)
        if zeros_init:
            # Zero the conv (or the norm, when one follows the conv) so the
            # residual branch initially contributes nothing.
            if self.conv_out.norm_cfg is None:
                constant_init(self.conv_out.conv, 0)
            else:
                constant_init(self.conv_out.norm, 0)
        else:
            if self.conv_out.norm_cfg is None:
                normal_init(self.conv_out.conv, std=std)
            else:
                normal_init(self.conv_out.norm, std=std)

    def gaussian(self, theta_x, phi_x):
        """Pairwise function: softmax of raw dot products."""
        # NonLocal1d pairwise_weight: [N, H, H]
        # NonLocal2d pairwise_weight: [N, HxW, HxW]
        # NonLocal3d pairwise_weight: [N, TxHxW, TxHxW]
        pairwise_weight = torch.matmul(theta_x, phi_x)
        pairwise_weight = pairwise_weight.softmax(dim=-1)
        return pairwise_weight

    def embedded_gaussian(self, theta_x, phi_x):
        """Pairwise function: softmax of (optionally scaled) embeddings."""
        # NonLocal1d pairwise_weight: [N, H, H]
        # NonLocal2d pairwise_weight: [N, HxW, HxW]
        # NonLocal3d pairwise_weight: [N, TxHxW, TxHxW]
        pairwise_weight = torch.matmul(theta_x, phi_x)
        if self.use_scale:
            # theta_x.shape[-1] is `self.inter_channels`
            pairwise_weight /= theta_x.shape[-1]**0.5
        pairwise_weight = pairwise_weight.softmax(dim=-1)
        return pairwise_weight

    def dot_product(self, theta_x, phi_x):
        """Pairwise function: dot product normalized by position count."""
        # NonLocal1d pairwise_weight: [N, H, H]
        # NonLocal2d pairwise_weight: [N, HxW, HxW]
        # NonLocal3d pairwise_weight: [N, TxHxW, TxHxW]
        pairwise_weight = torch.matmul(theta_x, phi_x)
        pairwise_weight /= pairwise_weight.shape[-1]
        return pairwise_weight

    def concatenation(self, theta_x, phi_x):
        """Pairwise function: conv over concatenated broadcast embeddings."""
        # NonLocal1d pairwise_weight: [N, H, H]
        # NonLocal2d pairwise_weight: [N, HxW, HxW]
        # NonLocal3d pairwise_weight: [N, TxHxW, TxHxW]
        h = theta_x.size(2)
        w = phi_x.size(3)
        # Broadcast theta over columns and phi over rows so every
        # (i, j) position pair is concatenated channel-wise.
        theta_x = theta_x.repeat(1, 1, 1, w)
        phi_x = phi_x.repeat(1, 1, h, 1)

        concat_feature = torch.cat([theta_x, phi_x], dim=1)
        pairwise_weight = self.concat_project(concat_feature)
        n, _, h, w = pairwise_weight.size()
        pairwise_weight = pairwise_weight.view(n, h, w)
        pairwise_weight /= pairwise_weight.shape[-1]

        return pairwise_weight

    def forward(self, x):
        """Compute non-local attention over *x* and add it residually."""
        # Assume `reduction = 1`, then `inter_channels = C`
        # or `inter_channels = C` when `mode="gaussian"`

        # NonLocal1d x: [N, C, H]
        # NonLocal2d x: [N, C, H, W]
        # NonLocal3d x: [N, C, T, H, W]
        n = x.size(0)

        # NonLocal1d g_x: [N, H, C]
        # NonLocal2d g_x: [N, HxW, C]
        # NonLocal3d g_x: [N, TxHxW, C]
        g_x = self.g(x).view(n, self.inter_channels, -1)
        g_x = g_x.permute(0, 2, 1)

        # NonLocal1d theta_x: [N, H, C], phi_x: [N, C, H]
        # NonLocal2d theta_x: [N, HxW, C], phi_x: [N, C, HxW]
        # NonLocal3d theta_x: [N, TxHxW, C], phi_x: [N, C, TxHxW]
        if self.mode == 'gaussian':
            theta_x = x.view(n, self.in_channels, -1)
            theta_x = theta_x.permute(0, 2, 1)
            # NOTE: `self.sub_sample` is set by the NonLocal1d/2d/3d
            # subclasses, not by this base class.
            if self.sub_sample:
                phi_x = self.phi(x).view(n, self.in_channels, -1)
            else:
                phi_x = x.view(n, self.in_channels, -1)
        elif self.mode == 'concatenation':
            theta_x = self.theta(x).view(n, self.inter_channels, -1, 1)
            phi_x = self.phi(x).view(n, self.inter_channels, 1, -1)
        else:
            theta_x = self.theta(x).view(n, self.inter_channels, -1)
            theta_x = theta_x.permute(0, 2, 1)
            phi_x = self.phi(x).view(n, self.inter_channels, -1)

        # Dispatch to the pairwise function named by `self.mode`.
        pairwise_func = getattr(self, self.mode)
        # NonLocal1d pairwise_weight: [N, H, H]
        # NonLocal2d pairwise_weight: [N, HxW, HxW]
        # NonLocal3d pairwise_weight: [N, TxHxW, TxHxW]
        pairwise_weight = pairwise_func(theta_x, phi_x)

        # NonLocal1d y: [N, H, C]
        # NonLocal2d y: [N, HxW, C]
        # NonLocal3d y: [N, TxHxW, C]
        y = torch.matmul(pairwise_weight, g_x)
        # NonLocal1d y: [N, C, H]
        # NonLocal2d y: [N, C, H, W]
        # NonLocal3d y: [N, C, T, H, W]
        y = y.permute(0, 2, 1).contiguous().reshape(n, self.inter_channels,
                                                    *x.size()[2:])

        # Residual connection keeps the block easy to insert anywhere.
        output = x + self.conv_out(y)

        return output
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
class NonLocal1d(_NonLocalNd):
    """1D Non-local module.

    Args:
        in_channels (int): Same as `NonLocalND`.
        sub_sample (bool): Whether to apply max pooling after pairwise
            function (Note that the `sub_sample` is applied on spatial only).
            Default: False.
        conv_cfg (None | dict): Same as `NonLocalND`.
            Default: dict(type='Conv1d').
    """

    def __init__(self,
                 in_channels,
                 sub_sample=False,
                 conv_cfg=dict(type='Conv1d'),
                 **kwargs):
        super(NonLocal1d, self).__init__(
            in_channels, conv_cfg=conv_cfg, **kwargs)
        self.sub_sample = sub_sample

        if not sub_sample:
            return
        # Halve the temporal length fed to `g` and `phi`, shrinking the
        # pairwise affinity matrix.
        pool = nn.MaxPool1d(kernel_size=2)
        self.g = nn.Sequential(self.g, pool)
        if self.mode == 'gaussian':
            # In gaussian mode `phi` has no projection, so the pooling
            # layer alone takes its place.
            self.phi = pool
        else:
            self.phi = nn.Sequential(self.phi, pool)
|
| 243 |
+
|
| 244 |
+
|
| 245 |
+
@PLUGIN_LAYERS.register_module()
class NonLocal2d(_NonLocalNd):
    """2D Non-local module.

    Args:
        in_channels (int): Same as `NonLocalND`.
        sub_sample (bool): Whether to apply max pooling after pairwise
            function (Note that the `sub_sample` is applied on spatial only).
            Default: False.
        conv_cfg (None | dict): Same as `NonLocalND`.
            Default: dict(type='Conv2d').
    """

    _abbr_ = 'nonlocal_block'

    def __init__(self,
                 in_channels,
                 sub_sample=False,
                 conv_cfg=dict(type='Conv2d'),
                 **kwargs):
        super(NonLocal2d, self).__init__(
            in_channels, conv_cfg=conv_cfg, **kwargs)
        self.sub_sample = sub_sample

        if not sub_sample:
            return
        # Spatially downsample `g` and `phi` by 2x2 max pooling to cut the
        # cost of the pairwise computation.
        pool = nn.MaxPool2d(kernel_size=(2, 2))
        self.g = nn.Sequential(self.g, pool)
        if self.mode == 'gaussian':
            # `phi` has no projection in gaussian mode, so the pooling
            # layer replaces it outright.
            self.phi = pool
        else:
            self.phi = nn.Sequential(self.phi, pool)
|
| 277 |
+
|
| 278 |
+
|
| 279 |
+
class NonLocal3d(_NonLocalNd):
    """3D Non-local module.

    Args:
        in_channels (int): Same as `NonLocalND`.
        sub_sample (bool): Whether to apply max pooling after pairwise
            function (Note that the `sub_sample` is applied on spatial only).
            Default: False.
        conv_cfg (None | dict): Same as `NonLocalND`.
            Default: dict(type='Conv3d').
    """

    def __init__(self,
                 in_channels,
                 sub_sample=False,
                 conv_cfg=dict(type='Conv3d'),
                 **kwargs):
        super(NonLocal3d, self).__init__(
            in_channels, conv_cfg=conv_cfg, **kwargs)
        self.sub_sample = sub_sample

        if not sub_sample:
            return
        # Pool spatially only (kernel 1 on the temporal axis) so the time
        # dimension is preserved while H and W are halved.
        pool = nn.MaxPool3d(kernel_size=(1, 2, 2))
        self.g = nn.Sequential(self.g, pool)
        if self.mode == 'gaussian':
            # `phi` has no projection in gaussian mode; the pool stands in.
            self.phi = pool
        else:
            self.phi = nn.Sequential(self.phi, pool)
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/cnn/bricks/padding.py
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
|
| 4 |
+
from .registry import PADDING_LAYERS
|
| 5 |
+
|
| 6 |
+
# Register the built-in 2D padding layers under short string keys so they
# can be selected from a config dict.
PADDING_LAYERS.register_module('zero', module=nn.ZeroPad2d)
PADDING_LAYERS.register_module('reflect', module=nn.ReflectionPad2d)
PADDING_LAYERS.register_module('replicate', module=nn.ReplicationPad2d)


def build_padding_layer(cfg, *args, **kwargs):
    """Build padding layer.

    Args:
        cfg (None or dict): The padding layer config, which should contain:
            - type (str): Layer type.
            - layer args: Args needed to instantiate a padding layer.

    Returns:
        nn.Module: Created padding layer.
    """
    if not isinstance(cfg, dict):
        raise TypeError('cfg must be a dict')
    if 'type' not in cfg:
        raise KeyError('the cfg dict must contain the key "type"')

    # Pop `type` from a copy; the remaining entries become layer kwargs.
    layer_args = cfg.copy()
    padding_type = layer_args.pop('type')
    if padding_type not in PADDING_LAYERS:
        raise KeyError(f'Unrecognized padding type {padding_type}.')
    padding_layer = PADDING_LAYERS.get(padding_type)

    return padding_layer(*args, **kwargs, **layer_args)
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/cnn/bricks/registry.py
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from annotator.uniformer.mmcv.utils import Registry
|
| 3 |
+
|
| 4 |
+
# Layer-type registries: each maps a string name (given via a config dict's
# ``type`` key) to a layer class so bricks can be built from configuration.
CONV_LAYERS = Registry('conv layer')
NORM_LAYERS = Registry('norm layer')
ACTIVATION_LAYERS = Registry('activation layer')
PADDING_LAYERS = Registry('padding layer')
UPSAMPLE_LAYERS = Registry('upsample layer')
PLUGIN_LAYERS = Registry('plugin layer')

# Registries consumed by the transformer bricks (see transformer.py).
DROPOUT_LAYERS = Registry('drop out layers')
POSITIONAL_ENCODING = Registry('position encoding')
ATTENTION = Registry('attention')
FEEDFORWARD_NETWORK = Registry('feed-forward Network')
TRANSFORMER_LAYER = Registry('transformerLayer')
TRANSFORMER_LAYER_SEQUENCE = Registry('transformer-layers sequence')
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/cnn/bricks/swish.py
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import torch
|
| 3 |
+
import torch.nn as nn
|
| 4 |
+
|
| 5 |
+
from .registry import ACTIVATION_LAYERS
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
@ACTIVATION_LAYERS.register_module()
class Swish(nn.Module):
    """Swish Module.

    This module applies the swish function:

    .. math::
        Swish(x) = x * Sigmoid(x)

    Returns:
        Tensor: The output tensor.
    """

    def forward(self, x):
        """Apply the element-wise swish activation to ``x``."""
        return x * torch.sigmoid(x)
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/cnn/bricks/transformer.py
ADDED
|
@@ -0,0 +1,595 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import copy
|
| 3 |
+
import warnings
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
import torch.nn as nn
|
| 7 |
+
|
| 8 |
+
from annotator.uniformer.mmcv import ConfigDict, deprecated_api_warning
|
| 9 |
+
from annotator.uniformer.mmcv.cnn import Linear, build_activation_layer, build_norm_layer
|
| 10 |
+
from annotator.uniformer.mmcv.runner.base_module import BaseModule, ModuleList, Sequential
|
| 11 |
+
from annotator.uniformer.mmcv.utils import build_from_cfg
|
| 12 |
+
from .drop import build_dropout
|
| 13 |
+
from .registry import (ATTENTION, FEEDFORWARD_NETWORK, POSITIONAL_ENCODING,
|
| 14 |
+
TRANSFORMER_LAYER, TRANSFORMER_LAYER_SEQUENCE)
|
| 15 |
+
|
| 16 |
+
# Avoid BC-breaking of importing MultiScaleDeformableAttention from this file
try:
    from annotator.uniformer.mmcv.ops.multi_scale_deform_attn import MultiScaleDeformableAttention  # noqa F401
    # Re-exporting from here still works but is deprecated; steer users to
    # the new import location.
    warnings.warn(
        ImportWarning(
            '``MultiScaleDeformableAttention`` has been moved to '
            '``mmcv.ops.multi_scale_deform_attn``, please change original path '  # noqa E501
            '``from annotator.uniformer.mmcv.cnn.bricks.transformer import MultiScaleDeformableAttention`` '  # noqa E501
            'to ``from annotator.uniformer.mmcv.ops.multi_scale_deform_attn import MultiScaleDeformableAttention`` '  # noqa E501
        ))

except ImportError:
    # The op lives in the mmcv-full package; degrade gracefully when only
    # the lite build is installed.
    warnings.warn('Fail to import ``MultiScaleDeformableAttention`` from '
                  '``mmcv.ops.multi_scale_deform_attn``, '
                  'You should install ``mmcv-full`` if you need this module. ')
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def build_positional_encoding(cfg, default_args=None):
    """Builder for Position Encoding.

    Args:
        cfg (dict): Config with a ``type`` key registered in
            ``POSITIONAL_ENCODING``.
        default_args (dict, optional): Defaults merged into ``cfg``.
    """
    return build_from_cfg(cfg, POSITIONAL_ENCODING, default_args)


def build_attention(cfg, default_args=None):
    """Builder for attention.

    Args:
        cfg (dict): Config with a ``type`` key registered in ``ATTENTION``.
        default_args (dict, optional): Defaults merged into ``cfg``.
    """
    return build_from_cfg(cfg, ATTENTION, default_args)


def build_feedforward_network(cfg, default_args=None):
    """Builder for feed-forward network (FFN).

    Args:
        cfg (dict): Config with a ``type`` key registered in
            ``FEEDFORWARD_NETWORK``.
        default_args (dict, optional): Defaults merged into ``cfg``.
    """
    return build_from_cfg(cfg, FEEDFORWARD_NETWORK, default_args)


def build_transformer_layer(cfg, default_args=None):
    """Builder for transformer layer.

    Args:
        cfg (dict): Config with a ``type`` key registered in
            ``TRANSFORMER_LAYER``.
        default_args (dict, optional): Defaults merged into ``cfg``.
    """
    return build_from_cfg(cfg, TRANSFORMER_LAYER, default_args)


def build_transformer_layer_sequence(cfg, default_args=None):
    """Builder for transformer encoder and transformer decoder.

    Args:
        cfg (dict): Config with a ``type`` key registered in
            ``TRANSFORMER_LAYER_SEQUENCE``.
        default_args (dict, optional): Defaults merged into ``cfg``.
    """
    return build_from_cfg(cfg, TRANSFORMER_LAYER_SEQUENCE, default_args)
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
@ATTENTION.register_module()
class MultiheadAttention(BaseModule):
    """A wrapper for ``torch.nn.MultiheadAttention``.

    This module implements MultiheadAttention with identity connection,
    and positional encoding is also passed as input.

    Args:
        embed_dims (int): The embedding dimension.
        num_heads (int): Parallel attention heads.
        attn_drop (float): A Dropout layer on attn_output_weights.
            Default: 0.0.
        proj_drop (float): A Dropout layer after `nn.MultiheadAttention`.
            Default: 0.0.
        dropout_layer (obj:`ConfigDict`): The dropout_layer used
            when adding the shortcut.
        init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
            Default: None.
        batch_first (bool): When it is True, Key, Query and Value are shape of
            (batch, n, embed_dim), otherwise (n, batch, embed_dim).
            Default to False.
    """

    def __init__(self,
                 embed_dims,
                 num_heads,
                 attn_drop=0.,
                 proj_drop=0.,
                 dropout_layer=dict(type='Dropout', drop_prob=0.),
                 init_cfg=None,
                 batch_first=False,
                 **kwargs):
        super(MultiheadAttention, self).__init__(init_cfg)
        if 'dropout' in kwargs:
            warnings.warn('The arguments `dropout` in MultiheadAttention '
                          'has been deprecated, now you can separately '
                          'set `attn_drop`(float), proj_drop(float), '
                          'and `dropout_layer`(dict) ')
            # Map the deprecated single `dropout` value onto both new knobs,
            # and pop it so it is not forwarded to nn.MultiheadAttention.
            attn_drop = kwargs['dropout']
            dropout_layer['drop_prob'] = kwargs.pop('dropout')

        self.embed_dims = embed_dims
        self.num_heads = num_heads
        self.batch_first = batch_first

        self.attn = nn.MultiheadAttention(embed_dims, num_heads, attn_drop,
                                          **kwargs)

        self.proj_drop = nn.Dropout(proj_drop)
        # Shortcut-path dropout; falls back to identity when no config
        # is given.
        self.dropout_layer = build_dropout(
            dropout_layer) if dropout_layer else nn.Identity()

    @deprecated_api_warning({'residual': 'identity'},
                            cls_name='MultiheadAttention')
    def forward(self,
                query,
                key=None,
                value=None,
                identity=None,
                query_pos=None,
                key_pos=None,
                attn_mask=None,
                key_padding_mask=None,
                **kwargs):
        """Forward function for `MultiheadAttention`.

        **kwargs allow passing a more general data flow when combining
        with other operations in `transformerlayer`.

        Args:
            query (Tensor): The input query with shape [num_queries, bs,
                embed_dims] if self.batch_first is False, else
                [bs, num_queries embed_dims].
            key (Tensor): The key tensor with shape [num_keys, bs,
                embed_dims] if self.batch_first is False, else
                [bs, num_keys, embed_dims] .
                If None, the ``query`` will be used. Defaults to None.
            value (Tensor): The value tensor with same shape as `key`.
                Same in `nn.MultiheadAttention.forward`. Defaults to None.
                If None, the `key` will be used.
            identity (Tensor): This tensor, with the same shape as x,
                will be used for the identity link.
                If None, `x` will be used. Defaults to None.
            query_pos (Tensor): The positional encoding for query, with
                the same shape as `x`. If not None, it will
                be added to `x` before forward function. Defaults to None.
            key_pos (Tensor): The positional encoding for `key`, with the
                same shape as `key`. Defaults to None. If not None, it will
                be added to `key` before forward function. If None, and
                `query_pos` has the same shape as `key`, then `query_pos`
                will be used for `key_pos`. Defaults to None.
            attn_mask (Tensor): ByteTensor mask with shape [num_queries,
                num_keys]. Same in `nn.MultiheadAttention.forward`.
                Defaults to None.
            key_padding_mask (Tensor): ByteTensor with shape [bs, num_keys].
                Defaults to None.

        Returns:
            Tensor: forwarded results with shape
                [num_queries, bs, embed_dims]
                if self.batch_first is False, else
                [bs, num_queries embed_dims].
        """

        # Self-attention defaults: key falls back to query, value to key.
        if key is None:
            key = query
        if value is None:
            value = key
        if identity is None:
            identity = query
        if key_pos is None:
            if query_pos is not None:
                # use query_pos if key_pos is not available
                if query_pos.shape == key.shape:
                    key_pos = query_pos
                else:
                    warnings.warn(f'position encoding of key is'
                                  f'missing in {self.__class__.__name__}.')
        if query_pos is not None:
            query = query + query_pos
        if key_pos is not None:
            key = key + key_pos

        # Because the dataflow('key', 'query', 'value') of
        # ``torch.nn.MultiheadAttention`` is (num_query, batch,
        # embed_dims), We should adjust the shape of dataflow from
        # batch_first (batch, num_query, embed_dims) to num_query_first
        # (num_query ,batch, embed_dims), and recover ``attn_output``
        # from num_query_first to batch_first.
        if self.batch_first:
            query = query.transpose(0, 1)
            key = key.transpose(0, 1)
            value = value.transpose(0, 1)

        out = self.attn(
            query=query,
            key=key,
            value=value,
            attn_mask=attn_mask,
            key_padding_mask=key_padding_mask)[0]

        if self.batch_first:
            out = out.transpose(0, 1)

        # Residual connection with dropout on the attention output.
        return identity + self.dropout_layer(self.proj_drop(out))
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
@FEEDFORWARD_NETWORK.register_module()
class FFN(BaseModule):
    """Implements feed-forward networks (FFNs) with identity connection.

    Args:
        embed_dims (int): The feature dimension. Same as
            `MultiheadAttention`. Defaults: 256.
        feedforward_channels (int): The hidden dimension of FFNs.
            Defaults: 1024.
        num_fcs (int, optional): The number of fully-connected layers in
            FFNs. Default: 2.
        act_cfg (dict, optional): The activation config for FFNs.
            Default: dict(type='ReLU')
        ffn_drop (float, optional): Probability of an element to be
            zeroed in FFN. Default 0.0.
        add_identity (bool, optional): Whether to add the
            identity connection. Default: `True`.
        dropout_layer (obj:`ConfigDict`): The dropout_layer used
            when adding the shortcut.
        init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
            Default: None.
    """

    @deprecated_api_warning(
        {
            'dropout': 'ffn_drop',
            'add_residual': 'add_identity'
        },
        cls_name='FFN')
    def __init__(self,
                 embed_dims=256,
                 feedforward_channels=1024,
                 num_fcs=2,
                 act_cfg=dict(type='ReLU', inplace=True),
                 ffn_drop=0.,
                 dropout_layer=None,
                 add_identity=True,
                 init_cfg=None,
                 **kwargs):
        super(FFN, self).__init__(init_cfg)
        assert num_fcs >= 2, 'num_fcs should be no less ' \
            f'than 2. got {num_fcs}.'
        self.embed_dims = embed_dims
        self.feedforward_channels = feedforward_channels
        self.num_fcs = num_fcs
        self.act_cfg = act_cfg
        self.activate = build_activation_layer(act_cfg)

        # Stack (num_fcs - 1) hidden blocks of Linear -> activation ->
        # Dropout, then project back to `embed_dims` with final dropout.
        fc_blocks = []
        dim = embed_dims
        for _ in range(num_fcs - 1):
            fc_blocks.append(
                Sequential(
                    Linear(dim, feedforward_channels), self.activate,
                    nn.Dropout(ffn_drop)))
            dim = feedforward_channels
        fc_blocks.append(Linear(feedforward_channels, embed_dims))
        fc_blocks.append(nn.Dropout(ffn_drop))
        self.layers = Sequential(*fc_blocks)
        # Shortcut-path dropout; identity when no config is supplied.
        self.dropout_layer = build_dropout(
            dropout_layer) if dropout_layer else nn.Identity()
        self.add_identity = add_identity

    @deprecated_api_warning({'residual': 'identity'}, cls_name='FFN')
    def forward(self, x, identity=None):
        """Forward function for `FFN`.

        The function would add x to the output tensor if residue is None.
        """
        out = self.dropout_layer(self.layers(x))
        if not self.add_identity:
            return out
        shortcut = x if identity is None else identity
        return shortcut + out
|
| 280 |
+
|
| 281 |
+
|
| 282 |
+
@TRANSFORMER_LAYER.register_module()
class BaseTransformerLayer(BaseModule):
    """Base `TransformerLayer` for vision transformer.

    It can be built from `mmcv.ConfigDict` and support more flexible
    customization, for example, using any number of `FFN or LN ` and
    use different kinds of `attention` by specifying a list of `ConfigDict`
    named `attn_cfgs`. It is worth mentioning that it supports `prenorm`
    when you specifying `norm` as the first element of `operation_order`.
    More details about the `prenorm`: `On Layer Normalization in the
    Transformer Architecture <https://arxiv.org/abs/2002.04745>`_ .

    Args:
        attn_cfgs (list[`mmcv.ConfigDict`] | obj:`mmcv.ConfigDict` | None )):
            Configs for `self_attention` or `cross_attention` modules,
            The order of the configs in the list should be consistent with
            corresponding attentions in operation_order.
            If it is a dict, all of the attention modules in operation_order
            will be built with this config. Default: None.
        ffn_cfgs (list[`mmcv.ConfigDict`] | obj:`mmcv.ConfigDict` | None )):
            Configs for FFN, The order of the configs in the list should be
            consistent with corresponding ffn in operation_order.
            If it is a dict, all of the attention modules in operation_order
            will be built with this config.
        operation_order (tuple[str]): The execution order of operation
            in transformer. Such as ('self_attn', 'norm', 'ffn', 'norm').
            Support `prenorm` when you specifying first element as `norm`.
            Default: None.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='LN').
        init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
            Default: None.
        batch_first (bool): Key, Query and Value are shape
            of (batch, n, embed_dim)
            or (n, batch, embed_dim). Default to False.
    """

    def __init__(self,
                 attn_cfgs=None,
                 ffn_cfgs=dict(
                     type='FFN',
                     embed_dims=256,
                     feedforward_channels=1024,
                     num_fcs=2,
                     ffn_drop=0.,
                     act_cfg=dict(type='ReLU', inplace=True),
                 ),
                 operation_order=None,
                 norm_cfg=dict(type='LN'),
                 init_cfg=None,
                 batch_first=False,
                 **kwargs):

        # Copy first so that neither the deprecated-kwarg handling below nor
        # the per-layer 'embed_dims' fill-in can mutate the shared mutable
        # default dict (which would leak settings across instances).
        ffn_cfgs = copy.deepcopy(ffn_cfgs)

        deprecated_args = dict(
            feedforward_channels='feedforward_channels',
            ffn_dropout='ffn_drop',
            ffn_num_fcs='num_fcs')
        for ori_name, new_name in deprecated_args.items():
            if ori_name in kwargs:
                warnings.warn(
                    f'The arguments `{ori_name}` in BaseTransformerLayer '
                    f'has been deprecated, now you should set `{new_name}` '
                    f'and other FFN related arguments '
                    f'to a dict named `ffn_cfgs`. ')
                ffn_cfgs[new_name] = kwargs[ori_name]

        super(BaseTransformerLayer, self).__init__(init_cfg)

        self.batch_first = batch_first

        # operation_order may only contain the four supported operation
        # names, and a layer must use all four of them.
        assert set(operation_order) & set(
            ['self_attn', 'norm', 'ffn', 'cross_attn']) == \
            set(operation_order), f'The operation_order of' \
            f' {self.__class__.__name__} should ' \
            f'contains all four operation type ' \
            f"{['self_attn', 'norm', 'ffn', 'cross_attn']}"

        num_attn = operation_order.count('self_attn') + operation_order.count(
            'cross_attn')
        if isinstance(attn_cfgs, dict):
            # A single dict is replicated for every attention operation.
            attn_cfgs = [copy.deepcopy(attn_cfgs) for _ in range(num_attn)]
        else:
            assert num_attn == len(attn_cfgs), f'The length ' \
                f'of attn_cfg {num_attn} is ' \
                f'not consistent with the number of attention' \
                f'in operation_order {operation_order}.'

        self.num_attn = num_attn
        self.operation_order = operation_order
        self.norm_cfg = norm_cfg
        # 'prenorm' layout is signalled by 'norm' being the first operation.
        self.pre_norm = operation_order[0] == 'norm'
        self.attentions = ModuleList()

        index = 0
        for operation_name in operation_order:
            if operation_name in ['self_attn', 'cross_attn']:
                if 'batch_first' in attn_cfgs[index]:
                    assert self.batch_first == attn_cfgs[index]['batch_first']
                else:
                    attn_cfgs[index]['batch_first'] = self.batch_first
                attention = build_attention(attn_cfgs[index])
                # Some custom attentions used as `self_attn`
                # or `cross_attn` can have different behavior.
                attention.operation_name = operation_name
                self.attentions.append(attention)
                index += 1

        self.embed_dims = self.attentions[0].embed_dims

        self.ffns = ModuleList()
        num_ffns = operation_order.count('ffn')
        if isinstance(ffn_cfgs, dict):
            ffn_cfgs = ConfigDict(ffn_cfgs)
        if isinstance(ffn_cfgs, dict):
            ffn_cfgs = [copy.deepcopy(ffn_cfgs) for _ in range(num_ffns)]
        assert len(ffn_cfgs) == num_ffns
        for ffn_index in range(num_ffns):
            if 'embed_dims' not in ffn_cfgs[ffn_index]:
                # BUGFIX: the original indexed the *list* with a string key
                # (`ffn_cfgs['embed_dims'] = ...`), raising TypeError whenever
                # an FFN config omitted 'embed_dims'. Fill in the per-layer
                # config instead.
                ffn_cfgs[ffn_index]['embed_dims'] = self.embed_dims
            else:
                assert ffn_cfgs[ffn_index]['embed_dims'] == self.embed_dims
            self.ffns.append(
                build_feedforward_network(ffn_cfgs[ffn_index],
                                          dict(type='FFN')))

        self.norms = ModuleList()
        num_norms = operation_order.count('norm')
        for _ in range(num_norms):
            self.norms.append(build_norm_layer(norm_cfg, self.embed_dims)[1])

    def forward(self,
                query,
                key=None,
                value=None,
                query_pos=None,
                key_pos=None,
                attn_masks=None,
                query_key_padding_mask=None,
                key_padding_mask=None,
                **kwargs):
        """Forward function for `TransformerDecoderLayer`.

        **kwargs contains some specific arguments of attentions.

        Args:
            query (Tensor): The input query with shape
                [num_queries, bs, embed_dims] if
                self.batch_first is False, else
                [bs, num_queries embed_dims].
            key (Tensor): The key tensor with shape [num_keys, bs,
                embed_dims] if self.batch_first is False, else
                [bs, num_keys, embed_dims] .
            value (Tensor): The value tensor with same shape as `key`.
            query_pos (Tensor): The positional encoding for `query`.
                Default: None.
            key_pos (Tensor): The positional encoding for `key`.
                Default: None.
            attn_masks (List[Tensor] | None): 2D Tensor used in
                calculation of corresponding attention. The length of
                it should equal to the number of `attention` in
                `operation_order`. Default: None.
            query_key_padding_mask (Tensor): ByteTensor for `query`, with
                shape [bs, num_queries]. Only used in `self_attn` layer.
                Defaults to None.
            key_padding_mask (Tensor): ByteTensor for `query`, with
                shape [bs, num_keys]. Default: None.

        Returns:
            Tensor: forwarded results with shape [num_queries, bs, embed_dims].
        """

        norm_index = 0
        attn_index = 0
        ffn_index = 0
        identity = query
        if attn_masks is None:
            attn_masks = [None for _ in range(self.num_attn)]
        elif isinstance(attn_masks, torch.Tensor):
            # Broadcast a single mask to every attention operation.
            attn_masks = [
                copy.deepcopy(attn_masks) for _ in range(self.num_attn)
            ]
            warnings.warn(f'Use same attn_mask in all attentions in '
                          f'{self.__class__.__name__} ')
        else:
            assert len(attn_masks) == self.num_attn, f'The length of ' \
                f'attn_masks {len(attn_masks)} must be equal ' \
                f'to the number of attention in ' \
                f'operation_order {self.num_attn}'

        for layer in self.operation_order:
            if layer == 'self_attn':
                temp_key = temp_value = query
                query = self.attentions[attn_index](
                    query,
                    temp_key,
                    temp_value,
                    # Under prenorm the attention adds the *pre-norm* input
                    # as residual; otherwise the module handles it itself.
                    identity if self.pre_norm else None,
                    query_pos=query_pos,
                    key_pos=query_pos,
                    attn_mask=attn_masks[attn_index],
                    key_padding_mask=query_key_padding_mask,
                    **kwargs)
                attn_index += 1
                identity = query

            elif layer == 'norm':
                query = self.norms[norm_index](query)
                norm_index += 1

            elif layer == 'cross_attn':
                query = self.attentions[attn_index](
                    query,
                    key,
                    value,
                    identity if self.pre_norm else None,
                    query_pos=query_pos,
                    key_pos=key_pos,
                    attn_mask=attn_masks[attn_index],
                    key_padding_mask=key_padding_mask,
                    **kwargs)
                attn_index += 1
                identity = query

            elif layer == 'ffn':
                query = self.ffns[ffn_index](
                    query, identity if self.pre_norm else None)
                ffn_index += 1

        return query
|
| 511 |
+
|
| 512 |
+
|
| 513 |
+
@TRANSFORMER_LAYER_SEQUENCE.register_module()
class TransformerLayerSequence(BaseModule):
    """Base class for TransformerEncoder and TransformerDecoder in vision
    transformer.

    As base-class of Encoder and Decoder in vision transformer.
    Support customization such as specifying different kind
    of `transformer_layer` in `transformer_coder`.

    Args:
        transformerlayer (list[obj:`mmcv.ConfigDict`] |
            obj:`mmcv.ConfigDict`): Config of transformerlayer
            in TransformerCoder. If it is obj:`mmcv.ConfigDict`,
            it would be repeated `num_layer` times to a
            list[`mmcv.ConfigDict`]. Default: None.
        num_layers (int): The number of `TransformerLayer`. Default: None.
        init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
            Default: None.
    """

    def __init__(self, transformerlayers=None, num_layers=None, init_cfg=None):
        super(TransformerLayerSequence, self).__init__(init_cfg)
        if isinstance(transformerlayers, dict):
            # A single config is replicated once per layer.
            transformerlayers = [
                copy.deepcopy(transformerlayers) for _ in range(num_layers)
            ]
        else:
            assert isinstance(transformerlayers, list) and \
                len(transformerlayers) == num_layers
        self.num_layers = num_layers
        self.layers = ModuleList()
        for layer_cfg in transformerlayers:
            self.layers.append(build_transformer_layer(layer_cfg))
        # Expose the first layer's properties so users of the sequence do not
        # need to reach into individual layers.
        self.embed_dims = self.layers[0].embed_dims
        self.pre_norm = self.layers[0].pre_norm

    def forward(self,
                query,
                key,
                value,
                query_pos=None,
                key_pos=None,
                attn_masks=None,
                query_key_padding_mask=None,
                key_padding_mask=None,
                **kwargs):
        """Forward function for `TransformerCoder`.

        Args:
            query (Tensor): Input query with shape
                `(num_queries, bs, embed_dims)`.
            key (Tensor): The key tensor with shape
                `(num_keys, bs, embed_dims)`.
            value (Tensor): The value tensor with shape
                `(num_keys, bs, embed_dims)`.
            query_pos (Tensor): The positional encoding for `query`.
                Default: None.
            key_pos (Tensor): The positional encoding for `key`.
                Default: None.
            attn_masks (List[Tensor], optional): Each element is 2D Tensor
                which is used in calculation of corresponding attention in
                operation_order. Default: None.
            query_key_padding_mask (Tensor): ByteTensor for `query`, with
                shape [bs, num_queries]. Only used in self-attention
                Default: None.
            key_padding_mask (Tensor): ByteTensor for `query`, with
                shape [bs, num_keys]. Default: None.

        Returns:
            Tensor: results with shape [num_queries, bs, embed_dims].
        """
        # Thread the query through every layer; keys/values and masks are
        # shared across the stack.
        for transformer_layer in self.layers:
            query = transformer_layer(
                query,
                key,
                value,
                query_pos=query_pos,
                key_pos=key_pos,
                attn_masks=attn_masks,
                query_key_padding_mask=query_key_padding_mask,
                key_padding_mask=key_padding_mask,
                **kwargs)
        return query
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/cnn/bricks/upsample.py
ADDED
|
@@ -0,0 +1,84 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
import torch.nn.functional as F
|
| 4 |
+
|
| 5 |
+
from ..utils import xavier_init
|
| 6 |
+
from .registry import UPSAMPLE_LAYERS
|
| 7 |
+
|
| 8 |
+
# Both names map to ``nn.Upsample``; ``build_upsample_layer`` later injects
# the registered name as the ``mode`` argument, selecting the interpolation.
UPSAMPLE_LAYERS.register_module('nearest', module=nn.Upsample)
UPSAMPLE_LAYERS.register_module('bilinear', module=nn.Upsample)
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
@UPSAMPLE_LAYERS.register_module(name='pixel_shuffle')
class PixelShufflePack(nn.Module):
    """Pixel Shuffle upsample layer.

    Packs a ``nn.Conv2d`` that expands channels by ``scale_factor ** 2``
    followed by ``F.pixel_shuffle``, which folds those channels back into
    spatial resolution — a simple learned upsampling.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        scale_factor (int): Upsample ratio.
        upsample_kernel (int): Kernel size of the conv layer to expand the
            channels.
    """

    def __init__(self, in_channels, out_channels, scale_factor,
                 upsample_kernel):
        super(PixelShufflePack, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.scale_factor = scale_factor
        self.upsample_kernel = upsample_kernel
        # 'same'-style padding for odd kernels keeps spatial size unchanged
        # before the shuffle.
        self.upsample_conv = nn.Conv2d(
            in_channels,
            out_channels * scale_factor * scale_factor,
            upsample_kernel,
            padding=(upsample_kernel - 1) // 2)
        self.init_weights()

    def init_weights(self):
        """Initialize the conv with uniform Xavier weights."""
        xavier_init(self.upsample_conv, distribution='uniform')

    def forward(self, x):
        """Expand channels then shuffle them into spatial dimensions."""
        expanded = self.upsample_conv(x)
        return F.pixel_shuffle(expanded, self.scale_factor)
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def build_upsample_layer(cfg, *args, **kwargs):
    """Build upsample layer.

    Args:
        cfg (dict): The upsample layer config, which should contain:

            - type (str): Layer type.
            - scale_factor (int): Upsample ratio, which is not applicable to
              deconv.
            - layer args: Args needed to instantiate a upsample layer.
        args (argument list): Arguments passed to the ``__init__``
            method of the corresponding conv layer.
        kwargs (keyword arguments): Keyword arguments passed to the
            ``__init__`` method of the corresponding conv layer.

    Returns:
        nn.Module: Created upsample layer.
    """
    if not isinstance(cfg, dict):
        raise TypeError(f'cfg must be a dict, but got {type(cfg)}')
    if 'type' not in cfg:
        raise KeyError(
            f'the cfg dict must contain the key "type", but got {cfg}')

    # Work on a copy so the caller's cfg is left untouched.
    cfg_ = cfg.copy()
    layer_type = cfg_.pop('type')
    if layer_type not in UPSAMPLE_LAYERS:
        raise KeyError(f'Unrecognized upsample type {layer_type}')
    upsample = UPSAMPLE_LAYERS.get(layer_type)

    if upsample is nn.Upsample:
        # 'nearest' / 'bilinear' both register nn.Upsample; the registered
        # name doubles as the interpolation mode.
        cfg_['mode'] = layer_type
    return upsample(*args, **kwargs, **cfg_)
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/cnn/bricks/wrappers.py
ADDED
|
@@ -0,0 +1,180 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
r"""Modified from https://github.com/facebookresearch/detectron2/blob/master/detectron2/layers/wrappers.py # noqa: E501
|
| 3 |
+
|
| 4 |
+
Wrap some nn modules to support empty tensor input. Currently, these wrappers
|
| 5 |
+
are mainly used in mask heads like fcn_mask_head and maskiou_heads since mask
|
| 6 |
+
heads are trained on only positive RoIs.
|
| 7 |
+
"""
|
| 8 |
+
import math
|
| 9 |
+
|
| 10 |
+
import torch
|
| 11 |
+
import torch.nn as nn
|
| 12 |
+
from torch.nn.modules.utils import _pair, _triple
|
| 13 |
+
|
| 14 |
+
from .registry import CONV_LAYERS, UPSAMPLE_LAYERS
|
| 15 |
+
|
| 16 |
+
if torch.__version__ == 'parrots':
    # The Parrots framework reports a non-numeric version string; keep it
    # verbatim so obsolete_torch_version() can special-case it.
    TORCH_VERSION = torch.__version__
else:
    # Only (major, minor) matter for the comparisons below; this also strips
    # local suffixes such as '+cu92' from strings like '1.3.1+cu92'.
    TORCH_VERSION = tuple(
        int(part) for part in torch.__version__.split('.')[:2])
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def obsolete_torch_version(torch_version, version_threshold):
    """Return True if the running torch is 'parrots' or no newer than
    ``version_threshold`` (a ``(major, minor)`` tuple)."""
    if torch_version == 'parrots':
        return True
    return torch_version <= version_threshold
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class NewEmptyTensorOp(torch.autograd.Function):
    """Autograd op producing an empty tensor of a requested shape.

    Keeps the graph connected for empty-input forwards: backward mirrors the
    op, returning an empty gradient shaped like the original input.
    """

    @staticmethod
    def forward(ctx, x, new_shape):
        # Stash the input shape so backward can produce a matching empty
        # gradient.
        ctx.shape = x.shape
        return x.new_empty(new_shape)

    @staticmethod
    def backward(ctx, grad):
        # Second return is None: new_shape is not differentiable.
        return NewEmptyTensorOp.apply(grad, ctx.shape), None
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
@CONV_LAYERS.register_module('Conv', force=True)
class Conv2d(nn.Conv2d):
    """``nn.Conv2d`` that also accepts empty-batch inputs on old PyTorch.

    PyTorch <= 1.4 cannot run convolution on an empty tensor, so the output
    shape is computed analytically and an empty tensor of that shape is
    returned instead.
    """

    def forward(self, x):
        if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 4)):
            # Standard conv output-size formula per spatial dim:
            # o = (i + 2p - (d(k-1)+1)) // s + 1
            out_shape = [x.shape[0], self.out_channels]
            for i, k, p, s, d in zip(x.shape[-2:], self.kernel_size,
                                     self.padding, self.stride, self.dilation):
                o = (i + 2 * p - (d * (k - 1) + 1)) // s + 1
                out_shape.append(o)
            empty = NewEmptyTensorOp.apply(x, out_shape)
            if self.training:
                # produce dummy gradient to avoid DDP warning.
                dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0
                return empty + dummy
            else:
                return empty

        return super().forward(x)
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
@CONV_LAYERS.register_module('Conv3d', force=True)
class Conv3d(nn.Conv3d):
    """``nn.Conv3d`` that also accepts empty-batch inputs on old PyTorch.

    Same empty-tensor workaround as :class:`Conv2d`, applied over the three
    trailing spatial dimensions.
    """

    def forward(self, x):
        if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 4)):
            # Standard conv output-size formula per spatial dim.
            out_shape = [x.shape[0], self.out_channels]
            for i, k, p, s, d in zip(x.shape[-3:], self.kernel_size,
                                     self.padding, self.stride, self.dilation):
                o = (i + 2 * p - (d * (k - 1) + 1)) // s + 1
                out_shape.append(o)
            empty = NewEmptyTensorOp.apply(x, out_shape)
            if self.training:
                # produce dummy gradient to avoid DDP warning.
                dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0
                return empty + dummy
            else:
                return empty

        return super().forward(x)
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
@CONV_LAYERS.register_module()
@CONV_LAYERS.register_module('deconv')
@UPSAMPLE_LAYERS.register_module('deconv', force=True)
class ConvTranspose2d(nn.ConvTranspose2d):
    """``nn.ConvTranspose2d`` that also accepts empty-batch inputs on old
    PyTorch (<= 1.4), returning an analytically-shaped empty tensor.
    """

    def forward(self, x):
        if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 4)):
            # Transposed-conv output-size formula per spatial dim:
            # o = (i-1)*s - 2p + (d(k-1)+1) + output_padding
            out_shape = [x.shape[0], self.out_channels]
            for i, k, p, s, d, op in zip(x.shape[-2:], self.kernel_size,
                                         self.padding, self.stride,
                                         self.dilation, self.output_padding):
                out_shape.append((i - 1) * s - 2 * p + (d * (k - 1) + 1) + op)
            empty = NewEmptyTensorOp.apply(x, out_shape)
            if self.training:
                # produce dummy gradient to avoid DDP warning.
                dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0
                return empty + dummy
            else:
                return empty

        return super().forward(x)
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
@CONV_LAYERS.register_module()
@CONV_LAYERS.register_module('deconv3d')
@UPSAMPLE_LAYERS.register_module('deconv3d', force=True)
class ConvTranspose3d(nn.ConvTranspose3d):
    """``nn.ConvTranspose3d`` that also accepts empty-batch inputs on old
    PyTorch (<= 1.4), returning an analytically-shaped empty tensor.
    """

    def forward(self, x):
        if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 4)):
            # Transposed-conv output-size formula over the three trailing
            # spatial dims (see ConvTranspose2d).
            out_shape = [x.shape[0], self.out_channels]
            for i, k, p, s, d, op in zip(x.shape[-3:], self.kernel_size,
                                         self.padding, self.stride,
                                         self.dilation, self.output_padding):
                out_shape.append((i - 1) * s - 2 * p + (d * (k - 1) + 1) + op)
            empty = NewEmptyTensorOp.apply(x, out_shape)
            if self.training:
                # produce dummy gradient to avoid DDP warning.
                dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0
                return empty + dummy
            else:
                return empty

        return super().forward(x)
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
class MaxPool2d(nn.MaxPool2d):
    """``nn.MaxPool2d`` with an empty-input fallback for PyTorch <= 1.9,
    which does not support empty tensor inference for pooling."""

    def forward(self, x):
        # PyTorch 1.9 does not support empty tensor inference yet
        if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 9)):
            out_shape = list(x.shape[:2])
            # Pool parameters may be scalars; normalize them to pairs.
            dims = zip(x.shape[-2:], _pair(self.kernel_size),
                       _pair(self.padding), _pair(self.stride),
                       _pair(self.dilation))
            rounder = math.ceil if self.ceil_mode else math.floor
            for size, k, p, s, d in dims:
                extent = (size + 2 * p - (d * (k - 1) + 1)) / s + 1
                out_shape.append(rounder(extent))
            return NewEmptyTensorOp.apply(x, out_shape)

        return super().forward(x)
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
class MaxPool3d(nn.MaxPool3d):
    """``nn.MaxPool3d`` with an empty-input fallback for PyTorch <= 1.9,
    which does not support empty tensor inference for pooling."""

    def forward(self, x):
        # PyTorch 1.9 does not support empty tensor inference yet
        if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 9)):
            out_shape = list(x.shape[:2])
            # Pool parameters may be scalars; normalize them to triples.
            dims = zip(x.shape[-3:], _triple(self.kernel_size),
                       _triple(self.padding), _triple(self.stride),
                       _triple(self.dilation))
            rounder = math.ceil if self.ceil_mode else math.floor
            for size, k, p, s, d in dims:
                extent = (size + 2 * p - (d * (k - 1) + 1)) / s + 1
                out_shape.append(rounder(extent))
            return NewEmptyTensorOp.apply(x, out_shape)

        return super().forward(x)
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
class Linear(torch.nn.Linear):
    """``nn.Linear`` with an empty-input fallback for PyTorch <= 1.5.

    Empty tensor forward of the Linear layer is natively supported from
    PyTorch 1.6 onward; older versions get an analytically-shaped empty
    output instead.
    """

    def forward(self, x):
        if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 5)):
            empty = NewEmptyTensorOp.apply(x, [x.shape[0], self.out_features])
            if not self.training:
                return empty
            # Touch every parameter (times zero) so DDP sees a gradient and
            # does not warn about unused parameters.
            dummy = sum(p.view(-1)[0] for p in self.parameters()) * 0.0
            return empty + dummy

        return super().forward(x)
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/cnn/utils/flops_counter.py
ADDED
|
@@ -0,0 +1,599 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Modified from flops-counter.pytorch by Vladislav Sovrasov
|
| 2 |
+
# original repo: https://github.com/sovrasov/flops-counter.pytorch
|
| 3 |
+
|
| 4 |
+
# MIT License
|
| 5 |
+
|
| 6 |
+
# Copyright (c) 2018 Vladislav Sovrasov
|
| 7 |
+
|
| 8 |
+
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 9 |
+
# of this software and associated documentation files (the "Software"), to deal
|
| 10 |
+
# in the Software without restriction, including without limitation the rights
|
| 11 |
+
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 12 |
+
# copies of the Software, and to permit persons to whom the Software is
|
| 13 |
+
# furnished to do so, subject to the following conditions:
|
| 14 |
+
|
| 15 |
+
# The above copyright notice and this permission notice shall be included in
|
| 16 |
+
# all copies or substantial portions of the Software.
|
| 17 |
+
|
| 18 |
+
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 19 |
+
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 20 |
+
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 21 |
+
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 22 |
+
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 23 |
+
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 24 |
+
# SOFTWARE.
|
| 25 |
+
|
| 26 |
+
import sys
|
| 27 |
+
from functools import partial
|
| 28 |
+
|
| 29 |
+
import numpy as np
|
| 30 |
+
import torch
|
| 31 |
+
import torch.nn as nn
|
| 32 |
+
|
| 33 |
+
import annotator.uniformer.mmcv as mmcv
|
| 34 |
+
|
| 35 |
+
|
def get_model_complexity_info(model,
                              input_shape,
                              print_per_layer_stat=True,
                              as_strings=True,
                              input_constructor=None,
                              flush=False,
                              ost=sys.stdout):
    """Compute FLOPs and parameter counts for ``model``.

    The model is instrumented with counting hooks, run once on a dummy
    input of shape ``(1, *input_shape)`` (or on whatever
    ``input_constructor`` builds), and the accumulated statistics are
    returned. Optionally prints a per-layer breakdown.

    Args:
        model (nn.Module): The model for complexity calculation.
        input_shape (tuple): Input shape used for calculation.
        print_per_layer_stat (bool): Whether to print complexity information
            for each layer in a model. Default: True.
        as_strings (bool): Output FLOPs and params counts in a string form.
            Default: True.
        input_constructor (None | callable): If specified, it takes a callable
            method that generates input. Otherwise, a random tensor with
            ``input_shape`` is used. Default: None.
        flush (bool): same as that in :func:`print`. Default: False.
        ost (stream): same as ``file`` param in :func:`print`.
            Default: sys.stdout.

    Returns:
        tuple[float | str]: FLOPs and parameter counts, as strings when
        ``as_strings`` is True, otherwise as floats.
    """
    assert type(input_shape) is tuple
    assert len(input_shape) >= 1
    assert isinstance(model, nn.Module)
    flops_model = add_flops_counting_methods(model)
    flops_model.eval()
    flops_model.start_flops_count()

    if input_constructor:
        _ = flops_model(**input_constructor(input_shape))
    else:
        try:
            # Match dtype/device of the model's first parameter.
            first_param = next(flops_model.parameters())
            dummy_input = torch.ones(()).new_empty(
                (1, *input_shape),
                dtype=first_param.dtype,
                device=first_param.device)
        except StopIteration:
            # Avoid StopIteration for models which have no parameters,
            # like `nn.Relu()`, `nn.AvgPool2d`, etc.
            dummy_input = torch.ones(()).new_empty((1, *input_shape))
        _ = flops_model(dummy_input)

    flops_count, params_count = flops_model.compute_average_flops_cost()
    if print_per_layer_stat:
        print_model_with_flops(
            flops_model, flops_count, params_count, ost=ost, flush=flush)
    flops_model.stop_flops_count()

    if as_strings:
        return flops_to_string(flops_count), params_to_string(params_count)
    return flops_count, params_count
| 116 |
+
|
| 117 |
+
|
def flops_to_string(flops, units='GFLOPs', precision=2):
    """Convert a FLOPs number into a human-readable string.

    Note that a multiply-add is counted as one FLOP here.

    Args:
        flops (float): FLOPs number to be converted.
        units (str | None): Target units: 'GFLOPs', 'MFLOPs', 'KFLOPs' or
            anything else for raw 'FLOPs'. ``None`` selects the largest unit
            that yields a value >= 1. Default: 'GFLOPs'.
        precision (int): Digits after the decimal point. Default: 2.

    Returns:
        str: The converted FLOPs number with units.

    Examples:
        >>> flops_to_string(1e9)
        '1.0 GFLOPs'
        >>> flops_to_string(2e5, 'MFLOPs')
        '0.2 MFLOPs'
        >>> flops_to_string(3e-9, None)
        '3e-09 FLOPs'
    """
    scales = (('GFLOPs', 10.**9), ('MFLOPs', 10.**6), ('KFLOPs', 10.**3))
    if units is None:
        # Pick the largest unit in which the value is at least 1.
        for unit_name, divisor in scales:
            if flops // divisor > 0:
                return f'{round(flops / divisor, precision)} {unit_name}'
        return f'{flops} FLOPs'
    for unit_name, divisor in scales:
        if units == unit_name:
            return f'{round(flops / divisor, precision)} {unit_name}'
    return f'{flops} FLOPs'
| 159 |
+
|
| 160 |
+
|
def params_to_string(num_params, units=None, precision=2):
    """Convert a parameter count into a human-readable string.

    Args:
        num_params (float): Parameter number to be converted.
        units (str | None): Target units: 'M', 'K', or anything else for the
            raw number. ``None`` selects a unit automatically. Default: None.
        precision (int): Digits after the decimal point. Default: 2.

    Returns:
        str: The converted parameter number with units.

    Examples:
        >>> params_to_string(1e9)
        '1000.0 M'
        >>> params_to_string(2e5)
        '200.0 k'
        >>> params_to_string(3e-9)
        '3e-09'
    """
    # Explicit unit requested by the caller.
    if units == 'M':
        return str(round(num_params / 10.**6, precision)) + ' ' + units
    if units == 'K':
        return str(round(num_params / 10.**3, precision)) + ' ' + units
    if units is not None:
        return str(num_params)
    # Automatic unit selection.
    if num_params // 10**6 > 0:
        return str(round(num_params / 10**6, precision)) + ' M'
    if num_params // 10**3:
        return str(round(num_params / 10**3, precision)) + ' k'
    return str(num_params)
| 196 |
+
|
| 197 |
+
|
def print_model_with_flops(model,
                           total_flops,
                           total_params,
                           units='GFLOPs',
                           precision=3,
                           ost=sys.stdout,
                           flush=False):
    """Print a model with FLOPs and parameter stats for each layer.

    Temporarily patches ``extra_repr`` of every submodule so that the
    standard ``print(model)`` output includes per-layer parameter counts,
    FLOPs, and their percentages of the totals. The patches are removed
    before returning, so the model is left unchanged.

    Args:
        model (nn.Module): The model to be printed. Must already carry the
            ``__flops__``/``__params__``/``__batch_counter__`` attributes
            set up by ``add_flops_counting_methods`` and a forward pass.
        total_flops (float): Total FLOPs of the model.
        total_params (float): Total parameter counts of the model.
        units (str | None): Converted FLOPs units. Default: 'GFLOPs'.
        precision (int): Digit number after the decimal point. Default: 3.
        ost (stream): same as `file` param in :func:`print`.
            Default: sys.stdout.
        flush (bool): same as that in :func:`print`. Default: False.
    """

    def accumulate_params(self):
        # Leaf (supported) modules carry their own count; containers sum
        # over their children recursively.
        if is_supported_instance(self):
            return self.__params__
        else:
            sum = 0
            for m in self.children():
                sum += m.accumulate_params()
            return sum

    def accumulate_flops(self):
        # Divide by the batch counter to report per-image FLOPs; `model`
        # is captured from the enclosing scope.
        if is_supported_instance(self):
            return self.__flops__ / model.__batch_counter__
        else:
            sum = 0
            for m in self.children():
                sum += m.accumulate_flops()
            return sum

    def flops_repr(self):
        # Extended repr: params, params%, FLOPs, FLOPs%, then the module's
        # original extra_repr text.
        accumulated_num_params = self.accumulate_params()
        accumulated_flops_cost = self.accumulate_flops()
        return ', '.join([
            params_to_string(
                accumulated_num_params, units='M', precision=precision),
            '{:.3%} Params'.format(accumulated_num_params / total_params),
            flops_to_string(
                accumulated_flops_cost, units=units, precision=precision),
            '{:.3%} FLOPs'.format(accumulated_flops_cost / total_flops),
            self.original_extra_repr()
        ])

    def add_extra_repr(m):
        # Bind the helpers as methods on each submodule and swap in the
        # stats-aware extra_repr, keeping the original for restoration.
        m.accumulate_flops = accumulate_flops.__get__(m)
        m.accumulate_params = accumulate_params.__get__(m)
        flops_extra_repr = flops_repr.__get__(m)
        if m.extra_repr != flops_extra_repr:
            m.original_extra_repr = m.extra_repr
            m.extra_repr = flops_extra_repr
            assert m.extra_repr != m.original_extra_repr

    def del_extra_repr(m):
        # Undo everything add_extra_repr did, restoring the module.
        if hasattr(m, 'original_extra_repr'):
            m.extra_repr = m.original_extra_repr
            del m.original_extra_repr
        if hasattr(m, 'accumulate_flops'):
            del m.accumulate_flops

    model.apply(add_extra_repr)
    print(model, file=ost, flush=flush)
    model.apply(del_extra_repr)
| 305 |
+
|
| 306 |
+
|
def get_model_parameters_number(model):
    """Count the trainable parameters of a model.

    Args:
        model (nn.Module): The model whose parameters are counted.

    Returns:
        int: Number of elements across all parameters with
        ``requires_grad=True``.
    """
    return sum(
        param.numel() for param in model.parameters() if param.requires_grad)
| 318 |
+
|
| 319 |
+
|
def add_flops_counting_methods(net_main_module):
    """Attach the FLOPs-counting API to an existing ``nn.Module``.

    Binds ``start_flops_count``, ``stop_flops_count``, ``reset_flops_count``
    and ``compute_average_flops_cost`` as methods on the module object so
    that each of them can access the network through ``self``, then resets
    the counters.
    """
    for method in (start_flops_count, stop_flops_count, reset_flops_count,
                   compute_average_flops_cost):
        setattr(net_main_module, method.__name__,
                method.__get__(net_main_module))

    net_main_module.reset_flops_count()

    return net_main_module
| 335 |
+
|
| 336 |
+
|
def compute_average_flops_cost(self):
    """Compute the mean FLOPs cost per image.

    Available after ``add_flops_counting_methods()`` has been applied to the
    network and at least one forward pass has run.

    Returns:
        tuple: (mean FLOPs per image, total trainable parameter count).
    """
    total_flops = sum(module.__flops__ for module in self.modules()
                      if is_supported_instance(module))
    return (total_flops / self.__batch_counter__,
            get_model_parameters_number(self))
| 353 |
+
|
| 354 |
+
|
def start_flops_count(self):
    """Activate the computation of mean flops consumption per image.

    Available after ``add_flops_counting_methods()`` is called on the net
    object. It should be called before running the network: it registers the
    batch-counting hook on the root module and a per-op counting hook on
    every supported submodule.
    """
    add_batch_counter_hook_function(self)

    def add_flops_counter_hook_function(module):
        # Register each supported module's counting hook exactly once.
        if is_supported_instance(module):
            if hasattr(module, '__flops_handle__'):
                return
            handle = module.register_forward_hook(
                get_modules_mapping()[type(module)])
            module.__flops_handle__ = handle

    # The original wrapped the callback in ``functools.partial`` with no
    # bound arguments, which is a no-op; apply the function directly.
    self.apply(add_flops_counter_hook_function)
| 376 |
+
|
| 377 |
+
|
def stop_flops_count(self):
    """Stop computing the mean flops consumption per image.

    A method to stop computing the mean flops consumption per image, which
    will be available after ``add_flops_counting_methods()`` is called on a
    desired net object. It can be called to pause the computation whenever.
    Removes both the batch-counting hook and all per-op counting hooks.
    """
    remove_batch_counter_hook_function(self)
    self.apply(remove_flops_counter_hook_function)
| 387 |
+
|
| 388 |
+
|
def reset_flops_count(self):
    """Reset statistics computed so far.

    A method to reset computed statistics, which will be available after
    `add_flops_counting_methods()` is called on a desired net object.
    Zeroes the batch counter and the per-module ``__flops__``/``__params__``
    accumulators.
    """
    add_batch_counter_variables_or_reset(self)
    self.apply(add_flops_counter_variable_or_reset)
| 397 |
+
|
| 398 |
+
|
| 399 |
+
# ---- Internal functions
|
def empty_flops_counter_hook(module, input, output):
    """Forward hook for ops that contribute zero FLOPs.

    Kept as an explicit no-op so such modules still participate in the
    counting machinery.
    """
    module.__flops__ += 0
| 402 |
+
|
| 403 |
+
|
def upsample_flops_counter_hook(module, input, output):
    """Forward hook counting one FLOP per element of ``output[0]``."""
    first_output = output[0]
    # Product over the leading dim and every remaining dim of the shape.
    element_count = first_output.shape[0]
    for dim in first_output.shape[1:]:
        element_count *= dim
    module.__flops__ += int(element_count)
| 411 |
+
|
| 412 |
+
|
def relu_flops_counter_hook(module, input, output):
    """Forward hook counting one FLOP per activated output element."""
    module.__flops__ += int(output.numel())
| 416 |
+
|
| 417 |
+
|
def linear_flops_counter_hook(module, input, output):
    """Forward hook for ``nn.Linear``: input elements times output features.

    PyTorch validates dimensions itself, so only the last output dim
    matters here.
    """
    in_tensor = input[0]  # hooks receive inputs as a tuple
    out_features = output.shape[-1]
    module.__flops__ += int(np.prod(in_tensor.shape) * out_features)
| 423 |
+
|
| 424 |
+
|
def pool_flops_counter_hook(module, input, output):
    """Forward hook for pooling layers: one FLOP per input element."""
    module.__flops__ += int(np.prod(input[0].shape))
| 428 |
+
|
| 429 |
+
|
def norm_flops_counter_hook(module, input, output):
    """Forward hook for normalization layers.

    Counts one FLOP per input element for the normalization itself, doubled
    when the layer applies a learned affine (scale-and-shift) transform.
    """
    elementwise_ops = np.prod(input[0].shape)
    has_affine = (getattr(module, 'affine', False)
                  or getattr(module, 'elementwise_affine', False))
    module.__flops__ += int(elementwise_ops * (2 if has_affine else 1))
| 438 |
+
|
| 439 |
+
|
def deconv_flops_counter_hook(conv_module, input, output):
    """Forward hook for ``nn.ConvTranspose2d``.

    Kernel work in a transposed convolution is proportional to the *input*
    spatial size; the bias, when present, is added once per *output*
    element.
    """
    # Can have multiple inputs, getting the first one
    input = input[0]

    batch_size = input.shape[0]
    input_height, input_width = input.shape[2:]

    kernel_height, kernel_width = conv_module.kernel_size
    in_channels = conv_module.in_channels
    out_channels = conv_module.out_channels
    groups = conv_module.groups

    filters_per_channel = out_channels // groups
    conv_per_position_flops = (
        kernel_height * kernel_width * in_channels * filters_per_channel)

    active_elements_count = batch_size * input_height * input_width
    overall_conv_flops = conv_per_position_flops * active_elements_count
    bias_flops = 0
    if conv_module.bias is not None:
        output_height, output_width = output.shape[2:]
        # Fix: the bias term must cover the full output plane
        # (height * width); the original multiplied height by itself.
        bias_flops = out_channels * batch_size * output_height * output_width
    overall_flops = overall_conv_flops + bias_flops

    conv_module.__flops__ += int(overall_flops)
| 465 |
+
|
| 466 |
+
|
def conv_flops_counter_hook(conv_module, input, output):
    """Forward hook for ``nn.Conv1d/2d/3d``.

    FLOPs are multiply-adds: one kernel application per output position,
    plus one add per output element when a bias is present.
    """
    x = input[0]  # hooks receive inputs as a tuple

    batch = x.shape[0]
    spatial_out = int(np.prod(list(output.shape[2:])))

    kernel_positions = int(np.prod(list(conv_module.kernel_size)))
    filters_per_group = conv_module.out_channels // conv_module.groups

    # Work done for a single output position across all channels.
    per_position = (
        kernel_positions * conv_module.in_channels * filters_per_group)

    positions = batch * spatial_out
    total_flops = per_position * positions

    if conv_module.bias is not None:
        total_flops += conv_module.out_channels * positions

    conv_module.__flops__ += int(total_flops)
| 496 |
+
|
| 497 |
+
|
def batch_counter_hook(module, input, output):
    """Forward hook accumulating the number of samples seen by ``module``.

    The batch size is the length of the first positional input; when the
    module received no positional inputs, a batch size of 1 is assumed and
    a warning is printed. (A dead ``pass`` statement before the warning was
    removed.)
    """
    batch_size = 1
    if len(input) > 0:
        # Can have multiple inputs, getting the first one
        batch_size = len(input[0])
    else:
        print('Warning! No positional inputs found for a module, '
              'assuming batch size is 1.')
    module.__batch_counter__ += batch_size
| 509 |
+
|
| 510 |
+
|
def add_batch_counter_variables_or_reset(module):
    """Create or zero the per-module sample counter used by the hooks."""

    module.__batch_counter__ = 0
| 514 |
+
|
| 515 |
+
|
def add_batch_counter_hook_function(module):
    """Register the batch-counting forward hook once per module."""
    if not hasattr(module, '__batch_counter_handle__'):
        module.__batch_counter_handle__ = module.register_forward_hook(
            batch_counter_hook)
| 522 |
+
|
| 523 |
+
|
def remove_batch_counter_hook_function(module):
    """Detach the batch-counting hook from ``module``, if one is attached."""
    handle = getattr(module, '__batch_counter_handle__', None)
    if handle is not None:
        handle.remove()
        del module.__batch_counter_handle__
| 528 |
+
|
| 529 |
+
|
def add_flops_counter_variable_or_reset(module):
    """(Re)initialize the ``__flops__``/``__params__`` counters on a module.

    Only supported module types are touched. A warning is printed when the
    attributes already exist, since clobbering them may interfere with user
    code.
    """
    if is_supported_instance(module):
        if hasattr(module, '__flops__') or hasattr(module, '__params__'):
            # Fixed message spacing: the original concatenation printed
            # e.g. '...for the moduleConv2d ptflops...'.
            print('Warning: variables __flops__ or __params__ are already '
                  'defined for the module ' + type(module).__name__ +
                  '. ptflops can affect your code!')
        module.__flops__ = 0
        module.__params__ = get_model_parameters_number(module)
| 538 |
+
|
| 539 |
+
|
def is_supported_instance(module):
    """Return True if a FLOPs-counting hook exists for this exact type.

    Note this intentionally checks ``type(module)`` (not ``isinstance``),
    so subclasses of supported layers are not matched.
    """
    return type(module) in get_modules_mapping()
| 544 |
+
|
| 545 |
+
|
def remove_flops_counter_hook_function(module):
    """Detach the FLOPs-counting hook from a supported module, if present."""
    if is_supported_instance(module):
        handle = getattr(module, '__flops_handle__', None)
        if handle is not None:
            handle.remove()
            del module.__flops_handle__
| 551 |
+
|
| 552 |
+
|
def get_modules_mapping():
    """Map every supported module class to its FLOPs-counting hook."""
    conv_ops = (nn.Conv1d, nn.Conv2d, mmcv.cnn.bricks.Conv2d, nn.Conv3d,
                mmcv.cnn.bricks.Conv3d)
    activation_ops = (nn.ReLU, nn.PReLU, nn.ELU, nn.LeakyReLU, nn.ReLU6)
    pool_ops = (nn.MaxPool1d, nn.AvgPool1d, nn.AvgPool2d, nn.MaxPool2d,
                mmcv.cnn.bricks.MaxPool2d, nn.MaxPool3d,
                mmcv.cnn.bricks.MaxPool3d, nn.AvgPool3d, nn.AdaptiveMaxPool1d,
                nn.AdaptiveAvgPool1d, nn.AdaptiveMaxPool2d,
                nn.AdaptiveAvgPool2d, nn.AdaptiveMaxPool3d,
                nn.AdaptiveAvgPool3d)
    norm_ops = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d, nn.GroupNorm,
                nn.InstanceNorm1d, nn.InstanceNorm2d, nn.InstanceNorm3d,
                nn.LayerNorm)
    linear_ops = (nn.Linear, mmcv.cnn.bricks.Linear)
    upsample_ops = (nn.Upsample,)
    deconv_ops = (nn.ConvTranspose2d, mmcv.cnn.bricks.ConvTranspose2d)

    mapping = {}
    for ops, hook in ((conv_ops, conv_flops_counter_hook),
                      (activation_ops, relu_flops_counter_hook),
                      (pool_ops, pool_flops_counter_hook),
                      (norm_ops, norm_flops_counter_hook),
                      (linear_ops, linear_flops_counter_hook),
                      (upsample_ops, upsample_flops_counter_hook),
                      (deconv_ops, deconv_flops_counter_hook)):
        mapping.update(dict.fromkeys(ops, hook))
    return mapping
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/base_module.py
ADDED
|
@@ -0,0 +1,195 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import copy
|
| 3 |
+
import warnings
|
| 4 |
+
from abc import ABCMeta
|
| 5 |
+
from collections import defaultdict
|
| 6 |
+
from logging import FileHandler
|
| 7 |
+
|
| 8 |
+
import torch.nn as nn
|
| 9 |
+
|
| 10 |
+
from annotator.uniformer.mmcv.runner.dist_utils import master_only
|
| 11 |
+
from annotator.uniformer.mmcv.utils.logging import get_logger, logger_initialized, print_log
|
| 12 |
+
|
| 13 |
+
|
class BaseModule(nn.Module, metaclass=ABCMeta):
    """Base module for all modules in openmmlab.

    ``BaseModule`` is a wrapper of ``torch.nn.Module`` with additional
    functionality of parameter initialization. Compared with
    ``torch.nn.Module``, ``BaseModule`` mainly adds three attributes.

    - ``init_cfg``: the config to control the initialization.
    - ``init_weights``: The function of parameter
      initialization and recording initialization
      information.
    - ``_params_init_info``: Used to track the parameter
      initialization information. This attribute only
      exists during executing the ``init_weights``.

    Args:
        init_cfg (dict, optional): Initialization config dict.
    """

    def __init__(self, init_cfg=None):
        """Initialize BaseModule, inherited from `torch.nn.Module`"""

        # NOTE init_cfg can be defined in different levels, but init_cfg
        # in low levels has a higher priority.

        super(BaseModule, self).__init__()
        # define default value of init_cfg instead of hard code
        # in init_weights() function
        self._is_init = False

        # deep-copied so later mutations by callers don't leak in
        self.init_cfg = copy.deepcopy(init_cfg)

        # Backward compatibility in derived classes
        # if pretrained is not None:
        #     warnings.warn('DeprecationWarning: pretrained is a deprecated \
        #         key, please consider using init_cfg')
        #     self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)

    @property
    def is_init(self):
        # True once init_weights() has completed for this module
        return self._is_init

    def init_weights(self):
        """Initialize the weights."""

        is_top_level_module = False
        # check if it is top-level module
        if not hasattr(self, '_params_init_info'):
            # The `_params_init_info` is used to record the initialization
            # information of the parameters
            # the key should be the obj:`nn.Parameter` of model and the value
            # should be a dict containing
            # - init_info (str): The string that describes the initialization.
            # - tmp_mean_value (FloatTensor): The mean of the parameter,
            #       which indicates whether the parameter has been modified.
            # this attribute would be deleted after all parameters
            # is initialized.
            self._params_init_info = defaultdict(dict)
            is_top_level_module = True

            # Initialize the `_params_init_info`,
            # When detecting the `tmp_mean_value` of
            # the corresponding parameter is changed, update related
            # initialization information
            for name, param in self.named_parameters():
                self._params_init_info[param][
                    'init_info'] = f'The value is the same before and ' \
                                   f'after calling `init_weights` ' \
                                   f'of {self.__class__.__name__} '
                self._params_init_info[param][
                    'tmp_mean_value'] = param.data.mean()

            # pass `params_init_info` to all submodules
            # All submodules share the same `params_init_info`,
            # so it will be updated when parameters are
            # modified at any level of the model.
            for sub_module in self.modules():
                sub_module._params_init_info = self._params_init_info

        # Get the initialized logger, if not exist,
        # create a logger named `mmcv`
        logger_names = list(logger_initialized.keys())
        logger_name = logger_names[0] if logger_names else 'mmcv'

        # imported lazily here to avoid a circular import with ..cnn
        from ..cnn import initialize
        from ..cnn.utils.weight_init import update_init_info
        module_name = self.__class__.__name__
        if not self._is_init:
            if self.init_cfg:
                print_log(
                    f'initialize {module_name} with init_cfg {self.init_cfg}',
                    logger=logger_name)
                initialize(self, self.init_cfg)
                if isinstance(self.init_cfg, dict):
                    # prevent the parameters of
                    # the pre-trained model
                    # from being overwritten by
                    # the `init_weights`
                    if self.init_cfg['type'] == 'Pretrained':
                        return

            for m in self.children():
                if hasattr(m, 'init_weights'):
                    m.init_weights()
                    # users may overload the `init_weights`
                    update_init_info(
                        m,
                        init_info=f'Initialized by '
                        f'user-defined `init_weights`'
                        f' in {m.__class__.__name__} ')

            self._is_init = True
        else:
            warnings.warn(f'init_weights of {self.__class__.__name__} has '
                          f'been called more than once.')

        if is_top_level_module:
            # only the outermost call dumps the log and cleans up
            self._dump_init_info(logger_name)

            for sub_module in self.modules():
                del sub_module._params_init_info

    @master_only
    def _dump_init_info(self, logger_name):
        """Dump the initialization information to a file named
        `initialization.log.json` in workdir.

        Args:
            logger_name (str): The name of logger.
        """

        logger = get_logger(logger_name)

        with_file_handler = False
        # dump the information to the logger file if there is a `FileHandler`
        for handler in logger.handlers:
            if isinstance(handler, FileHandler):
                handler.stream.write(
                    'Name of parameter - Initialization information\n')
                for name, param in self.named_parameters():
                    handler.stream.write(
                        f'\n{name} - {param.shape}: '
                        f"\n{self._params_init_info[param]['init_info']} \n")
                handler.stream.flush()
                with_file_handler = True
        if not with_file_handler:
            # fall back to the (possibly console-only) logger
            for name, param in self.named_parameters():
                print_log(
                    f'\n{name} - {param.shape}: '
                    f"\n{self._params_init_info[param]['init_info']} \n ",
                    logger=logger_name)

    def __repr__(self):
        s = super().__repr__()
        if self.init_cfg:
            s += f'\ninit_cfg={self.init_cfg}'
        return s
| 171 |
+
|
| 172 |
+
|
| 173 |
+
class Sequential(BaseModule, nn.Sequential):
    """Sequential module in openmmlab.

    Behaves like :class:`torch.nn.Sequential` while also inheriting the
    ``init_cfg``-driven weight initialization of :class:`BaseModule`.

    Args:
        init_cfg (dict, optional): Initialization config dict.
    """

    def __init__(self, *args, init_cfg=None):
        # Initialize BaseModule first so the init_cfg bookkeeping exists
        # before nn.Sequential registers the child modules.
        BaseModule.__init__(self, init_cfg)
        nn.Sequential.__init__(self, *args)
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
class ModuleList(BaseModule, nn.ModuleList):
    """ModuleList in openmmlab.

    Behaves like :class:`torch.nn.ModuleList` while also inheriting the
    ``init_cfg``-driven weight initialization of :class:`BaseModule`.

    Args:
        modules (iterable, optional): an iterable of modules to add.
        init_cfg (dict, optional): Initialization config dict.
    """

    def __init__(self, modules=None, init_cfg=None):
        # Initialize BaseModule first so the init_cfg bookkeeping exists
        # before nn.ModuleList registers the child modules.
        BaseModule.__init__(self, init_cfg)
        nn.ModuleList.__init__(self, modules)
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/checkpoint.py
ADDED
|
@@ -0,0 +1,707 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import io
|
| 3 |
+
import os
|
| 4 |
+
import os.path as osp
|
| 5 |
+
import pkgutil
|
| 6 |
+
import re
|
| 7 |
+
import time
|
| 8 |
+
import warnings
|
| 9 |
+
from collections import OrderedDict
|
| 10 |
+
from importlib import import_module
|
| 11 |
+
from tempfile import TemporaryDirectory
|
| 12 |
+
|
| 13 |
+
import torch
|
| 14 |
+
import torchvision
|
| 15 |
+
from torch.optim import Optimizer
|
| 16 |
+
from torch.utils import model_zoo
|
| 17 |
+
|
| 18 |
+
import annotator.uniformer.mmcv as mmcv
|
| 19 |
+
from ..fileio import FileClient
|
| 20 |
+
from ..fileio import load as load_file
|
| 21 |
+
from ..parallel import is_module_wrapper
|
| 22 |
+
from ..utils import mkdir_or_exist
|
| 23 |
+
from .dist_utils import get_dist_info
|
| 24 |
+
|
| 25 |
+
ENV_MMCV_HOME = 'MMCV_HOME'
|
| 26 |
+
ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
|
| 27 |
+
DEFAULT_CACHE_DIR = '~/.cache'
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def _get_mmcv_home():
    """Return the mmcv cache directory, creating it if necessary.

    The location is ``$MMCV_HOME`` when set; otherwise
    ``$XDG_CACHE_HOME/mmcv``, falling back to ``~/.cache/mmcv``.

    Returns:
        str: Absolute (user-expanded) path of the mmcv home directory.
    """
    cache_root = os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR)
    raw_home = os.getenv(ENV_MMCV_HOME, os.path.join(cache_root, 'mmcv'))
    mmcv_home = os.path.expanduser(raw_home)

    mkdir_or_exist(mmcv_home)
    return mmcv_home
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def load_state_dict(module, state_dict, strict=False, logger=None):
    """Load state_dict to a module.

    This method is modified from :meth:`torch.nn.Module.load_state_dict`.
    Default value for ``strict`` is set to ``False`` and the message for
    param mismatch will be shown even if strict is False.

    Args:
        module (Module): Module that receives the state_dict.
        state_dict (OrderedDict): Weights.
        strict (bool): whether to strictly enforce that the keys
            in :attr:`state_dict` match the keys returned by this module's
            :meth:`~torch.nn.Module.state_dict` function. Default: ``False``.
        logger (:obj:`logging.Logger`, optional): Logger to log the error
            message. If not specified, print function will be used.
    """
    unexpected_keys = []
    all_missing_keys = []
    err_msg = []

    # Preserve the per-prefix metadata (e.g. module versions) across the
    # shallow copy; _load_from_state_dict reads it for version control.
    metadata = getattr(state_dict, '_metadata', None)
    state_dict = state_dict.copy()
    if metadata is not None:
        state_dict._metadata = metadata

    # use _load_from_state_dict to enable checkpoint version control
    def load(module, prefix=''):
        # recursively check parallel module in case that the model has a
        # complicated structure, e.g., nn.Module(nn.Module(DDP))
        if is_module_wrapper(module):
            module = module.module
        local_metadata = {} if metadata is None else metadata.get(
            prefix[:-1], {})
        # `True` = always copy matching params even on key mismatch; the
        # missing/unexpected keys are accumulated instead of raising here.
        module._load_from_state_dict(state_dict, prefix, local_metadata, True,
                                     all_missing_keys, unexpected_keys,
                                     err_msg)
        for name, child in module._modules.items():
            if child is not None:
                load(child, prefix + name + '.')

    load(module)
    load = None  # break load->load reference cycle

    # ignore "num_batches_tracked" of BN layers
    missing_keys = [
        key for key in all_missing_keys if 'num_batches_tracked' not in key
    ]

    if unexpected_keys:
        err_msg.append('unexpected key in source '
                       f'state_dict: {", ".join(unexpected_keys)}\n')
    if missing_keys:
        err_msg.append(
            f'missing keys in source state_dict: {", ".join(missing_keys)}\n')

    # Only rank 0 reports mismatches to avoid duplicated output in
    # distributed runs.
    rank, _ = get_dist_info()
    if len(err_msg) > 0 and rank == 0:
        err_msg.insert(
            0, 'The model and loaded state dict do not match exactly\n')
        err_msg = '\n'.join(err_msg)
        if strict:
            raise RuntimeError(err_msg)
        elif logger is not None:
            logger.warning(err_msg)
        else:
            print(err_msg)
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def get_torchvision_models():
    """Collect the ``model_urls`` tables from all torchvision model modules.

    Returns:
        dict: Mapping from torchvision model name to its download URL.
    """
    urls = dict()
    for _, module_name, is_pkg in pkgutil.walk_packages(
            torchvision.models.__path__):
        if is_pkg:
            # sub-packages (e.g. detection) carry no top-level model_urls
            continue
        zoo_module = import_module(f'torchvision.models.{module_name}')
        if hasattr(zoo_module, 'model_urls'):
            urls.update(getattr(zoo_module, 'model_urls'))
    return urls
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
def get_external_models():
    """Load the open-mmlab model-zoo URL table.

    The default table shipped with mmcv is merged with — and overridden
    by — an optional user table at ``$MMCV_HOME/open_mmlab.json``.

    Returns:
        dict: Mapping from model name to a URL or a path relative to
        the mmcv home directory.
    """
    mmcv_home = _get_mmcv_home()
    default_json_path = osp.join(mmcv.__path__[0], 'model_zoo/open_mmlab.json')
    urls = load_file(default_json_path)
    assert isinstance(urls, dict)
    user_json_path = osp.join(mmcv_home, 'open_mmlab.json')
    if osp.exists(user_json_path):
        user_urls = load_file(user_json_path)
        assert isinstance(user_urls, dict)
        # user entries take precedence over the shipped defaults
        urls.update(user_urls)

    return urls
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
def get_mmcls_models():
    """Load the mmcls model-zoo URL table shipped with mmcv.

    Returns:
        dict: Mapping from mmcls model name to its download URL.
    """
    mmcls_json_path = osp.join(mmcv.__path__[0], 'model_zoo/mmcls.json')
    return load_file(mmcls_json_path)
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
def get_deprecated_model_names():
    """Load the table of deprecated model names shipped with mmcv.

    Returns:
        dict: Mapping from a deprecated model name to its replacement.
    """
    json_path = osp.join(mmcv.__path__[0], 'model_zoo/deprecated.json')
    deprecated = load_file(json_path)
    assert isinstance(deprecated, dict)

    return deprecated
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
def _process_mmcls_checkpoint(checkpoint):
|
| 152 |
+
state_dict = checkpoint['state_dict']
|
| 153 |
+
new_state_dict = OrderedDict()
|
| 154 |
+
for k, v in state_dict.items():
|
| 155 |
+
if k.startswith('backbone.'):
|
| 156 |
+
new_state_dict[k[9:]] = v
|
| 157 |
+
new_checkpoint = dict(state_dict=new_state_dict)
|
| 158 |
+
|
| 159 |
+
return new_checkpoint
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
class CheckpointLoader:
    """A general checkpoint loader to manage all schemes."""

    # prefix -> loader function, kept sorted so that longer (more
    # specific) prefixes are matched first.
    _schemes = {}

    @classmethod
    def _register_scheme(cls, prefixes, loader, force=False):
        """Insert ``loader`` for each prefix, re-sorting the scheme table.

        Args:
            prefixes (str or list[str] or tuple[str]): Path prefix(es).
            loader (callable): Loader taking ``(filename, map_location)``.
            force (bool): Overwrite an existing registration if True.

        Raises:
            KeyError: If a prefix is already registered and ``force`` is
                False.
        """
        if isinstance(prefixes, str):
            prefixes = [prefixes]
        else:
            assert isinstance(prefixes, (list, tuple))
        for prefix in prefixes:
            if (prefix not in cls._schemes) or force:
                cls._schemes[prefix] = loader
            else:
                raise KeyError(
                    f'{prefix} is already registered as a loader backend, '
                    'add "force=True" if you want to override it')
        # sort, longer prefixes take priority
        cls._schemes = OrderedDict(
            sorted(cls._schemes.items(), key=lambda t: t[0], reverse=True))

    @classmethod
    def register_scheme(cls, prefixes, loader=None, force=False):
        """Register a loader to CheckpointLoader.

        This method can be used as a normal class method or a decorator.

        Args:
            prefixes (str or list[str] or tuple[str]):
                The prefix of the registered loader.
            loader (function, optional): The loader function to be registered.
                When this method is used as a decorator, loader is None.
                Defaults to None.
            force (bool, optional): Whether to override the loader
                if the prefix has already been registered. Defaults to False.
        """

        if loader is not None:
            cls._register_scheme(prefixes, loader, force=force)
            return

        def _register(loader_cls):
            cls._register_scheme(prefixes, loader_cls, force=force)
            return loader_cls

        return _register

    @classmethod
    def _get_checkpoint_loader(cls, path):
        """Finds a loader that supports the given path. Falls back to the
        local loader if no other loader is found.

        Args:
            path (str): checkpoint path

        Returns:
            loader (function): checkpoint loader

        Note:
            Returns ``None`` only when no registered prefix matches; in
            practice the ``''`` prefix of the local-file loader matches
            every path once it has been registered.
        """

        # _schemes is sorted longest-prefix-first, so the most specific
        # registered scheme wins.
        for p in cls._schemes:
            if path.startswith(p):
                return cls._schemes[p]

    @classmethod
    def load_checkpoint(cls, filename, map_location=None, logger=None):
        """load checkpoint through URL scheme path.

        Args:
            filename (str): checkpoint file name with given prefix
            map_location (str, optional): Same as :func:`torch.load`.
                Default: None
            logger (:mod:`logging.Logger`, optional): The logger for message.
                Default: None

        Returns:
            dict or OrderedDict: The loaded checkpoint.
        """

        checkpoint_loader = cls._get_checkpoint_loader(filename)
        class_name = checkpoint_loader.__name__
        # class_name[10:] strips the 'load_from_' prefix of the loader name.
        # BUG FIX: the message previously contained no placeholder for the
        # checkpoint path; include the actual filename being loaded.
        mmcv.print_log(
            f'load checkpoint from {class_name[10:]} path: {filename}', logger)
        return checkpoint_loader(filename, map_location)
|
| 246 |
+
|
| 247 |
+
|
| 248 |
+
@CheckpointLoader.register_scheme(prefixes='')
def load_from_local(filename, map_location):
    """load checkpoint by local file path.

    Registered with the empty prefix, so it is the fallback loader for
    any path that matches no other scheme.

    Args:
        filename (str): local checkpoint file path
        map_location (str, optional): Same as :func:`torch.load`.

    Returns:
        dict or OrderedDict: The loaded checkpoint.

    Raises:
        IOError: If ``filename`` does not point to an existing file.
    """

    if not osp.isfile(filename):
        # BUG FIX: the message previously contained no placeholder and
        # printed the literal text '(unknown)'; report the actual path.
        raise IOError(f'{filename} is not a checkpoint file')
    checkpoint = torch.load(filename, map_location=map_location)
    return checkpoint
|
| 264 |
+
|
| 265 |
+
|
| 266 |
+
@CheckpointLoader.register_scheme(prefixes=('http://', 'https://'))
def load_from_http(filename, map_location=None, model_dir=None):
    """load checkpoint through HTTP or HTTPS scheme path. In distributed
    setting, this function only download checkpoint at local rank 0.

    Args:
        filename (str): checkpoint file path with modelzoo or
            torchvision prefix
        map_location (str, optional): Same as :func:`torch.load`.
        model_dir (string, optional): directory in which to save the object,
            Default: None

    Returns:
        dict or OrderedDict: The loaded checkpoint.
    """
    rank, world_size = get_dist_info()
    # Prefer the per-node LOCAL_RANK when available so that exactly one
    # process per node performs the download.
    rank = int(os.environ.get('LOCAL_RANK', rank))
    if rank == 0:
        # Rank 0 downloads first, populating the shared cache directory.
        checkpoint = model_zoo.load_url(
            filename, model_dir=model_dir, map_location=map_location)
    if world_size > 1:
        # All ranks wait for the download to finish ...
        torch.distributed.barrier()
        if rank > 0:
            # ... then the remaining ranks load from the now-warm cache.
            checkpoint = model_zoo.load_url(
                filename, model_dir=model_dir, map_location=map_location)
    return checkpoint
|
| 292 |
+
|
| 293 |
+
|
| 294 |
+
@CheckpointLoader.register_scheme(prefixes='pavi://')
def load_from_pavi(filename, map_location=None):
    """load checkpoint through the file path prefixed with pavi. In distributed
    setting, this function download ckpt at all ranks to different temporary
    directories.

    Args:
        filename (str): checkpoint file path with pavi prefix
        map_location (str, optional): Same as :func:`torch.load`.
            Default: None

    Returns:
        dict or OrderedDict: The loaded checkpoint.

    Raises:
        AssertionError: If ``filename`` does not start with ``pavi://``.
        ImportError: If the ``pavi`` package is not installed.
    """
    # BUG FIX: the assertion message previously contained no placeholder
    # and printed the literal text '(unknown)'; report the actual value.
    assert filename.startswith('pavi://'), \
        f'Expected filename startswith `pavi://`, but get {filename}'
    model_path = filename[7:]  # strip the 'pavi://' prefix

    try:
        from pavi import modelcloud
    except ImportError:
        raise ImportError(
            'Please install pavi to load checkpoint from modelcloud.')

    model = modelcloud.get(model_path)
    # Download into a per-call temporary directory that is removed once
    # the checkpoint has been deserialized.
    with TemporaryDirectory() as tmp_dir:
        downloaded_file = osp.join(tmp_dir, model.name)
        model.download(downloaded_file)
        checkpoint = torch.load(downloaded_file, map_location=map_location)
    return checkpoint
|
| 324 |
+
|
| 325 |
+
|
| 326 |
+
@CheckpointLoader.register_scheme(prefixes='s3://')
def load_from_ceph(filename, map_location=None, backend='petrel'):
    """load checkpoint through the file path prefixed with s3. In distributed
    setting, this function download ckpt at all ranks to different temporary
    directories.

    Args:
        filename (str): checkpoint file path with s3 prefix
        map_location (str, optional): Same as :func:`torch.load`.
        backend (str, optional): The storage backend type. Options are 'ceph',
            'petrel'. Default: 'petrel'.

    .. warning::
        :class:`mmcv.fileio.file_client.CephBackend` will be deprecated,
        please use :class:`mmcv.fileio.file_client.PetrelBackend` instead.

    Returns:
        dict or OrderedDict: The loaded checkpoint.

    Raises:
        ValueError: If ``backend`` is neither 'ceph' nor 'petrel'.
    """
    allowed_backends = ['ceph', 'petrel']
    if backend not in allowed_backends:
        raise ValueError(f'Load from Backend {backend} is not supported.')

    if backend == 'ceph':
        warnings.warn(
            'CephBackend will be deprecated, please use PetrelBackend instead')

    # CephClient and PetrelBackend have the same prefix 's3://' and the latter
    # will be chosen as default. If PetrelBackend can not be instantiated
    # successfully, the CephClient will be chosen.
    try:
        file_client = FileClient(backend=backend)
    except ImportError:
        # The requested backend's client library is missing: fall back to
        # the other s3-capable backend.
        allowed_backends.remove(backend)
        file_client = FileClient(backend=allowed_backends[0])

    # Read the whole object into memory and deserialize it in place.
    with io.BytesIO(file_client.get(filename)) as buffer:
        checkpoint = torch.load(buffer, map_location=map_location)
    return checkpoint
|
| 365 |
+
|
| 366 |
+
|
| 367 |
+
@CheckpointLoader.register_scheme(prefixes=('modelzoo://', 'torchvision://'))
def load_from_torchvision(filename, map_location=None):
    """Load a checkpoint named with a ``modelzoo://`` or ``torchvision://``
    prefix by resolving it to its torchvision download URL.

    Args:
        filename (str): checkpoint file path with modelzoo or
            torchvision prefix
        map_location (str, optional): Same as :func:`torch.load`.

    Returns:
        dict or OrderedDict: The loaded checkpoint.
    """
    model_urls = get_torchvision_models()
    if filename.startswith('modelzoo://'):
        warnings.warn('The URL scheme of "modelzoo://" is deprecated, please '
                      'use "torchvision://" instead')
        model_name = filename[len('modelzoo://'):]
    else:
        model_name = filename[len('torchvision://'):]
    return load_from_http(model_urls[model_name], map_location=map_location)
|
| 388 |
+
|
| 389 |
+
|
| 390 |
+
@CheckpointLoader.register_scheme(prefixes=('open-mmlab://', 'openmmlab://'))
def load_from_openmmlab(filename, map_location=None):
    """load checkpoint through the file path prefixed with open-mmlab or
    openmmlab.

    Args:
        filename (str): checkpoint file path with open-mmlab or
            openmmlab prefix
        map_location (str, optional): Same as :func:`torch.load`.
            Default: None

    Returns:
        dict or OrderedDict: The loaded checkpoint.

    Raises:
        IOError: If the table maps the model to a local path that does
            not exist under the mmcv home directory.
    """

    model_urls = get_external_models()
    prefix_str = 'open-mmlab://'
    if filename.startswith(prefix_str):
        model_name = filename[13:]  # len('open-mmlab://') == 13
    else:
        model_name = filename[12:]  # len('openmmlab://') == 12
        prefix_str = 'openmmlab://'

    deprecated_urls = get_deprecated_model_names()
    if model_name in deprecated_urls:
        warnings.warn(f'{prefix_str}{model_name} is deprecated in favor '
                      f'of {prefix_str}{deprecated_urls[model_name]}')
        model_name = deprecated_urls[model_name]
    model_url = model_urls[model_name]
    # check if is url
    if model_url.startswith(('http://', 'https://')):
        checkpoint = load_from_http(model_url, map_location=map_location)
    else:
        # the table entry is a path relative to the mmcv home directory
        filename = osp.join(_get_mmcv_home(), model_url)
        if not osp.isfile(filename):
            # BUG FIX: the message previously contained no placeholder and
            # printed the literal text '(unknown)'; report the actual path.
            raise IOError(f'{filename} is not a checkpoint file')
        checkpoint = torch.load(filename, map_location=map_location)
    return checkpoint
|
| 428 |
+
|
| 429 |
+
|
| 430 |
+
@CheckpointLoader.register_scheme(prefixes='mmcls://')
def load_from_mmcls(filename, map_location=None):
    """Load a checkpoint named with an ``mmcls://`` prefix and convert it
    into a plain backbone checkpoint.

    Args:
        filename (str): checkpoint file path with mmcls prefix
        map_location (str, optional): Same as :func:`torch.load`.

    Returns:
        dict or OrderedDict: The loaded checkpoint.
    """

    model_urls = get_mmcls_models()
    model_name = filename[len('mmcls://'):]
    raw_checkpoint = load_from_http(
        model_urls[model_name], map_location=map_location)
    # strip the mmcls 'backbone.' prefixes so the weights load directly
    return _process_mmcls_checkpoint(raw_checkpoint)
|
| 448 |
+
|
| 449 |
+
|
| 450 |
+
def _load_checkpoint(filename, map_location=None, logger=None):
    """Load checkpoint from somewhere (modelzoo, file, url).

    Thin wrapper that forwards to
    :meth:`CheckpointLoader.load_checkpoint`, which dispatches on the
    filename prefix.

    Args:
        filename (str): Accept local filepath, URL, ``torchvision://xxx``,
            ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
            details.
        map_location (str, optional): Same as :func:`torch.load`.
            Default: None.
        logger (:mod:`logging.Logger`, optional): The logger for error message.
            Default: None

    Returns:
        dict or OrderedDict: The loaded checkpoint. It can be either an
        OrderedDict storing model weights or a dict containing other
        information, which depends on the checkpoint.
    """
    return CheckpointLoader.load_checkpoint(filename, map_location, logger)
|
| 468 |
+
|
| 469 |
+
|
| 470 |
+
def _load_checkpoint_with_prefix(prefix, filename, map_location=None):
    """Load partial pretrained model with specific prefix.

    Args:
        prefix (str): The prefix of sub-module.
        filename (str): Accept local filepath, URL, ``torchvision://xxx``,
            ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
            details.
        map_location (str | None): Same as :func:`torch.load`. Default: None.

    Returns:
        dict or OrderedDict: The loaded checkpoint.
    """

    checkpoint = _load_checkpoint(filename, map_location=map_location)

    if 'state_dict' in checkpoint:
        state_dict = checkpoint['state_dict']
    else:
        state_dict = checkpoint
    # normalize the prefix so key matching is unambiguous
    if not prefix.endswith('.'):
        prefix += '.'

    trimmed = {
        key[len(prefix):]: value
        for key, value in state_dict.items() if key.startswith(prefix)
    }

    assert trimmed, f'{prefix} is not in the pretrained model'
    return trimmed
|
| 501 |
+
|
| 502 |
+
|
| 503 |
+
def load_checkpoint(model,
                    filename,
                    map_location=None,
                    strict=False,
                    logger=None,
                    revise_keys=[(r'^module\.', '')]):
    """Load checkpoint from a file or URI.

    Args:
        model (Module): Module to load checkpoint.
        filename (str): Accept local filepath, URL, ``torchvision://xxx``,
            ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
            details.
        map_location (str): Same as :func:`torch.load`.
        strict (bool): Whether to allow different params for the model and
            checkpoint.
        logger (:mod:`logging.Logger` or None): The logger for error message.
        revise_keys (list): A list of customized keywords to modify the
            state_dict in checkpoint. Each item is a (pattern, replacement)
            pair of the regular expression operations. Default: strip
            the prefix 'module.' by [(r'^module\\.', '')].
            NOTE: the mutable default is kept for interface compatibility;
            it is never mutated inside this function.

    Returns:
        dict or OrderedDict: The loaded checkpoint.

    Raises:
        RuntimeError: If the loaded object is not a dict-like checkpoint.
    """
    checkpoint = _load_checkpoint(filename, map_location, logger)
    # OrderedDict is a subclass of dict
    if not isinstance(checkpoint, dict):
        # BUG FIX: the message previously contained no placeholder and
        # printed the literal text '(unknown)'; report the actual filename.
        raise RuntimeError(
            f'No state_dict found in checkpoint file {filename}')
    # get state_dict from checkpoint
    if 'state_dict' in checkpoint:
        state_dict = checkpoint['state_dict']
    else:
        state_dict = checkpoint

    # strip prefix of state_dict
    metadata = getattr(state_dict, '_metadata', OrderedDict())
    for p, r in revise_keys:
        state_dict = OrderedDict(
            {re.sub(p, r, k): v
             for k, v in state_dict.items()})
    # Keep metadata in state_dict
    state_dict._metadata = metadata

    # load state_dict
    load_state_dict(model, state_dict, strict, logger)
    return checkpoint
|
| 551 |
+
|
| 552 |
+
|
| 553 |
+
def weights_to_cpu(state_dict):
    """Copy a model state_dict to cpu.

    Args:
        state_dict (OrderedDict): Model weights on GPU.

    Returns:
        OrderedDict: Model weights on CPU.
    """
    # DOC FIX: the Returns section previously said "on GPU".
    state_dict_cpu = OrderedDict()
    for key, val in state_dict.items():
        state_dict_cpu[key] = val.cpu()
    # Keep metadata in state_dict
    state_dict_cpu._metadata = getattr(state_dict, '_metadata', OrderedDict())
    return state_dict_cpu
|
| 568 |
+
|
| 569 |
+
|
| 570 |
+
def _save_to_state_dict(module, destination, prefix, keep_vars):
|
| 571 |
+
"""Saves module state to `destination` dictionary.
|
| 572 |
+
|
| 573 |
+
This method is modified from :meth:`torch.nn.Module._save_to_state_dict`.
|
| 574 |
+
|
| 575 |
+
Args:
|
| 576 |
+
module (nn.Module): The module to generate state_dict.
|
| 577 |
+
destination (dict): A dict where state will be stored.
|
| 578 |
+
prefix (str): The prefix for parameters and buffers used in this
|
| 579 |
+
module.
|
| 580 |
+
"""
|
| 581 |
+
for name, param in module._parameters.items():
|
| 582 |
+
if param is not None:
|
| 583 |
+
destination[prefix + name] = param if keep_vars else param.detach()
|
| 584 |
+
for name, buf in module._buffers.items():
|
| 585 |
+
# remove check of _non_persistent_buffers_set to allow nn.BatchNorm2d
|
| 586 |
+
if buf is not None:
|
| 587 |
+
destination[prefix + name] = buf if keep_vars else buf.detach()
|
| 588 |
+
|
| 589 |
+
|
| 590 |
+
def get_state_dict(module, destination=None, prefix='', keep_vars=False):
    """Returns a dictionary containing a whole state of the module.

    Both parameters and persistent buffers (e.g. running averages) are
    included. Keys are corresponding parameter and buffer names.

    This method is modified from :meth:`torch.nn.Module.state_dict` to
    recursively check parallel module in case that the model has a complicated
    structure, e.g., nn.Module(nn.Module(DDP)).

    Args:
        module (nn.Module): The module to generate state_dict.
        destination (OrderedDict): Returned dict for the state of the
            module.
        prefix (str): Prefix of the key.
        keep_vars (bool): Whether to keep the variable property of the
            parameters. Default: False.

    Returns:
        dict: A dictionary containing a whole state of the module.
    """
    # recursively check parallel module in case that the model has a
    # complicated structure, e.g., nn.Module(nn.Module(DDP))
    if is_module_wrapper(module):
        module = module.module

    # below is the same as torch.nn.Module.state_dict()
    if destination is None:
        destination = OrderedDict()
        destination._metadata = OrderedDict()
    # record the module version under its (prefix-less) key for
    # _load_from_state_dict version control on reload
    destination._metadata[prefix[:-1]] = local_metadata = dict(
        version=module._version)
    _save_to_state_dict(module, destination, prefix, keep_vars)
    for name, child in module._modules.items():
        if child is not None:
            get_state_dict(
                child, destination, prefix + name + '.', keep_vars=keep_vars)
    # state-dict hooks may return a replacement mapping; honor it as
    # torch.nn.Module.state_dict does
    for hook in module._state_dict_hooks.values():
        hook_result = hook(module, destination, prefix, local_metadata)
        if hook_result is not None:
            destination = hook_result
    return destination
|
| 632 |
+
|
| 633 |
+
|
| 634 |
+
def save_checkpoint(model,
                    filename,
                    optimizer=None,
                    meta=None,
                    file_client_args=None):
    """Save checkpoint to file.

    The checkpoint will have 3 fields: ``meta``, ``state_dict`` and
    ``optimizer``. By default ``meta`` will contain version and time info.

    Args:
        model (Module): Module whose params are to be saved.
        filename (str): Checkpoint filename.
        optimizer (:obj:`Optimizer`, optional): Optimizer to be saved.
        meta (dict, optional): Metadata to be saved in checkpoint.
        file_client_args (dict, optional): Arguments to instantiate a
            FileClient. See :class:`mmcv.fileio.FileClient` for details.
            Default: None.
            `New in version 1.3.16.`

    Raises:
        TypeError: If ``meta`` is neither a dict nor None.
        ValueError: If ``file_client_args`` is given together with a
            ``pavi://`` filename.
        ImportError: If a ``pavi://`` filename is used but pavi is not
            installed.
    """
    if meta is None:
        meta = {}
    elif not isinstance(meta, dict):
        raise TypeError(f'meta must be a dict or None, but got {type(meta)}')
    meta.update(mmcv_version=mmcv.__version__, time=time.asctime())

    # Unwrap DDP/DP-style wrappers so keys are not prefixed with `module.`
    if is_module_wrapper(model):
        model = model.module

    if hasattr(model, 'CLASSES') and model.CLASSES is not None:
        # save class name to the meta
        meta.update(CLASSES=model.CLASSES)

    checkpoint = {
        'meta': meta,
        # move weights to CPU so the checkpoint loads without a GPU
        'state_dict': weights_to_cpu(get_state_dict(model))
    }
    # save optimizer state dict in the checkpoint
    if isinstance(optimizer, Optimizer):
        checkpoint['optimizer'] = optimizer.state_dict()
    elif isinstance(optimizer, dict):
        # multiple optimizers are saved under their own names
        checkpoint['optimizer'] = {}
        for name, optim in optimizer.items():
            checkpoint['optimizer'][name] = optim.state_dict()

    if filename.startswith('pavi://'):
        if file_client_args is not None:
            # FIX: added the missing space before "pavi://"; the implicit
            # string concatenation previously produced 'starts with"pavi://"'.
            raise ValueError(
                'file_client_args should be "None" if filename starts with '
                f'"pavi://", but got {file_client_args}')
        try:
            from pavi import modelcloud
            from pavi import exception
        except ImportError:
            raise ImportError(
                'Please install pavi to load checkpoint from modelcloud.')
        model_path = filename[7:]
        root = modelcloud.Folder()
        model_dir, model_name = osp.split(model_path)
        try:
            model = modelcloud.get(model_dir)
        except exception.NodeNotFoundError:
            model = root.create_training_model(model_dir)
        # Serialize into a temp file, then upload it to the model cloud.
        with TemporaryDirectory() as tmp_dir:
            checkpoint_file = osp.join(tmp_dir, model_name)
            with open(checkpoint_file, 'wb') as f:
                torch.save(checkpoint, f)
                f.flush()
            model.create_file(checkpoint_file, name=model_name)
    else:
        # Serialize in memory and hand the bytes to the file backend.
        file_client = FileClient.infer_client(file_client_args, filename)
        with io.BytesIO() as f:
            torch.save(checkpoint, f)
            file_client.put(f.getvalue(), filename)
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/hooks/__init__.py
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from .checkpoint import CheckpointHook
|
| 3 |
+
from .closure import ClosureHook
|
| 4 |
+
from .ema import EMAHook
|
| 5 |
+
from .evaluation import DistEvalHook, EvalHook
|
| 6 |
+
from .hook import HOOKS, Hook
|
| 7 |
+
from .iter_timer import IterTimerHook
|
| 8 |
+
from .logger import (DvcliveLoggerHook, LoggerHook, MlflowLoggerHook,
|
| 9 |
+
NeptuneLoggerHook, PaviLoggerHook, TensorboardLoggerHook,
|
| 10 |
+
TextLoggerHook, WandbLoggerHook)
|
| 11 |
+
from .lr_updater import LrUpdaterHook
|
| 12 |
+
from .memory import EmptyCacheHook
|
| 13 |
+
from .momentum_updater import MomentumUpdaterHook
|
| 14 |
+
from .optimizer import (Fp16OptimizerHook, GradientCumulativeFp16OptimizerHook,
|
| 15 |
+
GradientCumulativeOptimizerHook, OptimizerHook)
|
| 16 |
+
from .profiler import ProfilerHook
|
| 17 |
+
from .sampler_seed import DistSamplerSeedHook
|
| 18 |
+
from .sync_buffer import SyncBuffersHook
|
| 19 |
+
|
| 20 |
+
# Public API of the hooks subpackage; kept in sync with the imports above.
__all__ = [
    'HOOKS', 'Hook', 'CheckpointHook', 'ClosureHook', 'LrUpdaterHook',
    'OptimizerHook', 'Fp16OptimizerHook', 'IterTimerHook',
    'DistSamplerSeedHook', 'EmptyCacheHook', 'LoggerHook', 'MlflowLoggerHook',
    'PaviLoggerHook', 'TextLoggerHook', 'TensorboardLoggerHook',
    'NeptuneLoggerHook', 'WandbLoggerHook', 'DvcliveLoggerHook',
    'MomentumUpdaterHook', 'SyncBuffersHook', 'EMAHook', 'EvalHook',
    'DistEvalHook', 'ProfilerHook', 'GradientCumulativeOptimizerHook',
    'GradientCumulativeFp16OptimizerHook'
]
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/hooks/checkpoint.py
ADDED
|
@@ -0,0 +1,167 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import os.path as osp
|
| 3 |
+
import warnings
|
| 4 |
+
|
| 5 |
+
from annotator.uniformer.mmcv.fileio import FileClient
|
| 6 |
+
from ..dist_utils import allreduce_params, master_only
|
| 7 |
+
from .hook import HOOKS, Hook
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
@HOOKS.register_module()
class CheckpointHook(Hook):
    """Save checkpoints periodically.

    Args:
        interval (int): The saving period. If ``by_epoch=True``, interval
            indicates epochs, otherwise it indicates iterations.
            Default: -1, which means "never".
        by_epoch (bool): Saving checkpoints by epoch or by iteration.
            Default: True.
        save_optimizer (bool): Whether to save optimizer state_dict in the
            checkpoint. It is usually used for resuming experiments.
            Default: True.
        out_dir (str, optional): The root directory to save checkpoints. If not
            specified, ``runner.work_dir`` will be used by default. If
            specified, the ``out_dir`` will be the concatenation of ``out_dir``
            and the last level directory of ``runner.work_dir``.
            `Changed in version 1.3.16.`
        max_keep_ckpts (int, optional): The maximum checkpoints to keep.
            In some cases we want only the latest few checkpoints and would
            like to delete old ones to save the disk space.
            Default: -1, which means unlimited.
        save_last (bool, optional): Whether to force the last checkpoint to be
            saved regardless of interval. Default: True.
        sync_buffer (bool, optional): Whether to synchronize buffers in
            different gpus. Default: False.
        file_client_args (dict, optional): Arguments to instantiate a
            FileClient. See :class:`mmcv.fileio.FileClient` for details.
            Default: None.
            `New in version 1.3.16.`

    .. warning::
        Before v1.3.16, the ``out_dir`` argument indicates the path where the
        checkpoint is stored. However, since v1.3.16, ``out_dir`` indicates the
        root directory and the final path to save checkpoint is the
        concatenation of ``out_dir`` and the last level directory of
        ``runner.work_dir``. Suppose the value of ``out_dir`` is "/path/of/A"
        and the value of ``runner.work_dir`` is "/path/of/B", then the final
        path will be "/path/of/A/B".
    """

    def __init__(self,
                 interval=-1,
                 by_epoch=True,
                 save_optimizer=True,
                 out_dir=None,
                 max_keep_ckpts=-1,
                 save_last=True,
                 sync_buffer=False,
                 file_client_args=None,
                 **kwargs):
        self.interval = interval
        self.by_epoch = by_epoch
        self.save_optimizer = save_optimizer
        self.out_dir = out_dir
        self.max_keep_ckpts = max_keep_ckpts
        self.save_last = save_last
        # Extra kwargs (e.g. `filename_tmpl`, `create_symlink`) are forwarded
        # to `runner.save_checkpoint`.
        self.args = kwargs
        self.sync_buffer = sync_buffer
        self.file_client_args = file_client_args

    def before_run(self, runner):
        if not self.out_dir:
            self.out_dir = runner.work_dir

        self.file_client = FileClient.infer_client(self.file_client_args,
                                                   self.out_dir)

        # if `self.out_dir` is not equal to `runner.work_dir`, it means that
        # `self.out_dir` is set so the final `self.out_dir` is the
        # concatenation of `self.out_dir` and the last level directory of
        # `runner.work_dir`
        if self.out_dir != runner.work_dir:
            basename = osp.basename(runner.work_dir.rstrip(osp.sep))
            self.out_dir = self.file_client.join_path(self.out_dir, basename)

        runner.logger.info((f'Checkpoints will be saved to {self.out_dir} by '
                            f'{self.file_client.name}.'))

        # disable the create_symlink option because some file backends do not
        # allow to create a symlink
        if 'create_symlink' in self.args:
            if self.args[
                    'create_symlink'] and not self.file_client.allow_symlink:
                self.args['create_symlink'] = False
                # FIX: added the missing space after 'changed'; the implicit
                # string concatenation previously produced 'changedto be'.
                warnings.warn(
                    ('create_symlink is set as True by the user but is changed '
                     'to be False because creating symbolic link is not '
                     f'allowed in {self.file_client.name}'))
        else:
            self.args['create_symlink'] = self.file_client.allow_symlink

    def after_train_epoch(self, runner):
        if not self.by_epoch:
            return

        # save checkpoint for following cases:
        # 1. every ``self.interval`` epochs
        # 2. reach the last epoch of training
        if self.every_n_epochs(
                runner, self.interval) or (self.save_last
                                           and self.is_last_epoch(runner)):
            runner.logger.info(
                f'Saving checkpoint at {runner.epoch + 1} epochs')
            if self.sync_buffer:
                allreduce_params(runner.model.buffers())
            self._save_checkpoint(runner)

    @master_only
    def _save_checkpoint(self, runner):
        """Save the current checkpoint and delete unwanted checkpoint."""
        runner.save_checkpoint(
            self.out_dir, save_optimizer=self.save_optimizer, **self.args)
        if runner.meta is not None:
            if self.by_epoch:
                cur_ckpt_filename = self.args.get(
                    'filename_tmpl', 'epoch_{}.pth').format(runner.epoch + 1)
            else:
                cur_ckpt_filename = self.args.get(
                    'filename_tmpl', 'iter_{}.pth').format(runner.iter + 1)
            # Record the latest checkpoint path so a resumed run can find it.
            runner.meta.setdefault('hook_msgs', dict())
            runner.meta['hook_msgs']['last_ckpt'] = self.file_client.join_path(
                self.out_dir, cur_ckpt_filename)
        # remove other checkpoints
        if self.max_keep_ckpts > 0:
            if self.by_epoch:
                name = 'epoch_{}.pth'
                current_ckpt = runner.epoch + 1
            else:
                name = 'iter_{}.pth'
                current_ckpt = runner.iter + 1
            # Walk backwards over older checkpoint steps and delete them;
            # stop at the first missing file (older ones were removed before).
            redundant_ckpts = range(
                current_ckpt - self.max_keep_ckpts * self.interval, 0,
                -self.interval)
            filename_tmpl = self.args.get('filename_tmpl', name)
            for _step in redundant_ckpts:
                ckpt_path = self.file_client.join_path(
                    self.out_dir, filename_tmpl.format(_step))
                if self.file_client.isfile(ckpt_path):
                    self.file_client.remove(ckpt_path)
                else:
                    break

    def after_train_iter(self, runner):
        if self.by_epoch:
            return

        # save checkpoint for following cases:
        # 1. every ``self.interval`` iterations
        # 2. reach the last iteration of training
        if self.every_n_iters(
                runner, self.interval) or (self.save_last
                                           and self.is_last_iter(runner)):
            runner.logger.info(
                f'Saving checkpoint at {runner.iter + 1} iterations')
            if self.sync_buffer:
                allreduce_params(runner.model.buffers())
            self._save_checkpoint(runner)
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/hooks/closure.py
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from .hook import HOOKS, Hook
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
@HOOKS.register_module()
class ClosureHook(Hook):
    """Hook that replaces one of its own stage methods with a user callable.

    Args:
        fn_name (str): Name of an existing hook stage method (e.g.
            ``'after_train_iter'``) to be overridden on this instance.
        fn (callable): The callable bound under ``fn_name``; it is invoked
            with the same arguments as the original stage method.
    """

    def __init__(self, fn_name, fn):
        # The target must be a known hook stage, and the override callable.
        assert hasattr(self, fn_name)
        assert callable(fn)
        setattr(self, fn_name, fn)
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/hooks/ema.py
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from ...parallel import is_module_wrapper
|
| 3 |
+
from ..hooks.hook import HOOKS, Hook
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
@HOOKS.register_module()
class EMAHook(Hook):
    r"""Exponential Moving Average Hook.

    Use Exponential Moving Average on all parameters of model in training
    process. All parameters have a ema backup, which update by the formula
    as below. EMAHook takes priority over EvalHook and CheckpointSaverHook.

    .. math::

        \text{Xema\_{t+1}} = (1 - \text{momentum}) \times
        \text{Xema\_{t}} + \text{momentum} \times X_t

    Args:
        momentum (float): The momentum used for updating ema parameter.
            Defaults to 0.0002.
        interval (int): Update ema parameter every interval iteration.
            Defaults to 1.
        warm_up (int): During first warm_up steps, we may use smaller momentum
            to update ema parameters more slowly. Defaults to 100.
        resume_from (str): The checkpoint path. Defaults to None.
    """

    def __init__(self,
                 momentum=0.0002,
                 interval=1,
                 warm_up=100,
                 resume_from=None):
        assert isinstance(interval, int) and interval > 0
        self.warm_up = warm_up
        self.interval = interval
        assert momentum > 0 and momentum < 1
        # Updating every `interval` steps with momentum m is approximated by
        # one update with momentum m**interval.
        self.momentum = momentum**interval
        self.checkpoint = resume_from

    def before_run(self, runner):
        """To resume model with it's ema parameters more friendly.

        Register ema parameter as ``named_buffer`` to model
        """
        model = runner.model
        if is_module_wrapper(model):
            model = model.module
        self.param_ema_buffer = {}
        self.model_parameters = dict(model.named_parameters(recurse=True))
        for name, value in self.model_parameters.items():
            # "." is not allowed in module's buffer name
            buffer_name = f"ema_{name.replace('.', '_')}"
            self.param_ema_buffer[name] = buffer_name
            model.register_buffer(buffer_name, value.data.clone())
        self.model_buffers = dict(model.named_buffers(recurse=True))
        if self.checkpoint is not None:
            runner.resume(self.checkpoint)

    def after_train_iter(self, runner):
        """Update ema parameter every self.interval iterations."""
        curr_step = runner.iter
        # We warm up the momentum considering the instability at beginning
        momentum = min(self.momentum,
                       (1 + curr_step) / (self.warm_up + curr_step))
        if curr_step % self.interval != 0:
            return
        for name, parameter in self.model_parameters.items():
            buffer_name = self.param_ema_buffer[name]
            buffer_parameter = self.model_buffers[buffer_name]
            # FIX: use the keyword `alpha` form of `Tensor.add_`; the
            # positional `add_(scalar, tensor)` overload was deprecated in
            # PyTorch 1.5 and removed in recent releases.
            buffer_parameter.mul_(1 - momentum).add_(
                parameter.data, alpha=momentum)

    def after_train_epoch(self, runner):
        """We load parameter values from ema backup to model before the
        EvalHook."""
        self._swap_ema_parameters()

    def before_train_epoch(self, runner):
        """We recover model's parameter from ema backup after last epoch's
        EvalHook."""
        self._swap_ema_parameters()

    def _swap_ema_parameters(self):
        """Swap the parameter of model with parameter in ema_buffer."""
        for name, value in self.model_parameters.items():
            temp = value.data.clone()
            ema_buffer = self.model_buffers[self.param_ema_buffer[name]]
            value.data.copy_(ema_buffer.data)
            ema_buffer.data.copy_(temp)
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/hooks/evaluation.py
ADDED
|
@@ -0,0 +1,509 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import os.path as osp
|
| 3 |
+
import warnings
|
| 4 |
+
from math import inf
|
| 5 |
+
|
| 6 |
+
import torch.distributed as dist
|
| 7 |
+
from torch.nn.modules.batchnorm import _BatchNorm
|
| 8 |
+
from torch.utils.data import DataLoader
|
| 9 |
+
|
| 10 |
+
from annotator.uniformer.mmcv.fileio import FileClient
|
| 11 |
+
from annotator.uniformer.mmcv.utils import is_seq_of
|
| 12 |
+
from .hook import Hook
|
| 13 |
+
from .logger import LoggerHook
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class EvalHook(Hook):
|
| 17 |
+
"""Non-Distributed evaluation hook.
|
| 18 |
+
|
| 19 |
+
This hook will regularly perform evaluation in a given interval when
|
| 20 |
+
performing in non-distributed environment.
|
| 21 |
+
|
| 22 |
+
Args:
|
| 23 |
+
dataloader (DataLoader): A PyTorch dataloader, whose dataset has
|
| 24 |
+
implemented ``evaluate`` function.
|
| 25 |
+
start (int | None, optional): Evaluation starting epoch. It enables
|
| 26 |
+
evaluation before the training starts if ``start`` <= the resuming
|
| 27 |
+
epoch. If None, whether to evaluate is merely decided by
|
| 28 |
+
``interval``. Default: None.
|
| 29 |
+
interval (int): Evaluation interval. Default: 1.
|
| 30 |
+
by_epoch (bool): Determine perform evaluation by epoch or by iteration.
|
| 31 |
+
If set to True, it will perform by epoch. Otherwise, by iteration.
|
| 32 |
+
Default: True.
|
| 33 |
+
save_best (str, optional): If a metric is specified, it would measure
|
| 34 |
+
the best checkpoint during evaluation. The information about best
|
| 35 |
+
checkpoint would be saved in ``runner.meta['hook_msgs']`` to keep
|
| 36 |
+
best score value and best checkpoint path, which will be also
|
| 37 |
+
loaded when resume checkpoint. Options are the evaluation metrics
|
| 38 |
+
on the test dataset. e.g., ``bbox_mAP``, ``segm_mAP`` for bbox
|
| 39 |
+
detection and instance segmentation. ``AR@100`` for proposal
|
| 40 |
+
recall. If ``save_best`` is ``auto``, the first key of the returned
|
| 41 |
+
``OrderedDict`` result will be used. Default: None.
|
| 42 |
+
rule (str | None, optional): Comparison rule for best score. If set to
|
| 43 |
+
None, it will infer a reasonable rule. Keys such as 'acc', 'top'
|
| 44 |
+
.etc will be inferred by 'greater' rule. Keys contain 'loss' will
|
| 45 |
+
be inferred by 'less' rule. Options are 'greater', 'less', None.
|
| 46 |
+
Default: None.
|
| 47 |
+
test_fn (callable, optional): test a model with samples from a
|
| 48 |
+
dataloader, and return the test results. If ``None``, the default
|
| 49 |
+
test function ``mmcv.engine.single_gpu_test`` will be used.
|
| 50 |
+
(default: ``None``)
|
| 51 |
+
greater_keys (List[str] | None, optional): Metric keys that will be
|
| 52 |
+
inferred by 'greater' comparison rule. If ``None``,
|
| 53 |
+
_default_greater_keys will be used. (default: ``None``)
|
| 54 |
+
less_keys (List[str] | None, optional): Metric keys that will be
|
| 55 |
+
inferred by 'less' comparison rule. If ``None``, _default_less_keys
|
| 56 |
+
will be used. (default: ``None``)
|
| 57 |
+
out_dir (str, optional): The root directory to save checkpoints. If not
|
| 58 |
+
specified, `runner.work_dir` will be used by default. If specified,
|
| 59 |
+
the `out_dir` will be the concatenation of `out_dir` and the last
|
| 60 |
+
level directory of `runner.work_dir`.
|
| 61 |
+
`New in version 1.3.16.`
|
| 62 |
+
file_client_args (dict): Arguments to instantiate a FileClient.
|
| 63 |
+
See :class:`mmcv.fileio.FileClient` for details. Default: None.
|
| 64 |
+
`New in version 1.3.16.`
|
| 65 |
+
**eval_kwargs: Evaluation arguments fed into the evaluate function of
|
| 66 |
+
the dataset.
|
| 67 |
+
|
| 68 |
+
Notes:
|
| 69 |
+
If new arguments are added for EvalHook, tools/test.py,
|
| 70 |
+
tools/eval_metric.py may be affected.
|
| 71 |
+
"""
|
| 72 |
+
|
| 73 |
+
    # Since the key for determine greater or less is related to the downstream
    # tasks, downstream repos may need to overwrite the following inner
    # variable accordingly.

    # Comparison functions selected by rule name.
    rule_map = {'greater': lambda x, y: x > y, 'less': lambda x, y: x < y}
    # Worst-possible starting score for each rule.
    init_value_map = {'greater': -inf, 'less': inf}
    # Substrings marking a metric as "higher is better".
    _default_greater_keys = [
        'acc', 'top', 'AR@', 'auc', 'precision', 'mAP', 'mDice', 'mIoU',
        'mAcc', 'aAcc'
    ]
    # Substrings marking a metric as "lower is better".
    _default_less_keys = ['loss']
|
| 84 |
+
|
| 85 |
+
    def __init__(self,
                 dataloader,
                 start=None,
                 interval=1,
                 by_epoch=True,
                 save_best=None,
                 rule=None,
                 test_fn=None,
                 greater_keys=None,
                 less_keys=None,
                 out_dir=None,
                 file_client_args=None,
                 **eval_kwargs):
        # Validate arguments eagerly so misconfiguration fails at construction
        # time rather than mid-training.
        if not isinstance(dataloader, DataLoader):
            raise TypeError(f'dataloader must be a pytorch DataLoader, '
                            f'but got {type(dataloader)}')

        if interval <= 0:
            raise ValueError(f'interval must be a positive number, '
                             f'but got {interval}')

        assert isinstance(by_epoch, bool), '``by_epoch`` should be a boolean'

        if start is not None and start < 0:
            raise ValueError(f'The evaluation start epoch {start} is smaller '
                             f'than 0')

        self.dataloader = dataloader
        self.interval = interval
        self.start = start
        self.by_epoch = by_epoch

        assert isinstance(save_best, str) or save_best is None, \
            '""save_best"" should be a str or None ' \
            f'rather than {type(save_best)}'
        self.save_best = save_best
        self.eval_kwargs = eval_kwargs
        # True until the optional "evaluate before training starts" pass has
        # been considered; see before_train_iter/before_train_epoch.
        self.initial_flag = True

        if test_fn is None:
            # Imported lazily to avoid a hard engine dependency when a custom
            # test function is supplied.
            from annotator.uniformer.mmcv.engine import single_gpu_test
            self.test_fn = single_gpu_test
        else:
            self.test_fn = test_fn

        if greater_keys is None:
            self.greater_keys = self._default_greater_keys
        else:
            # Allow a bare string; normalize to a sequence of str.
            if not isinstance(greater_keys, (list, tuple)):
                greater_keys = (greater_keys, )
            assert is_seq_of(greater_keys, str)
            self.greater_keys = greater_keys

        if less_keys is None:
            self.less_keys = self._default_less_keys
        else:
            # Allow a bare string; normalize to a sequence of str.
            if not isinstance(less_keys, (list, tuple)):
                less_keys = (less_keys, )
            assert is_seq_of(less_keys, str)
            self.less_keys = less_keys

        if self.save_best is not None:
            self.best_ckpt_path = None
            self._init_rule(rule, self.save_best)

        self.out_dir = out_dir
        self.file_client_args = file_client_args
|
| 152 |
+
|
| 153 |
+
def _init_rule(self, rule, key_indicator):
|
| 154 |
+
"""Initialize rule, key_indicator, comparison_func, and best score.
|
| 155 |
+
|
| 156 |
+
Here is the rule to determine which rule is used for key indicator
|
| 157 |
+
when the rule is not specific (note that the key indicator matching
|
| 158 |
+
is case-insensitive):
|
| 159 |
+
1. If the key indicator is in ``self.greater_keys``, the rule will be
|
| 160 |
+
specified as 'greater'.
|
| 161 |
+
2. Or if the key indicator is in ``self.less_keys``, the rule will be
|
| 162 |
+
specified as 'less'.
|
| 163 |
+
3. Or if the key indicator is equal to the substring in any one item
|
| 164 |
+
in ``self.greater_keys``, the rule will be specified as 'greater'.
|
| 165 |
+
4. Or if the key indicator is equal to the substring in any one item
|
| 166 |
+
in ``self.less_keys``, the rule will be specified as 'less'.
|
| 167 |
+
|
| 168 |
+
Args:
|
| 169 |
+
rule (str | None): Comparison rule for best score.
|
| 170 |
+
key_indicator (str | None): Key indicator to determine the
|
| 171 |
+
comparison rule.
|
| 172 |
+
"""
|
| 173 |
+
if rule not in self.rule_map and rule is not None:
|
| 174 |
+
raise KeyError(f'rule must be greater, less or None, '
|
| 175 |
+
f'but got {rule}.')
|
| 176 |
+
|
| 177 |
+
if rule is None:
|
| 178 |
+
if key_indicator != 'auto':
|
| 179 |
+
# `_lc` here means we use the lower case of keys for
|
| 180 |
+
# case-insensitive matching
|
| 181 |
+
key_indicator_lc = key_indicator.lower()
|
| 182 |
+
greater_keys = [key.lower() for key in self.greater_keys]
|
| 183 |
+
less_keys = [key.lower() for key in self.less_keys]
|
| 184 |
+
|
| 185 |
+
if key_indicator_lc in greater_keys:
|
| 186 |
+
rule = 'greater'
|
| 187 |
+
elif key_indicator_lc in less_keys:
|
| 188 |
+
rule = 'less'
|
| 189 |
+
elif any(key in key_indicator_lc for key in greater_keys):
|
| 190 |
+
rule = 'greater'
|
| 191 |
+
elif any(key in key_indicator_lc for key in less_keys):
|
| 192 |
+
rule = 'less'
|
| 193 |
+
else:
|
| 194 |
+
raise ValueError(f'Cannot infer the rule for key '
|
| 195 |
+
f'{key_indicator}, thus a specific rule '
|
| 196 |
+
f'must be specified.')
|
| 197 |
+
self.rule = rule
|
| 198 |
+
self.key_indicator = key_indicator
|
| 199 |
+
if self.rule is not None:
|
| 200 |
+
self.compare_func = self.rule_map[self.rule]
|
| 201 |
+
|
| 202 |
+
def before_run(self, runner):
|
| 203 |
+
if not self.out_dir:
|
| 204 |
+
self.out_dir = runner.work_dir
|
| 205 |
+
|
| 206 |
+
self.file_client = FileClient.infer_client(self.file_client_args,
|
| 207 |
+
self.out_dir)
|
| 208 |
+
|
| 209 |
+
# if `self.out_dir` is not equal to `runner.work_dir`, it means that
|
| 210 |
+
# `self.out_dir` is set so the final `self.out_dir` is the
|
| 211 |
+
# concatenation of `self.out_dir` and the last level directory of
|
| 212 |
+
# `runner.work_dir`
|
| 213 |
+
if self.out_dir != runner.work_dir:
|
| 214 |
+
basename = osp.basename(runner.work_dir.rstrip(osp.sep))
|
| 215 |
+
self.out_dir = self.file_client.join_path(self.out_dir, basename)
|
| 216 |
+
runner.logger.info(
|
| 217 |
+
(f'The best checkpoint will be saved to {self.out_dir} by '
|
| 218 |
+
f'{self.file_client.name}'))
|
| 219 |
+
|
| 220 |
+
if self.save_best is not None:
|
| 221 |
+
if runner.meta is None:
|
| 222 |
+
warnings.warn('runner.meta is None. Creating an empty one.')
|
| 223 |
+
runner.meta = dict()
|
| 224 |
+
runner.meta.setdefault('hook_msgs', dict())
|
| 225 |
+
self.best_ckpt_path = runner.meta['hook_msgs'].get(
|
| 226 |
+
'best_ckpt', None)
|
| 227 |
+
|
| 228 |
+
def before_train_iter(self, runner):
    """Evaluate the model only at the start of training by iteration.

    Only relevant for iteration-based runners that resume past
    ``self.start``; runs at most once thanks to ``self.initial_flag``.
    """
    if self.by_epoch or not self.initial_flag:
        return
    if self.start is not None and runner.iter >= self.start:
        # Resumed past the configured start point: evaluate immediately.
        self.after_train_iter(runner)
    self.initial_flag = False
|
| 235 |
+
|
| 236 |
+
def before_train_epoch(self, runner):
    """Evaluate the model only at the start of training by epoch.

    Epoch-based counterpart of :meth:`before_train_iter`; runs at most
    once thanks to ``self.initial_flag``.
    """
    if not (self.by_epoch and self.initial_flag):
        return
    if self.start is not None and runner.epoch >= self.start:
        # Resumed past the configured start point: evaluate immediately.
        self.after_train_epoch(runner)
    self.initial_flag = False
|
| 243 |
+
|
| 244 |
+
def after_train_iter(self, runner):
    """Called after every training iter to evaluate the results."""
    if not self.by_epoch and self._should_evaluate(runner):
        # Because the priority of EvalHook is higher than LoggerHook, the
        # training log and the evaluating log are mixed. Therefore,
        # we need to dump the training log and clear it before evaluating
        # log is generated. In addition, this problem will only appear in
        # `IterBasedRunner` whose `self.by_epoch` is False, because
        # `EpochBasedRunner` whose `self.by_epoch` is True calls
        # `_do_evaluate` in `after_train_epoch` stage, and at this stage
        # the training log has been printed, so it will not cause any
        # problem. more details at
        # https://github.com/open-mmlab/mmsegmentation/issues/694
        for hook in runner._hooks:
            if isinstance(hook, LoggerHook):
                hook.after_train_iter(runner)
        runner.log_buffer.clear()

        self._do_evaluate(runner)
|
| 263 |
+
|
| 264 |
+
def after_train_epoch(self, runner):
    """Called after every training epoch to evaluate the results."""
    # `_should_evaluate` applies the `start`/`interval` schedule.
    if self.by_epoch and self._should_evaluate(runner):
        self._do_evaluate(runner)
|
| 268 |
+
|
| 269 |
+
def _do_evaluate(self, runner):
    """perform evaluation and save ckpt."""
    results = self.test_fn(runner.model, self.dataloader)
    # Expose the number of evaluated iterations so loggers can report it.
    runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
    key_score = self.evaluate(runner, results)
    # the key_score may be `None` so it needs to skip the action to save
    # the best checkpoint
    # NOTE(review): a truthiness check also skips a legitimate score of 0
    # (or 0.0) — confirm this is acceptable for the metrics in use.
    if self.save_best and key_score:
        self._save_ckpt(runner, key_score)
|
| 278 |
+
|
| 279 |
+
def _should_evaluate(self, runner):
    """Judge whether to perform evaluation.

    Here is the rule to judge whether to perform evaluation:
    1. It will not perform evaluation during the epoch/iteration interval,
       which is determined by ``self.interval``.
    2. It will not perform evaluation if the start time is larger than
       current time.
    3. It will not perform evaluation when current time is larger than
       the start time but during epoch/iteration interval.

    Returns:
        bool: The flag indicating whether to perform evaluation.
    """
    # Pick the counter and the periodic check matching the runner type.
    if self.by_epoch:
        current = runner.epoch
        check_time = self.every_n_epochs
    else:
        current = runner.iter
        check_time = self.every_n_iters

    if self.start is None:
        if not check_time(runner, self.interval):
            # No evaluation during the interval.
            return False
    elif (current + 1) < self.start:
        # No evaluation if start is larger than the current time.
        return False
    else:
        # Evaluation only at epochs/iters 3, 5, 7...
        # if start==3 and interval==2
        if (current + 1 - self.start) % self.interval:
            return False
    return True
|
| 313 |
+
|
| 314 |
+
def _save_ckpt(self, runner, key_score):
    """Save the best checkpoint.

    It will compare the score according to the compare function, write
    related information (best score, best checkpoint path) and save the
    best checkpoint into ``work_dir``.
    """
    # Human-readable suffix for the checkpoint file name and log message.
    if self.by_epoch:
        current = f'epoch_{runner.epoch + 1}'
        cur_type, cur_time = 'epoch', runner.epoch + 1
    else:
        current = f'iter_{runner.iter + 1}'
        cur_type, cur_time = 'iter', runner.iter + 1

    # Fall back to -inf/+inf (per `init_value_map`) when no score was
    # recorded yet, so the first evaluation always wins.
    best_score = runner.meta['hook_msgs'].get(
        'best_score', self.init_value_map[self.rule])
    if self.compare_func(key_score, best_score):
        best_score = key_score
        runner.meta['hook_msgs']['best_score'] = best_score

        # Only one "best" checkpoint is kept: delete the previous file.
        if self.best_ckpt_path and self.file_client.isfile(
                self.best_ckpt_path):
            self.file_client.remove(self.best_ckpt_path)
            runner.logger.info(
                (f'The previous best checkpoint {self.best_ckpt_path} was '
                 'removed'))

        best_ckpt_name = f'best_{self.key_indicator}_{current}.pth'
        self.best_ckpt_path = self.file_client.join_path(
            self.out_dir, best_ckpt_name)
        # Persist the path in runner meta so it survives checkpoint resume.
        runner.meta['hook_msgs']['best_ckpt'] = self.best_ckpt_path

        runner.save_checkpoint(
            self.out_dir, best_ckpt_name, create_symlink=False)
        runner.logger.info(
            f'Now best checkpoint is saved as {best_ckpt_name}.')
        runner.logger.info(
            f'Best {self.key_indicator} is {best_score:0.4f} '
            f'at {cur_time} {cur_type}.')
|
| 353 |
+
|
| 354 |
+
def evaluate(self, runner, results):
    """Evaluate the results.

    Args:
        runner (:obj:`mmcv.Runner`): The underlined training runner.
        results (list): Output results.

    Returns:
        The value of the key indicator metric when ``save_best`` is set
        and ``eval_res`` is non-empty; otherwise ``None``.
    """
    eval_res = self.dataloader.dataset.evaluate(
        results, logger=runner.logger, **self.eval_kwargs)

    # Push every metric into the log buffer so logger hooks can emit them.
    for name, val in eval_res.items():
        runner.log_buffer.output[name] = val
    runner.log_buffer.ready = True

    if self.save_best is not None:
        # If the performance of model is poor, the `eval_res` may be an
        # empty dict and it will raise exception when `self.save_best` is
        # not None. More details at
        # https://github.com/open-mmlab/mmdetection/issues/6265.
        if not eval_res:
            warnings.warn(
                'Since `eval_res` is an empty dict, the behavior to save '
                'the best checkpoint will be skipped in this evaluation.')
            return None

        if self.key_indicator == 'auto':
            # infer from eval_results: first key of the returned dict.
            self._init_rule(self.rule, list(eval_res.keys())[0])
        return eval_res[self.key_indicator]

    return None
|
| 385 |
+
|
| 386 |
+
|
| 387 |
+
class DistEvalHook(EvalHook):
    """Distributed evaluation hook.

    This hook will regularly perform evaluation in a given interval when
    performing in distributed environment.

    Args:
        dataloader (DataLoader): A PyTorch dataloader, whose dataset has
            implemented ``evaluate`` function.
        start (int | None, optional): Evaluation starting epoch. It enables
            evaluation before the training starts if ``start`` <= the resuming
            epoch. If None, whether to evaluate is merely decided by
            ``interval``. Default: None.
        interval (int): Evaluation interval. Default: 1.
        by_epoch (bool): Determine perform evaluation by epoch or by iteration.
            If set to True, it will perform by epoch. Otherwise, by iteration.
            default: True.
        save_best (str, optional): If a metric is specified, it would measure
            the best checkpoint during evaluation. The information about best
            checkpoint would be saved in ``runner.meta['hook_msgs']`` to keep
            best score value and best checkpoint path, which will be also
            loaded when resume checkpoint. Options are the evaluation metrics
            on the test dataset. e.g., ``bbox_mAP``, ``segm_mAP`` for bbox
            detection and instance segmentation. ``AR@100`` for proposal
            recall. If ``save_best`` is ``auto``, the first key of the returned
            ``OrderedDict`` result will be used. Default: None.
        rule (str | None, optional): Comparison rule for best score. If set to
            None, it will infer a reasonable rule. Keys such as 'acc', 'top'
            .etc will be inferred by 'greater' rule. Keys contain 'loss' will
            be inferred by 'less' rule. Options are 'greater', 'less', None.
            Default: None.
        test_fn (callable, optional): test a model with samples from a
            dataloader in a multi-gpu manner, and return the test results. If
            ``None``, the default test function ``mmcv.engine.multi_gpu_test``
            will be used. (default: ``None``)
        tmpdir (str | None): Temporary directory to save the results of all
            processes. Default: None.
        gpu_collect (bool): Whether to use gpu or cpu to collect results.
            Default: False.
        broadcast_bn_buffer (bool): Whether to broadcast the
            buffer(running_mean and running_var) of rank 0 to other rank
            before evaluation. Default: True.
        out_dir (str, optional): The root directory to save checkpoints. If not
            specified, `runner.work_dir` will be used by default. If specified,
            the `out_dir` will be the concatenation of `out_dir` and the last
            level directory of `runner.work_dir`.
        file_client_args (dict): Arguments to instantiate a FileClient.
            See :class:`mmcv.fileio.FileClient` for details. Default: None.
        **eval_kwargs: Evaluation arguments fed into the evaluate function of
            the dataset.
    """

    def __init__(self,
                 dataloader,
                 start=None,
                 interval=1,
                 by_epoch=True,
                 save_best=None,
                 rule=None,
                 test_fn=None,
                 greater_keys=None,
                 less_keys=None,
                 broadcast_bn_buffer=True,
                 tmpdir=None,
                 gpu_collect=False,
                 out_dir=None,
                 file_client_args=None,
                 **eval_kwargs):

        # Default to the multi-GPU test routine; imported lazily to avoid
        # pulling in the engine module when it is not needed.
        if test_fn is None:
            from annotator.uniformer.mmcv.engine import multi_gpu_test
            test_fn = multi_gpu_test

        super().__init__(
            dataloader,
            start=start,
            interval=interval,
            by_epoch=by_epoch,
            save_best=save_best,
            rule=rule,
            test_fn=test_fn,
            greater_keys=greater_keys,
            less_keys=less_keys,
            out_dir=out_dir,
            file_client_args=file_client_args,
            **eval_kwargs)

        self.broadcast_bn_buffer = broadcast_bn_buffer
        self.tmpdir = tmpdir
        self.gpu_collect = gpu_collect

    def _do_evaluate(self, runner):
        """perform evaluation and save ckpt."""
        # Synchronization of BatchNorm's buffer (running_mean
        # and running_var) is not supported in the DDP of pytorch,
        # which may cause the inconsistent performance of models in
        # different ranks, so we broadcast BatchNorm's buffers
        # of rank 0 to other ranks to avoid this.
        if self.broadcast_bn_buffer:
            model = runner.model
            for name, module in model.named_modules():
                if isinstance(module,
                              _BatchNorm) and module.track_running_stats:
                    dist.broadcast(module.running_var, 0)
                    dist.broadcast(module.running_mean, 0)

        tmpdir = self.tmpdir
        if tmpdir is None:
            tmpdir = osp.join(runner.work_dir, '.eval_hook')

        # All ranks participate in the forward pass; results are gathered
        # into rank 0 by `test_fn` (via gpu_collect or the tmpdir).
        results = self.test_fn(
            runner.model,
            self.dataloader,
            tmpdir=tmpdir,
            gpu_collect=self.gpu_collect)
        # Only rank 0 evaluates metrics and saves the best checkpoint.
        if runner.rank == 0:
            print('\n')
            runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
            key_score = self.evaluate(runner, results)
            # the key_score may be `None` so it needs to skip the action to
            # save the best checkpoint
            if self.save_best and key_score:
                self._save_ckpt(runner, key_score)
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/hooks/hook.py
ADDED
|
@@ -0,0 +1,92 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from annotator.uniformer.mmcv.utils import Registry, is_method_overridden
|
| 3 |
+
|
| 4 |
+
HOOKS = Registry('hook')
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class Hook:
    """Base class for all runner hooks.

    A hook implements any subset of the stage callbacks below; the runner
    calls them at the matching point of the training/validation loop. The
    generic ``*_epoch``/``*_iter`` callbacks are fanned out to both the
    train and val variants by default.
    """

    stages = ('before_run', 'before_train_epoch', 'before_train_iter',
              'after_train_iter', 'after_train_epoch', 'before_val_epoch',
              'before_val_iter', 'after_val_iter', 'after_val_epoch',
              'after_run')

    def before_run(self, runner):
        """Called once before the whole run starts."""
        pass

    def after_run(self, runner):
        """Called once after the whole run finishes."""
        pass

    def before_epoch(self, runner):
        """Generic callback invoked before every (train or val) epoch."""
        pass

    def after_epoch(self, runner):
        """Generic callback invoked after every (train or val) epoch."""
        pass

    def before_iter(self, runner):
        """Generic callback invoked before every (train or val) iteration."""
        pass

    def after_iter(self, runner):
        """Generic callback invoked after every (train or val) iteration."""
        pass

    # The specific train/val variants delegate to the generic callbacks so
    # that overriding only `before_epoch` etc. still takes effect everywhere.
    def before_train_epoch(self, runner):
        self.before_epoch(runner)

    def before_val_epoch(self, runner):
        self.before_epoch(runner)

    def after_train_epoch(self, runner):
        self.after_epoch(runner)

    def after_val_epoch(self, runner):
        self.after_epoch(runner)

    def before_train_iter(self, runner):
        self.before_iter(runner)

    def before_val_iter(self, runner):
        self.before_iter(runner)

    def after_train_iter(self, runner):
        self.after_iter(runner)

    def after_val_iter(self, runner):
        self.after_iter(runner)

    def every_n_epochs(self, runner, n):
        """Return True on every n-th epoch; always False when ``n <= 0``."""
        if n <= 0:
            return False
        return (runner.epoch + 1) % n == 0

    def every_n_inner_iters(self, runner, n):
        """Return True on every n-th inner iter; always False when ``n <= 0``."""
        if n <= 0:
            return False
        return (runner.inner_iter + 1) % n == 0

    def every_n_iters(self, runner, n):
        """Return True on every n-th iter; always False when ``n <= 0``."""
        if n <= 0:
            return False
        return (runner.iter + 1) % n == 0

    def end_of_epoch(self, runner):
        """Return True when the current inner iter is the last of the epoch."""
        return runner.inner_iter + 1 == len(runner.data_loader)

    def is_last_epoch(self, runner):
        """Return True during the final epoch of the run."""
        return runner.epoch + 1 == runner._max_epochs

    def is_last_iter(self, runner):
        """Return True during the final iteration of the run."""
        return runner.iter + 1 == runner._max_iters

    def get_triggered_stages(self):
        """Return the stages this hook overrides, ordered as ``Hook.stages``."""
        triggered = {
            stage
            for stage in Hook.stages
            if is_method_overridden(stage, Hook, self)
        }

        # Generic callbacks fire in several concrete stages; map each one
        # to the stages it implies.
        fan_out = {
            'before_epoch': ('before_train_epoch', 'before_val_epoch'),
            'after_epoch': ('after_train_epoch', 'after_val_epoch'),
            'before_iter': ('before_train_iter', 'before_val_iter'),
            'after_iter': ('after_train_iter', 'after_val_iter'),
        }
        for generic, implied in fan_out.items():
            if is_method_overridden(generic, Hook, self):
                triggered.update(implied)

        return [stage for stage in Hook.stages if stage in triggered]
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/hooks/iter_timer.py
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import time
|
| 3 |
+
|
| 4 |
+
from .hook import HOOKS, Hook
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@HOOKS.register_module()
class IterTimerHook(Hook):
    """Log per-iteration timing into the runner's log buffer.

    Records ``data_time`` (time spent fetching data, i.e. between the end
    of the previous iteration and the start of the current one) and
    ``time`` (total wall time of the iteration).
    """

    def before_epoch(self, runner):
        # Start the clock at the beginning of each epoch.
        self.t = time.time()

    def before_iter(self, runner):
        # Elapsed time since the last mark = data loading time.
        runner.log_buffer.update({'data_time': time.time() - self.t})

    def after_iter(self, runner):
        # Elapsed time since the last mark = full iteration time.
        runner.log_buffer.update({'time': time.time() - self.t})
        self.t = time.time()
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/hooks/logger/__init__.py
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from .base import LoggerHook
|
| 3 |
+
from .dvclive import DvcliveLoggerHook
|
| 4 |
+
from .mlflow import MlflowLoggerHook
|
| 5 |
+
from .neptune import NeptuneLoggerHook
|
| 6 |
+
from .pavi import PaviLoggerHook
|
| 7 |
+
from .tensorboard import TensorboardLoggerHook
|
| 8 |
+
from .text import TextLoggerHook
|
| 9 |
+
from .wandb import WandbLoggerHook
|
| 10 |
+
|
| 11 |
+
__all__ = [
|
| 12 |
+
'LoggerHook', 'MlflowLoggerHook', 'PaviLoggerHook',
|
| 13 |
+
'TensorboardLoggerHook', 'TextLoggerHook', 'WandbLoggerHook',
|
| 14 |
+
'NeptuneLoggerHook', 'DvcliveLoggerHook'
|
| 15 |
+
]
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/hooks/logger/base.py
ADDED
|
@@ -0,0 +1,166 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import numbers
|
| 3 |
+
from abc import ABCMeta, abstractmethod
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
import torch
|
| 7 |
+
|
| 8 |
+
from ..hook import Hook
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class LoggerHook(Hook):
    """Base class for logger hooks.

    Subclasses implement :meth:`log`, which consumes the aggregated values
    in ``runner.log_buffer.output`` and writes them to a concrete backend.

    Args:
        interval (int): Logging interval (every k iterations).
        ignore_last (bool): Ignore the log of last iterations in each epoch
            if less than `interval`.
        reset_flag (bool): Whether to clear the output buffer after logging.
        by_epoch (bool): Whether EpochBasedRunner is used.
    """

    # NOTE: `__metaclass__` is the Python 2 spelling and has no effect on
    # Python 3, so the class is not actually abstract at runtime; kept only
    # for backward compatibility of the attribute.
    __metaclass__ = ABCMeta

    def __init__(self,
                 interval=10,
                 ignore_last=True,
                 reset_flag=False,
                 by_epoch=True):
        self.interval = interval
        self.ignore_last = ignore_last
        self.reset_flag = reset_flag
        self.by_epoch = by_epoch

    @abstractmethod
    def log(self, runner):
        """Write the current log buffer contents to the backend."""
        pass

    @staticmethod
    def is_scalar(val, include_np=True, include_torch=True):
        """Tell the input variable is a scalar or not.

        Args:
            val: Input variable.
            include_np (bool): Whether include 0-d np.ndarray as a scalar.
            include_torch (bool): Whether include 0-d torch.Tensor as a scalar.

        Returns:
            bool: True or False.
        """
        if isinstance(val, numbers.Number):
            return True
        elif include_np and isinstance(val, np.ndarray) and val.ndim == 0:
            return True
        elif include_torch and isinstance(val, torch.Tensor) and (
                val.dim() == 0 or len(val) == 1):
            # Fix: `len(val)` raises TypeError for 0-d tensors, although the
            # docstring promises to treat 0-d tensors as scalars. Checking
            # `dim() == 0` first accepts them as documented while keeping
            # the original behavior for 1-element 1-d tensors.
            return True
        else:
            return False

    def get_mode(self, runner):
        """Infer whether the buffered values belong to 'train' or 'val'."""
        if runner.mode == 'train':
            # During a val workflow inside a train run, the timer's 'time'
            # key is absent from the buffer; treat that as 'val'.
            if 'time' in runner.log_buffer.output:
                mode = 'train'
            else:
                mode = 'val'
        elif runner.mode == 'val':
            mode = 'val'
        else:
            raise ValueError(f"runner mode should be 'train' or 'val', "
                             f'but got {runner.mode}')
        return mode

    def get_epoch(self, runner):
        """Return the 1-based epoch number to report for the current mode."""
        if runner.mode == 'train':
            epoch = runner.epoch + 1
        elif runner.mode == 'val':
            # normal val mode
            # runner.epoch += 1 has been done before val workflow
            epoch = runner.epoch
        else:
            raise ValueError(f"runner mode should be 'train' or 'val', "
                             f'but got {runner.mode}')
        return epoch

    def get_iter(self, runner, inner_iter=False):
        """Get the current training iteration step."""
        if self.by_epoch and inner_iter:
            current_iter = runner.inner_iter + 1
        else:
            current_iter = runner.iter + 1
        return current_iter

    def get_lr_tags(self, runner):
        """Collect learning-rate values as loggable tags."""
        tags = {}
        lrs = runner.current_lr()
        if isinstance(lrs, dict):
            # One entry per parameter group set; log the first group's lr.
            for name, value in lrs.items():
                tags[f'learning_rate/{name}'] = value[0]
        else:
            tags['learning_rate'] = lrs[0]
        return tags

    def get_momentum_tags(self, runner):
        """Collect momentum values as loggable tags."""
        tags = {}
        momentums = runner.current_momentum()
        if isinstance(momentums, dict):
            for name, value in momentums.items():
                tags[f'momentum/{name}'] = value[0]
        else:
            tags['momentum'] = momentums[0]
        return tags

    def get_loggable_tags(self,
                          runner,
                          allow_scalar=True,
                          allow_text=False,
                          add_mode=True,
                          tags_to_skip=('time', 'data_time')):
        """Filter the log buffer into a flat dict of backend-ready tags."""
        tags = {}
        for var, val in runner.log_buffer.output.items():
            if var in tags_to_skip:
                continue
            if self.is_scalar(val) and not allow_scalar:
                continue
            if isinstance(val, str) and not allow_text:
                continue
            if add_mode:
                # Namespace each tag by train/val so backends separate them.
                var = f'{self.get_mode(runner)}/{var}'
            tags[var] = val
        tags.update(self.get_lr_tags(runner))
        tags.update(self.get_momentum_tags(runner))
        return tags

    def before_run(self, runner):
        # Only the last registered logger hook resets the buffer, so every
        # logger gets a chance to see each batch of output first.
        for hook in runner.hooks[::-1]:
            if isinstance(hook, LoggerHook):
                hook.reset_flag = True
                break

    def before_epoch(self, runner):
        runner.log_buffer.clear()  # clear logs of last epoch

    def after_train_iter(self, runner):
        # Average the buffered values every `interval` iterations (inner
        # iterations for epoch-based runners, global otherwise), plus at
        # the end of an epoch unless `ignore_last` is set.
        if self.by_epoch and self.every_n_inner_iters(runner, self.interval):
            runner.log_buffer.average(self.interval)
        elif not self.by_epoch and self.every_n_iters(runner, self.interval):
            runner.log_buffer.average(self.interval)
        elif self.end_of_epoch(runner) and not self.ignore_last:
            # not precise but more stable
            runner.log_buffer.average(self.interval)

        if runner.log_buffer.ready:
            self.log(runner)
            if self.reset_flag:
                runner.log_buffer.clear_output()

    def after_train_epoch(self, runner):
        if runner.log_buffer.ready:
            self.log(runner)
            if self.reset_flag:
                runner.log_buffer.clear_output()

    def after_val_epoch(self, runner):
        runner.log_buffer.average()
        self.log(runner)
        if self.reset_flag:
            runner.log_buffer.clear_output()
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/hooks/logger/dvclive.py
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from ...dist_utils import master_only
|
| 3 |
+
from ..hook import HOOKS
|
| 4 |
+
from .base import LoggerHook
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@HOOKS.register_module()
class DvcliveLoggerHook(LoggerHook):
    """Class to log metrics with dvclive.

    It requires `dvclive`_ to be installed.

    Args:
        path (str): Directory where dvclive will write TSV log files.
        interval (int): Logging interval (every k iterations).
            Default 10.
        ignore_last (bool): Ignore the log of last iterations in each epoch
            if less than `interval`.
            Default: True.
        reset_flag (bool): Whether to clear the output buffer after logging.
            Default: True.
        by_epoch (bool): Whether EpochBasedRunner is used.
            Default: True.

    .. _dvclive:
        https://dvc.org/doc/dvclive
    """

    def __init__(self,
                 path,
                 interval=10,
                 ignore_last=True,
                 reset_flag=True,
                 by_epoch=True):

        super(DvcliveLoggerHook, self).__init__(interval, ignore_last,
                                                reset_flag, by_epoch)
        self.path = path
        # Import eagerly so a missing dependency fails at construction time.
        self.import_dvclive()

    def import_dvclive(self):
        """Import dvclive lazily and store the module on the instance."""
        try:
            import dvclive
        except ImportError:
            raise ImportError(
                'Please run "pip install dvclive" to install dvclive')
        self.dvclive = dvclive

    @master_only
    def before_run(self, runner):
        # Initialize the dvclive run directory (rank 0 only).
        self.dvclive.init(self.path)

    @master_only
    def log(self, runner):
        tags = self.get_loggable_tags(runner)
        if tags:
            # dvclive takes one (name, value) pair per call.
            for k, v in tags.items():
                self.dvclive.log(k, v, step=self.get_iter(runner))
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/hooks/logger/mlflow.py
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from ...dist_utils import master_only
|
| 3 |
+
from ..hook import HOOKS
|
| 4 |
+
from .base import LoggerHook
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@HOOKS.register_module()
class MlflowLoggerHook(LoggerHook):
    """Logger hook that writes metrics (and optionally the model) to MLflow."""

    def __init__(self,
                 exp_name=None,
                 tags=None,
                 log_model=True,
                 interval=10,
                 ignore_last=True,
                 reset_flag=False,
                 by_epoch=True):
        """Class to log metrics and (optionally) a trained model to MLflow.

        It requires `MLflow`_ to be installed.

        Args:
            exp_name (str, optional): Name of the experiment to be used.
                Default None.
                If not None, set the active experiment.
                If experiment does not exist, an experiment with provided name
                will be created.
            tags (dict of str: str, optional): Tags for the current run.
                Default None.
                If not None, set tags for the current run.
            log_model (bool, optional): Whether to log an MLflow artifact.
                Default True.
                If True, log runner.model as an MLflow artifact
                for the current run.
            interval (int): Logging interval (every k iterations).
            ignore_last (bool): Ignore the log of last iterations in each epoch
                if less than `interval`.
            reset_flag (bool): Whether to clear the output buffer after logging
            by_epoch (bool): Whether EpochBasedRunner is used.

        .. _MLflow:
            https://www.mlflow.org/docs/latest/index.html
        """
        super(MlflowLoggerHook, self).__init__(interval, ignore_last,
                                               reset_flag, by_epoch)
        # Import eagerly so a missing dependency fails at construction time.
        self.import_mlflow()
        self.exp_name = exp_name
        self.tags = tags
        self.log_model = log_model

    def import_mlflow(self):
        """Import mlflow lazily and store the modules on the instance."""
        try:
            import mlflow
            import mlflow.pytorch as mlflow_pytorch
        except ImportError:
            raise ImportError(
                'Please run "pip install mlflow" to install mlflow')
        self.mlflow = mlflow
        self.mlflow_pytorch = mlflow_pytorch

    @master_only
    def before_run(self, runner):
        super(MlflowLoggerHook, self).before_run(runner)
        # Select/create the experiment and tag the run before logging starts.
        if self.exp_name is not None:
            self.mlflow.set_experiment(self.exp_name)
        if self.tags is not None:
            self.mlflow.set_tags(self.tags)

    @master_only
    def log(self, runner):
        tags = self.get_loggable_tags(runner)
        if tags:
            self.mlflow.log_metrics(tags, step=self.get_iter(runner))

    @master_only
    def after_run(self, runner):
        # Optionally persist the final model as an MLflow artifact.
        if self.log_model:
            self.mlflow_pytorch.log_model(runner.model, 'models')
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/hooks/logger/neptune.py
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from ...dist_utils import master_only
|
| 3 |
+
from ..hook import HOOKS
|
| 4 |
+
from .base import LoggerHook
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@HOOKS.register_module()
|
| 8 |
+
class NeptuneLoggerHook(LoggerHook):
|
| 9 |
+
"""Class to log metrics to NeptuneAI.
|
| 10 |
+
|
| 11 |
+
It requires `neptune-client` to be installed.
|
| 12 |
+
|
| 13 |
+
Args:
|
| 14 |
+
init_kwargs (dict): a dict contains the initialization keys as below:
|
| 15 |
+
- project (str): Name of a project in a form of
|
| 16 |
+
namespace/project_name. If None, the value of
|
| 17 |
+
NEPTUNE_PROJECT environment variable will be taken.
|
| 18 |
+
- api_token (str): User’s API token.
|
| 19 |
+
If None, the value of NEPTUNE_API_TOKEN environment
|
| 20 |
+
variable will be taken. Note: It is strongly recommended
|
| 21 |
+
to use NEPTUNE_API_TOKEN environment variable rather than
|
| 22 |
+
placing your API token in plain text in your source code.
|
| 23 |
+
- name (str, optional, default is 'Untitled'): Editable name of
|
| 24 |
+
the run. Name is displayed in the run's Details and in
|
| 25 |
+
Runs table as a column.
|
| 26 |
+
Check https://docs.neptune.ai/api-reference/neptune#init for
|
| 27 |
+
more init arguments.
|
| 28 |
+
interval (int): Logging interval (every k iterations).
|
| 29 |
+
ignore_last (bool): Ignore the log of last iterations in each epoch
|
| 30 |
+
if less than `interval`.
|
| 31 |
+
reset_flag (bool): Whether to clear the output buffer after logging
|
| 32 |
+
by_epoch (bool): Whether EpochBasedRunner is used.
|
| 33 |
+
|
| 34 |
+
.. _NeptuneAI:
|
| 35 |
+
https://docs.neptune.ai/you-should-know/logging-metadata
|
| 36 |
+
"""
|
| 37 |
+
|
| 38 |
+
def __init__(self,
|
| 39 |
+
init_kwargs=None,
|
| 40 |
+
interval=10,
|
| 41 |
+
ignore_last=True,
|
| 42 |
+
reset_flag=True,
|
| 43 |
+
with_step=True,
|
| 44 |
+
by_epoch=True):
|
| 45 |
+
|
| 46 |
+
super(NeptuneLoggerHook, self).__init__(interval, ignore_last,
|
| 47 |
+
reset_flag, by_epoch)
|
| 48 |
+
self.import_neptune()
|
| 49 |
+
self.init_kwargs = init_kwargs
|
| 50 |
+
self.with_step = with_step
|
| 51 |
+
|
| 52 |
+
def import_neptune(self):
|
| 53 |
+
try:
|
| 54 |
+
import neptune.new as neptune
|
| 55 |
+
except ImportError:
|
| 56 |
+
raise ImportError(
|
| 57 |
+
'Please run "pip install neptune-client" to install neptune')
|
| 58 |
+
self.neptune = neptune
|
| 59 |
+
self.run = None
|
| 60 |
+
|
| 61 |
+
@master_only
|
| 62 |
+
def before_run(self, runner):
|
| 63 |
+
if self.init_kwargs:
|
| 64 |
+
self.run = self.neptune.init(**self.init_kwargs)
|
| 65 |
+
else:
|
| 66 |
+
self.run = self.neptune.init()
|
| 67 |
+
|
| 68 |
+
@master_only
|
| 69 |
+
def log(self, runner):
|
| 70 |
+
tags = self.get_loggable_tags(runner)
|
| 71 |
+
if tags:
|
| 72 |
+
for tag_name, tag_value in tags.items():
|
| 73 |
+
if self.with_step:
|
| 74 |
+
self.run[tag_name].log(
|
| 75 |
+
tag_value, step=self.get_iter(runner))
|
| 76 |
+
else:
|
| 77 |
+
tags['global_step'] = self.get_iter(runner)
|
| 78 |
+
self.run[tag_name].log(tags)
|
| 79 |
+
|
| 80 |
+
@master_only
|
| 81 |
+
def after_run(self, runner):
|
| 82 |
+
self.run.stop()
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/hooks/logger/pavi.py
ADDED
|
@@ -0,0 +1,117 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import json
|
| 3 |
+
import os
|
| 4 |
+
import os.path as osp
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
import yaml
|
| 8 |
+
|
| 9 |
+
import annotator.uniformer.mmcv as mmcv
|
| 10 |
+
from ....parallel.utils import is_module_wrapper
|
| 11 |
+
from ...dist_utils import master_only
|
| 12 |
+
from ..hook import HOOKS
|
| 13 |
+
from .base import LoggerHook
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
@HOOKS.register_module()
|
| 17 |
+
class PaviLoggerHook(LoggerHook):
|
| 18 |
+
|
| 19 |
+
def __init__(self,
|
| 20 |
+
init_kwargs=None,
|
| 21 |
+
add_graph=False,
|
| 22 |
+
add_last_ckpt=False,
|
| 23 |
+
interval=10,
|
| 24 |
+
ignore_last=True,
|
| 25 |
+
reset_flag=False,
|
| 26 |
+
by_epoch=True,
|
| 27 |
+
img_key='img_info'):
|
| 28 |
+
super(PaviLoggerHook, self).__init__(interval, ignore_last, reset_flag,
|
| 29 |
+
by_epoch)
|
| 30 |
+
self.init_kwargs = init_kwargs
|
| 31 |
+
self.add_graph = add_graph
|
| 32 |
+
self.add_last_ckpt = add_last_ckpt
|
| 33 |
+
self.img_key = img_key
|
| 34 |
+
|
| 35 |
+
@master_only
|
| 36 |
+
def before_run(self, runner):
|
| 37 |
+
super(PaviLoggerHook, self).before_run(runner)
|
| 38 |
+
try:
|
| 39 |
+
from pavi import SummaryWriter
|
| 40 |
+
except ImportError:
|
| 41 |
+
raise ImportError('Please run "pip install pavi" to install pavi.')
|
| 42 |
+
|
| 43 |
+
self.run_name = runner.work_dir.split('/')[-1]
|
| 44 |
+
|
| 45 |
+
if not self.init_kwargs:
|
| 46 |
+
self.init_kwargs = dict()
|
| 47 |
+
self.init_kwargs['name'] = self.run_name
|
| 48 |
+
self.init_kwargs['model'] = runner._model_name
|
| 49 |
+
if runner.meta is not None:
|
| 50 |
+
if 'config_dict' in runner.meta:
|
| 51 |
+
config_dict = runner.meta['config_dict']
|
| 52 |
+
assert isinstance(
|
| 53 |
+
config_dict,
|
| 54 |
+
dict), ('meta["config_dict"] has to be of a dict, '
|
| 55 |
+
f'but got {type(config_dict)}')
|
| 56 |
+
elif 'config_file' in runner.meta:
|
| 57 |
+
config_file = runner.meta['config_file']
|
| 58 |
+
config_dict = dict(mmcv.Config.fromfile(config_file))
|
| 59 |
+
else:
|
| 60 |
+
config_dict = None
|
| 61 |
+
if config_dict is not None:
|
| 62 |
+
# 'max_.*iter' is parsed in pavi sdk as the maximum iterations
|
| 63 |
+
# to properly set up the progress bar.
|
| 64 |
+
config_dict = config_dict.copy()
|
| 65 |
+
config_dict.setdefault('max_iter', runner.max_iters)
|
| 66 |
+
# non-serializable values are first converted in
|
| 67 |
+
# mmcv.dump to json
|
| 68 |
+
config_dict = json.loads(
|
| 69 |
+
mmcv.dump(config_dict, file_format='json'))
|
| 70 |
+
session_text = yaml.dump(config_dict)
|
| 71 |
+
self.init_kwargs['session_text'] = session_text
|
| 72 |
+
self.writer = SummaryWriter(**self.init_kwargs)
|
| 73 |
+
|
| 74 |
+
def get_step(self, runner):
|
| 75 |
+
"""Get the total training step/epoch."""
|
| 76 |
+
if self.get_mode(runner) == 'val' and self.by_epoch:
|
| 77 |
+
return self.get_epoch(runner)
|
| 78 |
+
else:
|
| 79 |
+
return self.get_iter(runner)
|
| 80 |
+
|
| 81 |
+
@master_only
|
| 82 |
+
def log(self, runner):
|
| 83 |
+
tags = self.get_loggable_tags(runner, add_mode=False)
|
| 84 |
+
if tags:
|
| 85 |
+
self.writer.add_scalars(
|
| 86 |
+
self.get_mode(runner), tags, self.get_step(runner))
|
| 87 |
+
|
| 88 |
+
@master_only
|
| 89 |
+
def after_run(self, runner):
|
| 90 |
+
if self.add_last_ckpt:
|
| 91 |
+
ckpt_path = osp.join(runner.work_dir, 'latest.pth')
|
| 92 |
+
if osp.islink(ckpt_path):
|
| 93 |
+
ckpt_path = osp.join(runner.work_dir, os.readlink(ckpt_path))
|
| 94 |
+
|
| 95 |
+
if osp.isfile(ckpt_path):
|
| 96 |
+
# runner.epoch += 1 has been done before `after_run`.
|
| 97 |
+
iteration = runner.epoch if self.by_epoch else runner.iter
|
| 98 |
+
return self.writer.add_snapshot_file(
|
| 99 |
+
tag=self.run_name,
|
| 100 |
+
snapshot_file_path=ckpt_path,
|
| 101 |
+
iteration=iteration)
|
| 102 |
+
|
| 103 |
+
# flush the buffer and send a task ending signal to Pavi
|
| 104 |
+
self.writer.close()
|
| 105 |
+
|
| 106 |
+
@master_only
|
| 107 |
+
def before_epoch(self, runner):
|
| 108 |
+
if runner.epoch == 0 and self.add_graph:
|
| 109 |
+
if is_module_wrapper(runner.model):
|
| 110 |
+
_model = runner.model.module
|
| 111 |
+
else:
|
| 112 |
+
_model = runner.model
|
| 113 |
+
device = next(_model.parameters()).device
|
| 114 |
+
data = next(iter(runner.data_loader))
|
| 115 |
+
image = data[self.img_key][0:1].to(device)
|
| 116 |
+
with torch.no_grad():
|
| 117 |
+
self.writer.add_graph(_model, image)
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/hooks/logger/tensorboard.py
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import os.path as osp
|
| 3 |
+
|
| 4 |
+
from annotator.uniformer.mmcv.utils import TORCH_VERSION, digit_version
|
| 5 |
+
from ...dist_utils import master_only
|
| 6 |
+
from ..hook import HOOKS
|
| 7 |
+
from .base import LoggerHook
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
@HOOKS.register_module()
|
| 11 |
+
class TensorboardLoggerHook(LoggerHook):
|
| 12 |
+
|
| 13 |
+
def __init__(self,
|
| 14 |
+
log_dir=None,
|
| 15 |
+
interval=10,
|
| 16 |
+
ignore_last=True,
|
| 17 |
+
reset_flag=False,
|
| 18 |
+
by_epoch=True):
|
| 19 |
+
super(TensorboardLoggerHook, self).__init__(interval, ignore_last,
|
| 20 |
+
reset_flag, by_epoch)
|
| 21 |
+
self.log_dir = log_dir
|
| 22 |
+
|
| 23 |
+
@master_only
|
| 24 |
+
def before_run(self, runner):
|
| 25 |
+
super(TensorboardLoggerHook, self).before_run(runner)
|
| 26 |
+
if (TORCH_VERSION == 'parrots'
|
| 27 |
+
or digit_version(TORCH_VERSION) < digit_version('1.1')):
|
| 28 |
+
try:
|
| 29 |
+
from tensorboardX import SummaryWriter
|
| 30 |
+
except ImportError:
|
| 31 |
+
raise ImportError('Please install tensorboardX to use '
|
| 32 |
+
'TensorboardLoggerHook.')
|
| 33 |
+
else:
|
| 34 |
+
try:
|
| 35 |
+
from torch.utils.tensorboard import SummaryWriter
|
| 36 |
+
except ImportError:
|
| 37 |
+
raise ImportError(
|
| 38 |
+
'Please run "pip install future tensorboard" to install '
|
| 39 |
+
'the dependencies to use torch.utils.tensorboard '
|
| 40 |
+
'(applicable to PyTorch 1.1 or higher)')
|
| 41 |
+
|
| 42 |
+
if self.log_dir is None:
|
| 43 |
+
self.log_dir = osp.join(runner.work_dir, 'tf_logs')
|
| 44 |
+
self.writer = SummaryWriter(self.log_dir)
|
| 45 |
+
|
| 46 |
+
@master_only
|
| 47 |
+
def log(self, runner):
|
| 48 |
+
tags = self.get_loggable_tags(runner, allow_text=True)
|
| 49 |
+
for tag, val in tags.items():
|
| 50 |
+
if isinstance(val, str):
|
| 51 |
+
self.writer.add_text(tag, val, self.get_iter(runner))
|
| 52 |
+
else:
|
| 53 |
+
self.writer.add_scalar(tag, val, self.get_iter(runner))
|
| 54 |
+
|
| 55 |
+
@master_only
|
| 56 |
+
def after_run(self, runner):
|
| 57 |
+
self.writer.close()
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/hooks/logger/text.py
ADDED
|
@@ -0,0 +1,256 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import datetime
|
| 3 |
+
import os
|
| 4 |
+
import os.path as osp
|
| 5 |
+
from collections import OrderedDict
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
import torch.distributed as dist
|
| 9 |
+
|
| 10 |
+
import annotator.uniformer.mmcv as mmcv
|
| 11 |
+
from annotator.uniformer.mmcv.fileio.file_client import FileClient
|
| 12 |
+
from annotator.uniformer.mmcv.utils import is_tuple_of, scandir
|
| 13 |
+
from ..hook import HOOKS
|
| 14 |
+
from .base import LoggerHook
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
@HOOKS.register_module()
|
| 18 |
+
class TextLoggerHook(LoggerHook):
|
| 19 |
+
"""Logger hook in text.
|
| 20 |
+
|
| 21 |
+
In this logger hook, the information will be printed on terminal and
|
| 22 |
+
saved in json file.
|
| 23 |
+
|
| 24 |
+
Args:
|
| 25 |
+
by_epoch (bool, optional): Whether EpochBasedRunner is used.
|
| 26 |
+
Default: True.
|
| 27 |
+
interval (int, optional): Logging interval (every k iterations).
|
| 28 |
+
Default: 10.
|
| 29 |
+
ignore_last (bool, optional): Ignore the log of last iterations in each
|
| 30 |
+
epoch if less than :attr:`interval`. Default: True.
|
| 31 |
+
reset_flag (bool, optional): Whether to clear the output buffer after
|
| 32 |
+
logging. Default: False.
|
| 33 |
+
interval_exp_name (int, optional): Logging interval for experiment
|
| 34 |
+
name. This feature is to help users conveniently get the experiment
|
| 35 |
+
information from screen or log file. Default: 1000.
|
| 36 |
+
out_dir (str, optional): Logs are saved in ``runner.work_dir`` default.
|
| 37 |
+
If ``out_dir`` is specified, logs will be copied to a new directory
|
| 38 |
+
which is the concatenation of ``out_dir`` and the last level
|
| 39 |
+
directory of ``runner.work_dir``. Default: None.
|
| 40 |
+
`New in version 1.3.16.`
|
| 41 |
+
out_suffix (str or tuple[str], optional): Those filenames ending with
|
| 42 |
+
``out_suffix`` will be copied to ``out_dir``.
|
| 43 |
+
Default: ('.log.json', '.log', '.py').
|
| 44 |
+
`New in version 1.3.16.`
|
| 45 |
+
keep_local (bool, optional): Whether to keep local log when
|
| 46 |
+
:attr:`out_dir` is specified. If False, the local log will be
|
| 47 |
+
removed. Default: True.
|
| 48 |
+
`New in version 1.3.16.`
|
| 49 |
+
file_client_args (dict, optional): Arguments to instantiate a
|
| 50 |
+
FileClient. See :class:`mmcv.fileio.FileClient` for details.
|
| 51 |
+
Default: None.
|
| 52 |
+
`New in version 1.3.16.`
|
| 53 |
+
"""
|
| 54 |
+
|
| 55 |
+
def __init__(self,
|
| 56 |
+
by_epoch=True,
|
| 57 |
+
interval=10,
|
| 58 |
+
ignore_last=True,
|
| 59 |
+
reset_flag=False,
|
| 60 |
+
interval_exp_name=1000,
|
| 61 |
+
out_dir=None,
|
| 62 |
+
out_suffix=('.log.json', '.log', '.py'),
|
| 63 |
+
keep_local=True,
|
| 64 |
+
file_client_args=None):
|
| 65 |
+
super(TextLoggerHook, self).__init__(interval, ignore_last, reset_flag,
|
| 66 |
+
by_epoch)
|
| 67 |
+
self.by_epoch = by_epoch
|
| 68 |
+
self.time_sec_tot = 0
|
| 69 |
+
self.interval_exp_name = interval_exp_name
|
| 70 |
+
|
| 71 |
+
if out_dir is None and file_client_args is not None:
|
| 72 |
+
raise ValueError(
|
| 73 |
+
'file_client_args should be "None" when `out_dir` is not'
|
| 74 |
+
'specified.')
|
| 75 |
+
self.out_dir = out_dir
|
| 76 |
+
|
| 77 |
+
if not (out_dir is None or isinstance(out_dir, str)
|
| 78 |
+
or is_tuple_of(out_dir, str)):
|
| 79 |
+
raise TypeError('out_dir should be "None" or string or tuple of '
|
| 80 |
+
'string, but got {out_dir}')
|
| 81 |
+
self.out_suffix = out_suffix
|
| 82 |
+
|
| 83 |
+
self.keep_local = keep_local
|
| 84 |
+
self.file_client_args = file_client_args
|
| 85 |
+
if self.out_dir is not None:
|
| 86 |
+
self.file_client = FileClient.infer_client(file_client_args,
|
| 87 |
+
self.out_dir)
|
| 88 |
+
|
| 89 |
+
def before_run(self, runner):
|
| 90 |
+
super(TextLoggerHook, self).before_run(runner)
|
| 91 |
+
|
| 92 |
+
if self.out_dir is not None:
|
| 93 |
+
self.file_client = FileClient.infer_client(self.file_client_args,
|
| 94 |
+
self.out_dir)
|
| 95 |
+
# The final `self.out_dir` is the concatenation of `self.out_dir`
|
| 96 |
+
# and the last level directory of `runner.work_dir`
|
| 97 |
+
basename = osp.basename(runner.work_dir.rstrip(osp.sep))
|
| 98 |
+
self.out_dir = self.file_client.join_path(self.out_dir, basename)
|
| 99 |
+
runner.logger.info(
|
| 100 |
+
(f'Text logs will be saved to {self.out_dir} by '
|
| 101 |
+
f'{self.file_client.name} after the training process.'))
|
| 102 |
+
|
| 103 |
+
self.start_iter = runner.iter
|
| 104 |
+
self.json_log_path = osp.join(runner.work_dir,
|
| 105 |
+
f'{runner.timestamp}.log.json')
|
| 106 |
+
if runner.meta is not None:
|
| 107 |
+
self._dump_log(runner.meta, runner)
|
| 108 |
+
|
| 109 |
+
def _get_max_memory(self, runner):
|
| 110 |
+
device = getattr(runner.model, 'output_device', None)
|
| 111 |
+
mem = torch.cuda.max_memory_allocated(device=device)
|
| 112 |
+
mem_mb = torch.tensor([mem / (1024 * 1024)],
|
| 113 |
+
dtype=torch.int,
|
| 114 |
+
device=device)
|
| 115 |
+
if runner.world_size > 1:
|
| 116 |
+
dist.reduce(mem_mb, 0, op=dist.ReduceOp.MAX)
|
| 117 |
+
return mem_mb.item()
|
| 118 |
+
|
| 119 |
+
def _log_info(self, log_dict, runner):
|
| 120 |
+
# print exp name for users to distinguish experiments
|
| 121 |
+
# at every ``interval_exp_name`` iterations and the end of each epoch
|
| 122 |
+
if runner.meta is not None and 'exp_name' in runner.meta:
|
| 123 |
+
if (self.every_n_iters(runner, self.interval_exp_name)) or (
|
| 124 |
+
self.by_epoch and self.end_of_epoch(runner)):
|
| 125 |
+
exp_info = f'Exp name: {runner.meta["exp_name"]}'
|
| 126 |
+
runner.logger.info(exp_info)
|
| 127 |
+
|
| 128 |
+
if log_dict['mode'] == 'train':
|
| 129 |
+
if isinstance(log_dict['lr'], dict):
|
| 130 |
+
lr_str = []
|
| 131 |
+
for k, val in log_dict['lr'].items():
|
| 132 |
+
lr_str.append(f'lr_{k}: {val:.3e}')
|
| 133 |
+
lr_str = ' '.join(lr_str)
|
| 134 |
+
else:
|
| 135 |
+
lr_str = f'lr: {log_dict["lr"]:.3e}'
|
| 136 |
+
|
| 137 |
+
# by epoch: Epoch [4][100/1000]
|
| 138 |
+
# by iter: Iter [100/100000]
|
| 139 |
+
if self.by_epoch:
|
| 140 |
+
log_str = f'Epoch [{log_dict["epoch"]}]' \
|
| 141 |
+
f'[{log_dict["iter"]}/{len(runner.data_loader)}]\t'
|
| 142 |
+
else:
|
| 143 |
+
log_str = f'Iter [{log_dict["iter"]}/{runner.max_iters}]\t'
|
| 144 |
+
log_str += f'{lr_str}, '
|
| 145 |
+
|
| 146 |
+
if 'time' in log_dict.keys():
|
| 147 |
+
self.time_sec_tot += (log_dict['time'] * self.interval)
|
| 148 |
+
time_sec_avg = self.time_sec_tot / (
|
| 149 |
+
runner.iter - self.start_iter + 1)
|
| 150 |
+
eta_sec = time_sec_avg * (runner.max_iters - runner.iter - 1)
|
| 151 |
+
eta_str = str(datetime.timedelta(seconds=int(eta_sec)))
|
| 152 |
+
log_str += f'eta: {eta_str}, '
|
| 153 |
+
log_str += f'time: {log_dict["time"]:.3f}, ' \
|
| 154 |
+
f'data_time: {log_dict["data_time"]:.3f}, '
|
| 155 |
+
# statistic memory
|
| 156 |
+
if torch.cuda.is_available():
|
| 157 |
+
log_str += f'memory: {log_dict["memory"]}, '
|
| 158 |
+
else:
|
| 159 |
+
# val/test time
|
| 160 |
+
# here 1000 is the length of the val dataloader
|
| 161 |
+
# by epoch: Epoch[val] [4][1000]
|
| 162 |
+
# by iter: Iter[val] [1000]
|
| 163 |
+
if self.by_epoch:
|
| 164 |
+
log_str = f'Epoch({log_dict["mode"]}) ' \
|
| 165 |
+
f'[{log_dict["epoch"]}][{log_dict["iter"]}]\t'
|
| 166 |
+
else:
|
| 167 |
+
log_str = f'Iter({log_dict["mode"]}) [{log_dict["iter"]}]\t'
|
| 168 |
+
|
| 169 |
+
log_items = []
|
| 170 |
+
for name, val in log_dict.items():
|
| 171 |
+
# TODO: resolve this hack
|
| 172 |
+
# these items have been in log_str
|
| 173 |
+
if name in [
|
| 174 |
+
'mode', 'Epoch', 'iter', 'lr', 'time', 'data_time',
|
| 175 |
+
'memory', 'epoch'
|
| 176 |
+
]:
|
| 177 |
+
continue
|
| 178 |
+
if isinstance(val, float):
|
| 179 |
+
val = f'{val:.4f}'
|
| 180 |
+
log_items.append(f'{name}: {val}')
|
| 181 |
+
log_str += ', '.join(log_items)
|
| 182 |
+
|
| 183 |
+
runner.logger.info(log_str)
|
| 184 |
+
|
| 185 |
+
def _dump_log(self, log_dict, runner):
|
| 186 |
+
# dump log in json format
|
| 187 |
+
json_log = OrderedDict()
|
| 188 |
+
for k, v in log_dict.items():
|
| 189 |
+
json_log[k] = self._round_float(v)
|
| 190 |
+
# only append log at last line
|
| 191 |
+
if runner.rank == 0:
|
| 192 |
+
with open(self.json_log_path, 'a+') as f:
|
| 193 |
+
mmcv.dump(json_log, f, file_format='json')
|
| 194 |
+
f.write('\n')
|
| 195 |
+
|
| 196 |
+
def _round_float(self, items):
|
| 197 |
+
if isinstance(items, list):
|
| 198 |
+
return [self._round_float(item) for item in items]
|
| 199 |
+
elif isinstance(items, float):
|
| 200 |
+
return round(items, 5)
|
| 201 |
+
else:
|
| 202 |
+
return items
|
| 203 |
+
|
| 204 |
+
def log(self, runner):
|
| 205 |
+
if 'eval_iter_num' in runner.log_buffer.output:
|
| 206 |
+
# this doesn't modify runner.iter and is regardless of by_epoch
|
| 207 |
+
cur_iter = runner.log_buffer.output.pop('eval_iter_num')
|
| 208 |
+
else:
|
| 209 |
+
cur_iter = self.get_iter(runner, inner_iter=True)
|
| 210 |
+
|
| 211 |
+
log_dict = OrderedDict(
|
| 212 |
+
mode=self.get_mode(runner),
|
| 213 |
+
epoch=self.get_epoch(runner),
|
| 214 |
+
iter=cur_iter)
|
| 215 |
+
|
| 216 |
+
# only record lr of the first param group
|
| 217 |
+
cur_lr = runner.current_lr()
|
| 218 |
+
if isinstance(cur_lr, list):
|
| 219 |
+
log_dict['lr'] = cur_lr[0]
|
| 220 |
+
else:
|
| 221 |
+
assert isinstance(cur_lr, dict)
|
| 222 |
+
log_dict['lr'] = {}
|
| 223 |
+
for k, lr_ in cur_lr.items():
|
| 224 |
+
assert isinstance(lr_, list)
|
| 225 |
+
log_dict['lr'].update({k: lr_[0]})
|
| 226 |
+
|
| 227 |
+
if 'time' in runner.log_buffer.output:
|
| 228 |
+
# statistic memory
|
| 229 |
+
if torch.cuda.is_available():
|
| 230 |
+
log_dict['memory'] = self._get_max_memory(runner)
|
| 231 |
+
|
| 232 |
+
log_dict = dict(log_dict, **runner.log_buffer.output)
|
| 233 |
+
|
| 234 |
+
self._log_info(log_dict, runner)
|
| 235 |
+
self._dump_log(log_dict, runner)
|
| 236 |
+
return log_dict
|
| 237 |
+
|
| 238 |
+
def after_run(self, runner):
|
| 239 |
+
# copy or upload logs to self.out_dir
|
| 240 |
+
if self.out_dir is not None:
|
| 241 |
+
for filename in scandir(runner.work_dir, self.out_suffix, True):
|
| 242 |
+
local_filepath = osp.join(runner.work_dir, filename)
|
| 243 |
+
out_filepath = self.file_client.join_path(
|
| 244 |
+
self.out_dir, filename)
|
| 245 |
+
with open(local_filepath, 'r') as f:
|
| 246 |
+
self.file_client.put_text(f.read(), out_filepath)
|
| 247 |
+
|
| 248 |
+
runner.logger.info(
|
| 249 |
+
(f'The file {local_filepath} has been uploaded to '
|
| 250 |
+
f'{out_filepath}.'))
|
| 251 |
+
|
| 252 |
+
if not self.keep_local:
|
| 253 |
+
os.remove(local_filepath)
|
| 254 |
+
runner.logger.info(
|
| 255 |
+
(f'{local_filepath} was removed due to the '
|
| 256 |
+
'`self.keep_local=False`'))
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/hooks/logger/wandb.py
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from ...dist_utils import master_only
|
| 3 |
+
from ..hook import HOOKS
|
| 4 |
+
from .base import LoggerHook
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@HOOKS.register_module()
|
| 8 |
+
class WandbLoggerHook(LoggerHook):
|
| 9 |
+
|
| 10 |
+
def __init__(self,
|
| 11 |
+
init_kwargs=None,
|
| 12 |
+
interval=10,
|
| 13 |
+
ignore_last=True,
|
| 14 |
+
reset_flag=False,
|
| 15 |
+
commit=True,
|
| 16 |
+
by_epoch=True,
|
| 17 |
+
with_step=True):
|
| 18 |
+
super(WandbLoggerHook, self).__init__(interval, ignore_last,
|
| 19 |
+
reset_flag, by_epoch)
|
| 20 |
+
self.import_wandb()
|
| 21 |
+
self.init_kwargs = init_kwargs
|
| 22 |
+
self.commit = commit
|
| 23 |
+
self.with_step = with_step
|
| 24 |
+
|
| 25 |
+
def import_wandb(self):
|
| 26 |
+
try:
|
| 27 |
+
import wandb
|
| 28 |
+
except ImportError:
|
| 29 |
+
raise ImportError(
|
| 30 |
+
'Please run "pip install wandb" to install wandb')
|
| 31 |
+
self.wandb = wandb
|
| 32 |
+
|
| 33 |
+
@master_only
|
| 34 |
+
def before_run(self, runner):
|
| 35 |
+
super(WandbLoggerHook, self).before_run(runner)
|
| 36 |
+
if self.wandb is None:
|
| 37 |
+
self.import_wandb()
|
| 38 |
+
if self.init_kwargs:
|
| 39 |
+
self.wandb.init(**self.init_kwargs)
|
| 40 |
+
else:
|
| 41 |
+
self.wandb.init()
|
| 42 |
+
|
| 43 |
+
@master_only
|
| 44 |
+
def log(self, runner):
|
| 45 |
+
tags = self.get_loggable_tags(runner)
|
| 46 |
+
if tags:
|
| 47 |
+
if self.with_step:
|
| 48 |
+
self.wandb.log(
|
| 49 |
+
tags, step=self.get_iter(runner), commit=self.commit)
|
| 50 |
+
else:
|
| 51 |
+
tags['global_step'] = self.get_iter(runner)
|
| 52 |
+
self.wandb.log(tags, commit=self.commit)
|
| 53 |
+
|
| 54 |
+
@master_only
|
| 55 |
+
def after_run(self, runner):
|
| 56 |
+
self.wandb.join()
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/hooks/lr_updater.py
ADDED
|
@@ -0,0 +1,670 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import numbers
|
| 3 |
+
from math import cos, pi
|
| 4 |
+
|
| 5 |
+
import annotator.uniformer.mmcv as mmcv
|
| 6 |
+
from .hook import HOOKS, Hook
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class LrUpdaterHook(Hook):
    """LR Scheduler in MMCV.

    Args:
        by_epoch (bool): LR changes epoch by epoch
        warmup (string): Type of warmup used. It can be None(use no warmup),
            'constant', 'linear' or 'exp'
        warmup_iters (int): The number of iterations or epochs that warmup
            lasts
        warmup_ratio (float): LR used at the beginning of warmup equals to
            warmup_ratio * initial_lr
        warmup_by_epoch (bool): When warmup_by_epoch == True, warmup_iters
            means the number of epochs that warmup lasts, otherwise means the
            number of iteration that warmup lasts
    """

    def __init__(self,
                 by_epoch=True,
                 warmup=None,
                 warmup_iters=0,
                 warmup_ratio=0.1,
                 warmup_by_epoch=False):
        # validate the "warmup" argument
        if warmup is not None:
            if warmup not in ['constant', 'linear', 'exp']:
                # fix: the message previously omitted 'exp', which the check
                # above accepts as a valid warmup type
                raise ValueError(
                    f'"{warmup}" is not a supported type for warming up, valid'
                    ' types are "constant", "linear" and "exp"')
            # a warmup schedule needs a positive duration and a sane ratio
            assert warmup_iters > 0, \
                '"warmup_iters" must be a positive integer'
            assert 0 < warmup_ratio <= 1.0, \
                '"warmup_ratio" must be in range (0,1]'

        self.by_epoch = by_epoch
        self.warmup = warmup
        self.warmup_iters = warmup_iters
        self.warmup_ratio = warmup_ratio
        self.warmup_by_epoch = warmup_by_epoch

        if self.warmup_by_epoch:
            # warmup_iters is interpreted as epochs; the actual iteration
            # count is resolved in before_train_epoch once the epoch length
            # (len(data_loader)) is known
            self.warmup_epochs = self.warmup_iters
            self.warmup_iters = None
        else:
            self.warmup_epochs = None

        self.base_lr = []  # initial lr for all param groups
        self.regular_lr = []  # expected lr if no warming up is performed

    def _set_lr(self, runner, lr_groups):
        """Write ``lr_groups`` into every optimizer param group."""
        if isinstance(runner.optimizer, dict):
            # multiple optimizers: lr_groups is a dict keyed like optimizer
            for k, optim in runner.optimizer.items():
                for param_group, lr in zip(optim.param_groups, lr_groups[k]):
                    param_group['lr'] = lr
        else:
            for param_group, lr in zip(runner.optimizer.param_groups,
                                       lr_groups):
                param_group['lr'] = lr

    def get_lr(self, runner, base_lr):
        """Compute the current lr for one param group; subclasses override."""
        raise NotImplementedError

    def get_regular_lr(self, runner):
        """Return the scheduled (non-warmup) lr for all param groups.

        Returns a dict of lists when multiple optimizers are used, otherwise
        a flat list, mirroring the structure of ``self.base_lr``.
        """
        if isinstance(runner.optimizer, dict):
            lr_groups = {}
            for k in runner.optimizer.keys():
                _lr_group = [
                    self.get_lr(runner, _base_lr)
                    for _base_lr in self.base_lr[k]
                ]
                lr_groups.update({k: _lr_group})

            return lr_groups
        else:
            return [self.get_lr(runner, _base_lr) for _base_lr in self.base_lr]

    def get_warmup_lr(self, cur_iters):
        """Return the warmup lr at iteration ``cur_iters``."""

        def _get_warmup_lr(cur_iters, regular_lr):
            if self.warmup == 'constant':
                warmup_lr = [_lr * self.warmup_ratio for _lr in regular_lr]
            elif self.warmup == 'linear':
                # ramp linearly from warmup_ratio * lr up to the regular lr
                k = (1 - cur_iters / self.warmup_iters) * (1 -
                                                           self.warmup_ratio)
                warmup_lr = [_lr * (1 - k) for _lr in regular_lr]
            elif self.warmup == 'exp':
                # ramp exponentially from warmup_ratio * lr to the regular lr
                k = self.warmup_ratio**(1 - cur_iters / self.warmup_iters)
                warmup_lr = [_lr * k for _lr in regular_lr]
            return warmup_lr

        if isinstance(self.regular_lr, dict):
            lr_groups = {}
            for key, regular_lr in self.regular_lr.items():
                lr_groups[key] = _get_warmup_lr(cur_iters, regular_lr)
            return lr_groups
        else:
            return _get_warmup_lr(cur_iters, self.regular_lr)

    def before_run(self, runner):
        # NOTE: when resuming from a checkpoint, if 'initial_lr' is not saved,
        # it will be set according to the optimizer params
        if isinstance(runner.optimizer, dict):
            self.base_lr = {}
            for k, optim in runner.optimizer.items():
                for group in optim.param_groups:
                    group.setdefault('initial_lr', group['lr'])
                _base_lr = [
                    group['initial_lr'] for group in optim.param_groups
                ]
                self.base_lr.update({k: _base_lr})
        else:
            for group in runner.optimizer.param_groups:
                group.setdefault('initial_lr', group['lr'])
            self.base_lr = [
                group['initial_lr'] for group in runner.optimizer.param_groups
            ]

    def before_train_epoch(self, runner):
        if self.warmup_iters is None:
            # warmup_by_epoch: convert warmup epochs to iterations now that
            # the epoch length is known
            epoch_len = len(runner.data_loader)
            self.warmup_iters = self.warmup_epochs * epoch_len

        if not self.by_epoch:
            return

        self.regular_lr = self.get_regular_lr(runner)
        self._set_lr(runner, self.regular_lr)

    def before_train_iter(self, runner):
        cur_iter = runner.iter
        if not self.by_epoch:
            # iter-based schedule: recompute the regular lr every iteration
            self.regular_lr = self.get_regular_lr(runner)
            if self.warmup is None or cur_iter >= self.warmup_iters:
                self._set_lr(runner, self.regular_lr)
            else:
                warmup_lr = self.get_warmup_lr(cur_iter)
                self._set_lr(runner, warmup_lr)
        elif self.by_epoch:
            # epoch-based schedule: only warmup is applied per-iteration
            if self.warmup is None or cur_iter > self.warmup_iters:
                return
            elif cur_iter == self.warmup_iters:
                self._set_lr(runner, self.regular_lr)
            else:
                warmup_lr = self.get_warmup_lr(cur_iter)
                self._set_lr(runner, warmup_lr)
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
@HOOKS.register_module()
class FixedLrUpdaterHook(LrUpdaterHook):
    """Keep the learning rate fixed at its initial value."""

    def __init__(self, **kwargs):
        super(FixedLrUpdaterHook, self).__init__(**kwargs)

    def get_lr(self, runner, base_lr):
        # no schedule: the regular lr is always the base lr
        return base_lr
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
@HOOKS.register_module()
class StepLrUpdaterHook(LrUpdaterHook):
    """Step LR scheduler with min_lr clipping.

    Args:
        step (int | list[int]): Step to decay the LR. If an int value is given,
            regard it as the decay interval. If a list is given, decay LR at
            these steps.
        gamma (float, optional): Decay LR ratio. Default: 0.1.
        min_lr (float, optional): Minimum LR value to keep. If LR after decay
            is lower than `min_lr`, it will be clipped to this value. If None
            is given, we don't perform lr clipping. Default: None.
    """

    def __init__(self, step, gamma=0.1, min_lr=None, **kwargs):
        # "step" is either a decay interval (int) or explicit milestones
        if isinstance(step, int):
            assert step > 0
        elif isinstance(step, list):
            assert mmcv.is_list_of(step, int)
            assert all(s > 0 for s in step)
        else:
            raise TypeError('"step" must be a list or integer')
        self.step = step
        self.gamma = gamma
        self.min_lr = min_lr
        super(StepLrUpdaterHook, self).__init__(**kwargs)

    def get_lr(self, runner, base_lr):
        progress = runner.epoch if self.by_epoch else runner.iter

        # number of decays applied so far (the exponential term)
        if isinstance(self.step, int):
            exp = progress // self.step
        else:
            exp = len(self.step)
            for i, milestone in enumerate(self.step):
                if progress < milestone:
                    exp = i
                    break

        lr = base_lr * self.gamma**exp
        # optionally clip to a minimum value
        return lr if self.min_lr is None else max(lr, self.min_lr)
|
| 211 |
+
|
| 212 |
+
|
| 213 |
+
@HOOKS.register_module()
class ExpLrUpdaterHook(LrUpdaterHook):
    """Exponential LR decay: lr = base_lr * gamma ** progress."""

    def __init__(self, gamma, **kwargs):
        self.gamma = gamma
        super(ExpLrUpdaterHook, self).__init__(**kwargs)

    def get_lr(self, runner, base_lr):
        # progress counts epochs or iterations depending on by_epoch
        if self.by_epoch:
            progress = runner.epoch
        else:
            progress = runner.iter
        return base_lr * self.gamma**progress
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
@HOOKS.register_module()
class PolyLrUpdaterHook(LrUpdaterHook):
    """Polynomial LR decay from base_lr down to ``min_lr``."""

    def __init__(self, power=1., min_lr=0., **kwargs):
        self.power = power
        self.min_lr = min_lr
        super(PolyLrUpdaterHook, self).__init__(**kwargs)

    def get_lr(self, runner, base_lr):
        # choose epoch- or iteration-based progress
        if self.by_epoch:
            progress, max_progress = runner.epoch, runner.max_epochs
        else:
            progress, max_progress = runner.iter, runner.max_iters
        coeff = (1 - progress / max_progress)**self.power
        return (base_lr - self.min_lr) * coeff + self.min_lr
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
@HOOKS.register_module()
class InvLrUpdaterHook(LrUpdaterHook):
    """Inverse-time LR decay: lr = base_lr * (1 + gamma * p) ** -power."""

    def __init__(self, gamma, power=1., **kwargs):
        self.gamma = gamma
        self.power = power
        super(InvLrUpdaterHook, self).__init__(**kwargs)

    def get_lr(self, runner, base_lr):
        # progress counts epochs or iterations depending on by_epoch
        if self.by_epoch:
            progress = runner.epoch
        else:
            progress = runner.iter
        return base_lr * (1 + self.gamma * progress)**(-self.power)
|
| 255 |
+
|
| 256 |
+
|
| 257 |
+
@HOOKS.register_module()
class CosineAnnealingLrUpdaterHook(LrUpdaterHook):
    """Cosine-anneal the LR from base_lr down to a target LR.

    Exactly one of ``min_lr`` (absolute) or ``min_lr_ratio`` (relative to the
    base lr) must be specified.
    """

    def __init__(self, min_lr=None, min_lr_ratio=None, **kwargs):
        # exactly one of the two must be given
        assert (min_lr is None) ^ (min_lr_ratio is None)
        self.min_lr = min_lr
        self.min_lr_ratio = min_lr_ratio
        super(CosineAnnealingLrUpdaterHook, self).__init__(**kwargs)

    def get_lr(self, runner, base_lr):
        if self.by_epoch:
            progress, max_progress = runner.epoch, runner.max_epochs
        else:
            progress, max_progress = runner.iter, runner.max_iters

        if self.min_lr_ratio is None:
            target_lr = self.min_lr
        else:
            target_lr = base_lr * self.min_lr_ratio
        return annealing_cos(base_lr, target_lr, progress / max_progress)
|
| 279 |
+
|
| 280 |
+
|
| 281 |
+
@HOOKS.register_module()
class FlatCosineAnnealingLrUpdaterHook(LrUpdaterHook):
    """Flat + Cosine lr schedule.

    Modified from https://github.com/fastai/fastai/blob/master/fastai/callback/schedule.py#L128 # noqa: E501

    Args:
        start_percent (float): When to start annealing the learning rate
            after the percentage of the total training steps.
            The value should be in range [0, 1).
            Default: 0.75
        min_lr (float, optional): The minimum lr. Default: None.
        min_lr_ratio (float, optional): The ratio of minimum lr to the base lr.
            Either `min_lr` or `min_lr_ratio` should be specified.
            Default: None.
    """

    def __init__(self,
                 start_percent=0.75,
                 min_lr=None,
                 min_lr_ratio=None,
                 **kwargs):
        # exactly one of min_lr / min_lr_ratio must be given
        assert (min_lr is None) ^ (min_lr_ratio is None)
        if start_percent < 0 or start_percent > 1 or not isinstance(
                start_percent, float):
            raise ValueError(
                'expected float between 0 and 1 start_percent, but '
                f'got {start_percent}')
        self.start_percent = start_percent
        self.min_lr = min_lr
        self.min_lr_ratio = min_lr_ratio
        super(FlatCosineAnnealingLrUpdaterHook, self).__init__(**kwargs)

    def get_lr(self, runner, base_lr):
        # compute progress relative to the start of the annealing phase
        if self.by_epoch:
            start = round(runner.max_epochs * self.start_percent)
            progress = runner.epoch - start
            max_progress = runner.max_epochs - start
        else:
            start = round(runner.max_iters * self.start_percent)
            progress = runner.iter - start
            max_progress = runner.max_iters - start

        if self.min_lr_ratio is None:
            target_lr = self.min_lr
        else:
            target_lr = base_lr * self.min_lr_ratio

        # still inside the flat phase: keep the base lr
        if progress < 0:
            return base_lr
        return annealing_cos(base_lr, target_lr, progress / max_progress)
|
| 333 |
+
|
| 334 |
+
|
| 335 |
+
@HOOKS.register_module()
class CosineRestartLrUpdaterHook(LrUpdaterHook):
    """Cosine annealing with restarts learning rate scheme.

    Args:
        periods (list[int]): Periods for each cosine anneling cycle.
        restart_weights (list[float], optional): Restart weights at each
            restart iteration. Default: [1].
        min_lr (float, optional): The minimum lr. Default: None.
        min_lr_ratio (float, optional): The ratio of minimum lr to the base lr.
            Either `min_lr` or `min_lr_ratio` should be specified.
            Default: None.
    """

    def __init__(self,
                 periods,
                 restart_weights=[1],
                 min_lr=None,
                 min_lr_ratio=None,
                 **kwargs):
        # exactly one of min_lr / min_lr_ratio must be given
        assert (min_lr is None) ^ (min_lr_ratio is None)
        self.periods = periods
        self.min_lr = min_lr
        self.min_lr_ratio = min_lr_ratio
        self.restart_weights = restart_weights
        assert (len(self.periods) == len(self.restart_weights)
                ), 'periods and restart_weights should have the same length.'
        super(CosineRestartLrUpdaterHook, self).__init__(**kwargs)

        # running sums of the periods: the boundary of each cycle
        self.cumulative_periods = [
            sum(self.periods[:i + 1]) for i in range(len(self.periods))
        ]

    def get_lr(self, runner, base_lr):
        progress = runner.epoch if self.by_epoch else runner.iter

        if self.min_lr_ratio is None:
            target_lr = self.min_lr
        else:
            target_lr = base_lr * self.min_lr_ratio

        # locate the current cycle and anneal within it
        idx = get_position_from_periods(progress, self.cumulative_periods)
        current_weight = self.restart_weights[idx]
        nearest_restart = 0 if idx == 0 else self.cumulative_periods[idx - 1]
        current_periods = self.periods[idx]

        alpha = min((progress - nearest_restart) / current_periods, 1)
        return annealing_cos(base_lr, target_lr, alpha, current_weight)
|
| 386 |
+
|
| 387 |
+
|
| 388 |
+
def get_position_from_periods(iteration, cumulative_periods):
    """Get the position from a period list.

    It will return the index of the right-closest number in the period list.
    For example, the cumulative_periods = [100, 200, 300, 400],
    if iteration == 50, return 0;
    if iteration == 210, return 2;
    if iteration == 300, return 3.

    Args:
        iteration (int): Current iteration.
        cumulative_periods (list[int]): Cumulative period list.

    Returns:
        int: The position of the right-closest number in the period list.
    """
    # first boundary strictly greater than the iteration wins
    for idx, boundary in enumerate(cumulative_periods):
        if iteration < boundary:
            return idx
    raise ValueError(f'Current iteration {iteration} exceeds '
                     f'cumulative_periods {cumulative_periods}')
|
| 409 |
+
|
| 410 |
+
|
| 411 |
+
@HOOKS.register_module()
class CyclicLrUpdaterHook(LrUpdaterHook):
    """Cyclic LR Scheduler.

    Implement the cyclical learning rate policy (CLR) described in
    https://arxiv.org/pdf/1506.01186.pdf

    Different from the original paper, we use cosine annealing rather than
    triangular policy inside a cycle. This improves the performance in the
    3D detection area.

    Args:
        by_epoch (bool): Whether to update LR by epoch.
        target_ratio (tuple[float]): Relative ratio of the highest LR and the
            lowest LR to the initial LR.
        cyclic_times (int): Number of cycles during training
        step_ratio_up (float): The ratio of the increasing process of LR in
            the total cycle.
        anneal_strategy (str): {'cos', 'linear'}
            Specifies the annealing strategy: 'cos' for cosine annealing,
            'linear' for linear annealing. Default: 'cos'.
    """

    def __init__(self,
                 by_epoch=False,
                 target_ratio=(10, 1e-4),
                 cyclic_times=1,
                 step_ratio_up=0.4,
                 anneal_strategy='cos',
                 **kwargs):
        # normalize target_ratio to a (up_ratio, down_ratio) pair
        if isinstance(target_ratio, float):
            target_ratio = (target_ratio, target_ratio / 1e5)
        elif isinstance(target_ratio, tuple):
            if len(target_ratio) == 1:
                target_ratio = (target_ratio[0], target_ratio[0] / 1e5)
        else:
            raise ValueError('target_ratio should be either float '
                             f'or tuple, got {type(target_ratio)}')

        assert len(target_ratio) == 2, \
            '"target_ratio" must be list or tuple of two floats'
        assert 0 <= step_ratio_up < 1.0, \
            '"step_ratio_up" must be in range [0,1)'

        self.target_ratio = target_ratio
        self.cyclic_times = cyclic_times
        self.step_ratio_up = step_ratio_up
        self.lr_phases = []  # init lr_phases
        # validate anneal_strategy
        if anneal_strategy not in ['cos', 'linear']:
            raise ValueError('anneal_strategy must be one of "cos" or '
                             f'"linear", instead got {anneal_strategy}')
        if anneal_strategy == 'cos':
            self.anneal_func = annealing_cos
        else:
            self.anneal_func = annealing_linear

        assert not by_epoch, \
            'currently only support "by_epoch" = False'
        super(CyclicLrUpdaterHook, self).__init__(by_epoch, **kwargs)

    def before_run(self, runner):
        super(CyclicLrUpdaterHook, self).before_run(runner)
        # initiate lr_phases; each entry is
        # [start_iter, end_iter, cycle_length, start_ratio, end_ratio]
        max_iter_per_phase = runner.max_iters // self.cyclic_times
        iter_up_phase = int(self.step_ratio_up * max_iter_per_phase)
        self.lr_phases.append(
            [0, iter_up_phase, max_iter_per_phase, 1, self.target_ratio[0]])
        self.lr_phases.append([
            iter_up_phase, max_iter_per_phase, max_iter_per_phase,
            self.target_ratio[0], self.target_ratio[1]
        ])

    def get_lr(self, runner, base_lr):
        curr_iter = runner.iter
        for phase in self.lr_phases:
            start_iter, end_iter, max_iter_per_phase = phase[:3]
            start_ratio, end_ratio = phase[3:]
            # position inside the current cycle
            curr_iter %= max_iter_per_phase
            if start_iter <= curr_iter < end_iter:
                progress = curr_iter - start_iter
                return self.anneal_func(base_lr * start_ratio,
                                        base_lr * end_ratio,
                                        progress / (end_iter - start_iter))
|
| 495 |
+
|
| 496 |
+
|
| 497 |
+
@HOOKS.register_module()
class OneCycleLrUpdaterHook(LrUpdaterHook):
    """One Cycle LR Scheduler.

    The 1cycle learning rate policy changes the learning rate after every
    batch. The one cycle learning rate policy is described in
    https://arxiv.org/pdf/1708.07120.pdf

    Args:
        max_lr (float or list): Upper learning rate boundaries in the cycle
            for each parameter group.
        total_steps (int, optional): The total number of steps in the cycle.
            Note that if a value is not provided here, it will be the max_iter
            of runner. Default: None.
        pct_start (float): The percentage of the cycle (in number of steps)
            spent increasing the learning rate.
            Default: 0.3
        anneal_strategy (str): {'cos', 'linear'}
            Specifies the annealing strategy: 'cos' for cosine annealing,
            'linear' for linear annealing.
            Default: 'cos'
        div_factor (float): Determines the initial learning rate via
            initial_lr = max_lr/div_factor
            Default: 25
        final_div_factor (float): Determines the minimum learning rate via
            min_lr = initial_lr/final_div_factor
            Default: 1e4
        three_phase (bool): If three_phase is True, use a third phase of the
            schedule to annihilate the learning rate according to
            final_div_factor instead of modifying the second phase (the first
            two phases will be symmetrical about the step indicated by
            pct_start).
            Default: False
    """

    def __init__(self,
                 max_lr,
                 total_steps=None,
                 pct_start=0.3,
                 anneal_strategy='cos',
                 div_factor=25,
                 final_div_factor=1e4,
                 three_phase=False,
                 **kwargs):
        # validate by_epoch, currently only support by_epoch = False
        if 'by_epoch' not in kwargs:
            kwargs['by_epoch'] = False
        else:
            assert not kwargs['by_epoch'], \
                'currently only support "by_epoch" = False'
        # fix: the old message omitted numbers, which the check accepts
        if not isinstance(max_lr, (numbers.Number, list, dict)):
            raise ValueError('the type of max_lr must be the one of number, '
                             f'list or dict, but got {type(max_lr)}')
        self._max_lr = max_lr
        if total_steps is not None:
            if not isinstance(total_steps, int):
                # fix: restored the missing space between 'but' and 'got'
                raise ValueError('the type of total_steps must be int, but '
                                 f'got {type(total_steps)}')
            self.total_steps = total_steps
        # validate pct_start
        if pct_start < 0 or pct_start > 1 or not isinstance(pct_start, float):
            raise ValueError('expected float between 0 and 1 pct_start, but '
                             f'got {pct_start}')
        self.pct_start = pct_start
        # validate anneal_strategy
        if anneal_strategy not in ['cos', 'linear']:
            raise ValueError('anneal_strategy must be one of "cos" or '
                             f'"linear", instead got {anneal_strategy}')
        elif anneal_strategy == 'cos':
            self.anneal_func = annealing_cos
        elif anneal_strategy == 'linear':
            self.anneal_func = annealing_linear
        self.div_factor = div_factor
        self.final_div_factor = final_div_factor
        self.three_phase = three_phase
        self.lr_phases = []  # init lr_phases
        super(OneCycleLrUpdaterHook, self).__init__(**kwargs)

    def before_run(self, runner):
        # self.total_steps only exists when the user supplied total_steps
        if hasattr(self, 'total_steps'):
            total_steps = self.total_steps
        else:
            total_steps = runner.max_iters
        if total_steps < runner.max_iters:
            raise ValueError(
                'The total steps must be greater than or equal to max '
                f'iterations {runner.max_iters} of runner, but total steps '
                f'is {total_steps}.')

        # initial lr is max_lr / div_factor for every param group
        if isinstance(runner.optimizer, dict):
            self.base_lr = {}
            for k, optim in runner.optimizer.items():
                _max_lr = format_param(k, optim, self._max_lr)
                self.base_lr[k] = [lr / self.div_factor for lr in _max_lr]
                for group, lr in zip(optim.param_groups, self.base_lr[k]):
                    group.setdefault('initial_lr', lr)
        else:
            k = type(runner.optimizer).__name__
            _max_lr = format_param(k, runner.optimizer, self._max_lr)
            self.base_lr = [lr / self.div_factor for lr in _max_lr]
            for group, lr in zip(runner.optimizer.param_groups, self.base_lr):
                group.setdefault('initial_lr', lr)

        # each phase is [end_iter, start_lr_factor, end_lr_factor]
        if self.three_phase:
            self.lr_phases.append(
                [float(self.pct_start * total_steps) - 1, 1, self.div_factor])
            self.lr_phases.append([
                float(2 * self.pct_start * total_steps) - 2, self.div_factor, 1
            ])
            self.lr_phases.append(
                [total_steps - 1, 1, 1 / self.final_div_factor])
        else:
            self.lr_phases.append(
                [float(self.pct_start * total_steps) - 1, 1, self.div_factor])
            self.lr_phases.append(
                [total_steps - 1, self.div_factor, 1 / self.final_div_factor])

    def get_lr(self, runner, base_lr):
        curr_iter = runner.iter
        start_iter = 0
        # walk the phases until the current iteration falls inside one
        for i, (end_iter, start_lr, end_lr) in enumerate(self.lr_phases):
            if curr_iter <= end_iter:
                pct = (curr_iter - start_iter) / (end_iter - start_iter)
                lr = self.anneal_func(base_lr * start_lr, base_lr * end_lr,
                                      pct)
                break
            start_iter = end_iter
        return lr
|
| 625 |
+
|
| 626 |
+
|
| 627 |
+
def annealing_cos(start, end, factor, weight=1):
    """Calculate annealing cos learning rate.

    Cosine anneal from `weight * start + (1 - weight) * end` to `end` as
    percentage goes from 0.0 to 1.0.

    Args:
        start (float): The starting learning rate of the cosine annealing.
        end (float): The ending learing rate of the cosine annealing.
        factor (float): The coefficient of `pi` when calculating the current
            percentage. Range from 0.0 to 1.0.
        weight (float, optional): The combination factor of `start` and `end`
            when calculating the actual starting learning rate. Default to 1.
    """
    # cosine term sweeps from 2 (factor=0) down to 0 (factor=1)
    cosine_term = 1 + cos(pi * factor)
    return end + 0.5 * weight * (start - end) * cosine_term
|
| 643 |
+
|
| 644 |
+
|
| 645 |
+
def annealing_linear(start, end, factor):
    """Calculate annealing linear learning rate.

    Linear anneal from `start` to `end` as percentage goes from 0.0 to 1.0.

    Args:
        start (float): The starting learning rate of the linear annealing.
        end (float): The ending learning rate of the linear annealing.
        factor (float): The annealing percentage. Range from 0.0 to 1.0.
            (The previous docstring called this "the coefficient of `pi`",
            copied from the cosine variant; no `pi` is involved here.)

    Returns:
        float: The linearly interpolated learning rate.
    """
    return start + (end - start) * factor
|
| 657 |
+
|
| 658 |
+
|
| 659 |
+
def format_param(name, optim, param):
    """Normalize a hyper-parameter spec to one value per param group.

    Args:
        name (str): Name identifying the optimizer (used as the key when
            ``param`` is a dict and in error messages).
        optim: Optimizer whose ``param_groups`` define the expected length.
        param (numbers.Number | list | tuple | dict): Scalar to broadcast,
            per-group sequence, or per-optimizer mapping.

    Returns:
        list | tuple: One value per param group.

    Raises:
        ValueError: If a sequence's length mismatches the param groups.
        KeyError: If ``name`` is missing from a dict ``param``.
    """
    if isinstance(param, numbers.Number):
        # scalar: broadcast to every param group
        return [param] * len(optim.param_groups)
    if isinstance(param, (list, tuple)):  # multi param groups
        if len(param) != len(optim.param_groups):
            raise ValueError(f'expected {len(optim.param_groups)} '
                             f'values for {name}, got {len(param)}')
        return param
    # multi optimizers: param is a mapping keyed by optimizer name
    if name not in param:
        raise KeyError(f'{name} is not found in {param.keys()}')
    return param[name]
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/hooks/memory.py
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import torch
|
| 3 |
+
|
| 4 |
+
from .hook import HOOKS, Hook
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@HOOKS.register_module()
class EmptyCacheHook(Hook):
    """Release cached CUDA memory at configurable points of the loop."""

    def __init__(self, before_epoch=False, after_epoch=True, after_iter=False):
        # flags selecting when torch.cuda.empty_cache() is invoked
        self._before_epoch = before_epoch
        self._after_epoch = after_epoch
        self._after_iter = after_iter

    def _empty_cache_if(self, enabled):
        # single place that actually frees the CUDA caching allocator
        if enabled:
            torch.cuda.empty_cache()

    def after_iter(self, runner):
        self._empty_cache_if(self._after_iter)

    def before_epoch(self, runner):
        self._empty_cache_if(self._before_epoch)

    def after_epoch(self, runner):
        self._empty_cache_if(self._after_epoch)
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/hooks/momentum_updater.py
ADDED
|
@@ -0,0 +1,493 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import annotator.uniformer.mmcv as mmcv
|
| 3 |
+
from .hook import HOOKS, Hook
|
| 4 |
+
from .lr_updater import annealing_cos, annealing_linear, format_param
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class MomentumUpdaterHook(Hook):
    """Base hook for scheduling optimizer momentum during training.

    Subclasses implement :meth:`get_momentum` to define the schedule. The
    hook supports both a single optimizer and a ``dict`` of optimizers (in
    which case all bookkeeping containers become dicts keyed the same way).

    Args:
        by_epoch (bool): Update momentum per epoch (True) or per iteration
            (False). Default: True.
        warmup (str, optional): Warmup type; one of 'constant', 'linear' or
            'exp'. ``None`` disables warmup. Default: None.
        warmup_iters (int): Number of iterations the warmup lasts. Must be
            positive when ``warmup`` is set. Default: 0.
        warmup_ratio (float): Ratio in (0, 1] controlling how far the warmup
            momentum deviates from the regular momentum (momentum is divided,
            not multiplied, since it anneals inversely to the lr). Default: 0.9.
    """

    def __init__(self,
                 by_epoch=True,
                 warmup=None,
                 warmup_iters=0,
                 warmup_ratio=0.9):
        # validate the "warmup" argument
        if warmup is not None:
            if warmup not in ['constant', 'linear', 'exp']:
                raise ValueError(
                    f'"{warmup}" is not a supported type for warming up, valid'
                    ' types are "constant", "linear" and "exp"')
            assert warmup_iters > 0, \
                '"warmup_iters" must be a positive integer'
            assert 0 < warmup_ratio <= 1.0, \
                '"warmup_momentum" must be in range (0,1]'

        self.by_epoch = by_epoch
        self.warmup = warmup
        self.warmup_iters = warmup_iters
        self.warmup_ratio = warmup_ratio

        self.base_momentum = []  # initial momentum for all param groups
        # expected momentum if no warming up is performed; becomes a dict
        # when the runner holds multiple optimizers
        self.regular_momentum = []

    def _set_momentum(self, runner, momentum_groups):
        """Write ``momentum_groups`` into the optimizer(s)' param groups."""
        if isinstance(runner.optimizer, dict):
            for k, optim in runner.optimizer.items():
                for param_group, mom in zip(optim.param_groups,
                                            momentum_groups[k]):
                    if 'momentum' in param_group.keys():
                        param_group['momentum'] = mom
                    elif 'betas' in param_group.keys():
                        # Adam-style optimizers keep momentum as beta1
                        param_group['betas'] = (mom, param_group['betas'][1])
        else:
            for param_group, mom in zip(runner.optimizer.param_groups,
                                        momentum_groups):
                if 'momentum' in param_group.keys():
                    param_group['momentum'] = mom
                elif 'betas' in param_group.keys():
                    param_group['betas'] = (mom, param_group['betas'][1])

    def get_momentum(self, runner, base_momentum):
        """Compute the scheduled momentum; implemented by subclasses."""
        raise NotImplementedError

    def get_regular_momentum(self, runner):
        """Return the scheduled (non-warmup) momentum per param group."""
        if isinstance(runner.optimizer, dict):
            momentum_groups = {}
            for k in runner.optimizer.keys():
                _momentum_group = [
                    self.get_momentum(runner, _base_momentum)
                    for _base_momentum in self.base_momentum[k]
                ]
                momentum_groups.update({k: _momentum_group})
            return momentum_groups
        else:
            return [
                self.get_momentum(runner, _base_momentum)
                for _base_momentum in self.base_momentum
            ]

    def get_warmup_momentum(self, cur_iters):
        """Return the warmup momentum at iteration ``cur_iters``."""

        def _get_warmup_momentum(cur_iters, regular_momentum):
            # BUGFIX: the previous implementation ignored the
            # ``regular_momentum`` argument and read instance attributes
            # (``self.regular_momentum`` / the never-initialized
            # ``self.regular_mom``), which broke warmup entirely and crashed
            # for dict optimizers. Use the argument in every branch.
            if self.warmup == 'constant':
                warmup_momentum = [
                    _momentum / self.warmup_ratio
                    for _momentum in regular_momentum
                ]
            elif self.warmup == 'linear':
                k = (1 - cur_iters / self.warmup_iters) * (1 -
                                                           self.warmup_ratio)
                warmup_momentum = [
                    _momentum / (1 - k) for _momentum in regular_momentum
                ]
            elif self.warmup == 'exp':
                k = self.warmup_ratio**(1 - cur_iters / self.warmup_iters)
                warmup_momentum = [
                    _momentum / k for _momentum in regular_momentum
                ]
            return warmup_momentum

        if isinstance(self.regular_momentum, dict):
            momentum_groups = {}
            for key, regular_momentum in self.regular_momentum.items():
                momentum_groups[key] = _get_warmup_momentum(
                    cur_iters, regular_momentum)
            return momentum_groups
        else:
            return _get_warmup_momentum(cur_iters, self.regular_momentum)

    def before_run(self, runner):
        """Record each param group's initial momentum.

        NOTE: when resuming from a checkpoint, if 'initial_momentum' is not
        saved, it will be set according to the optimizer params.
        """
        if isinstance(runner.optimizer, dict):
            self.base_momentum = {}
            for k, optim in runner.optimizer.items():
                for group in optim.param_groups:
                    if 'momentum' in group.keys():
                        group.setdefault('initial_momentum', group['momentum'])
                    else:
                        group.setdefault('initial_momentum', group['betas'][0])
                _base_momentum = [
                    group['initial_momentum'] for group in optim.param_groups
                ]
                self.base_momentum.update({k: _base_momentum})
        else:
            for group in runner.optimizer.param_groups:
                if 'momentum' in group.keys():
                    group.setdefault('initial_momentum', group['momentum'])
                else:
                    group.setdefault('initial_momentum', group['betas'][0])
            self.base_momentum = [
                group['initial_momentum']
                for group in runner.optimizer.param_groups
            ]

    def before_train_epoch(self, runner):
        if not self.by_epoch:
            return
        # store under the same attribute that get_warmup_momentum reads
        # (was ``self.regular_mom``, inconsistent with __init__)
        self.regular_momentum = self.get_regular_momentum(runner)
        self._set_momentum(runner, self.regular_momentum)

    def before_train_iter(self, runner):
        cur_iter = runner.iter
        if not self.by_epoch:
            self.regular_momentum = self.get_regular_momentum(runner)
            if self.warmup is None or cur_iter >= self.warmup_iters:
                self._set_momentum(runner, self.regular_momentum)
            else:
                warmup_momentum = self.get_warmup_momentum(cur_iter)
                self._set_momentum(runner, warmup_momentum)
        elif self.by_epoch:
            if self.warmup is None or cur_iter > self.warmup_iters:
                return
            elif cur_iter == self.warmup_iters:
                self._set_momentum(runner, self.regular_momentum)
            else:
                warmup_momentum = self.get_warmup_momentum(cur_iter)
                self._set_momentum(runner, warmup_momentum)
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
@HOOKS.register_module()
class StepMomentumUpdaterHook(MomentumUpdaterHook):
    """Step momentum scheduler with min value clipping.

    Args:
        step (int | list[int]): Step to decay the momentum. If an int value is
            given, regard it as the decay interval. If a list is given, decay
            momentum at these steps.
        gamma (float, optional): Decay momentum ratio. Default: 0.5.
        min_momentum (float, optional): Minimum momentum value to keep. If
            momentum after decay is lower than this value, it will be clipped
            accordingly. If None is given, we don't perform lr clipping.
            Default: None.
    """

    def __init__(self, step, gamma=0.5, min_momentum=None, **kwargs):
        if isinstance(step, int):
            assert step > 0
        elif isinstance(step, list):
            assert mmcv.is_list_of(step, int)
            assert all(s > 0 for s in step)
        else:
            raise TypeError('"step" must be a list or integer')
        self.step = step
        self.gamma = gamma
        self.min_momentum = min_momentum
        super(StepMomentumUpdaterHook, self).__init__(**kwargs)

    def get_momentum(self, runner, base_momentum):
        progress = runner.epoch if self.by_epoch else runner.iter

        # how many decay steps have been passed so far
        if isinstance(self.step, int):
            exp = progress // self.step
        else:
            exp = len(self.step)
            for index, milestone in enumerate(self.step):
                if progress < milestone:
                    exp = index
                    break

        momentum = base_momentum * self.gamma**exp
        if self.min_momentum is not None:
            # clip to a minimum value
            momentum = max(momentum, self.min_momentum)
        return momentum
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
@HOOKS.register_module()
class CosineAnnealingMomentumUpdaterHook(MomentumUpdaterHook):
    """Cosine-annealing momentum scheduler.

    Anneals the momentum from its base value towards a target following a
    cosine curve over the whole run. Exactly one of ``min_momentum`` and
    ``min_momentum_ratio`` must be given.
    """

    def __init__(self, min_momentum=None, min_momentum_ratio=None, **kwargs):
        # exactly one of the two target specifications may be provided
        assert (min_momentum is None) ^ (min_momentum_ratio is None)
        self.min_momentum = min_momentum
        self.min_momentum_ratio = min_momentum_ratio
        super(CosineAnnealingMomentumUpdaterHook, self).__init__(**kwargs)

    def get_momentum(self, runner, base_momentum):
        if self.by_epoch:
            progress, max_progress = runner.epoch, runner.max_epochs
        else:
            progress, max_progress = runner.iter, runner.max_iters
        if self.min_momentum_ratio is not None:
            target_momentum = base_momentum * self.min_momentum_ratio
        else:
            target_momentum = self.min_momentum
        return annealing_cos(base_momentum, target_momentum,
                             progress / max_progress)
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
@HOOKS.register_module()
class CyclicMomentumUpdaterHook(MomentumUpdaterHook):
    """Cyclic momentum Scheduler.

    Implement the cyclical momentum scheduler policy described in
    https://arxiv.org/pdf/1708.07120.pdf

    This momentum scheduler usually used together with the CyclicLRUpdater
    to improve the performance in the 3D detection area.

    Attributes:
        target_ratio (tuple[float]): Relative ratio of the lowest momentum and
            the highest momentum to the initial momentum.
        cyclic_times (int): Number of cycles during training
        step_ratio_up (float): The ratio of the increasing process of momentum
            in the total cycle.
        by_epoch (bool): Whether to update momentum by epoch.
    """

    def __init__(self,
                 by_epoch=False,
                 target_ratio=(0.85 / 0.95, 1),
                 cyclic_times=1,
                 step_ratio_up=0.4,
                 **kwargs):
        # normalize target_ratio to a 2-tuple (low, high)
        if isinstance(target_ratio, float):
            target_ratio = (target_ratio, target_ratio / 1e5)
        elif isinstance(target_ratio, tuple):
            if len(target_ratio) == 1:
                target_ratio = (target_ratio[0], target_ratio[0] / 1e5)
        else:
            raise ValueError('target_ratio should be either float '
                             f'or tuple, got {type(target_ratio)}')

        assert len(target_ratio) == 2, \
            '"target_ratio" must be list or tuple of two floats'
        assert 0 <= step_ratio_up < 1.0, \
            '"step_ratio_up" must be in range [0,1)'

        self.target_ratio = target_ratio
        self.cyclic_times = cyclic_times
        self.step_ratio_up = step_ratio_up
        self.momentum_phases = []  # init momentum_phases
        # currently only support by_epoch=False
        assert not by_epoch, \
            'currently only support "by_epoch" = False'
        super(CyclicMomentumUpdaterHook, self).__init__(by_epoch, **kwargs)

    def before_run(self, runner):
        super(CyclicMomentumUpdaterHook, self).before_run(runner)
        # initiate momentum_phases: each entry is
        # [phase start iter, phase end iter, iters per cycle,
        #  start ratio, end ratio]
        iters_per_cycle = runner.max_iters // self.cyclic_times
        up_phase_iters = int(self.step_ratio_up * iters_per_cycle)
        up_phase = [0, up_phase_iters, iters_per_cycle, 1,
                    self.target_ratio[0]]
        down_phase = [
            up_phase_iters, iters_per_cycle, iters_per_cycle,
            self.target_ratio[0], self.target_ratio[1]
        ]
        self.momentum_phases.append(up_phase)
        self.momentum_phases.append(down_phase)

    def get_momentum(self, runner, base_momentum):
        phase_iter = runner.iter
        for (start, end, period, ratio_from,
             ratio_to) in self.momentum_phases:
            # position inside the current cycle
            phase_iter %= period
            if start <= phase_iter < end:
                fraction = (phase_iter - start) / (end - start)
                return annealing_cos(base_momentum * ratio_from,
                                     base_momentum * ratio_to,
                                     fraction)
|
| 296 |
+
|
| 297 |
+
|
| 298 |
+
@HOOKS.register_module()
class OneCycleMomentumUpdaterHook(MomentumUpdaterHook):
    """OneCycle momentum Scheduler.

    This momentum scheduler usually used together with the OneCycleLrUpdater
    to improve the performance.

    Args:
        base_momentum (float or list): Lower momentum boundaries in the cycle
            for each parameter group. Note that momentum is cycled inversely
            to learning rate; at the peak of a cycle, momentum is
            'base_momentum' and learning rate is 'max_lr'.
            Default: 0.85
        max_momentum (float or list): Upper momentum boundaries in the cycle
            for each parameter group. Functionally,
            it defines the cycle amplitude (max_momentum - base_momentum).
            Note that momentum is cycled inversely
            to learning rate; at the start of a cycle, momentum is
            'max_momentum' and learning rate is 'base_lr'
            Default: 0.95
        pct_start (float): The percentage of the cycle (in number of steps)
            spent increasing the learning rate.
            Default: 0.3
        anneal_strategy (str): {'cos', 'linear'}
            Specifies the annealing strategy: 'cos' for cosine annealing,
            'linear' for linear annealing.
            Default: 'cos'
        three_phase (bool): If three_phase is True, use a third phase of the
            schedule to annihilate the learning rate according to
            final_div_factor instead of modifying the second phase (the first
            two phases will be symmetrical about the step indicated by
            pct_start).
            Default: False
    """

    def __init__(self,
                 base_momentum=0.85,
                 max_momentum=0.95,
                 pct_start=0.3,
                 anneal_strategy='cos',
                 three_phase=False,
                 **kwargs):
        # validate by_epoch, currently only support by_epoch=False
        if 'by_epoch' not in kwargs:
            kwargs['by_epoch'] = False
        else:
            assert not kwargs['by_epoch'], \
                'currently only support "by_epoch" = False'
        if not isinstance(base_momentum, (float, list, dict)):
            raise ValueError('base_momentum must be the type among of float,'
                             'list or dict.')
        self._base_momentum = base_momentum
        if not isinstance(max_momentum, (float, list, dict)):
            raise ValueError('max_momentum must be the type among of float,'
                             'list or dict.')
        self._max_momentum = max_momentum
        # validate pct_start
        if pct_start < 0 or pct_start > 1 or not isinstance(pct_start, float):
            raise ValueError('Expected float between 0 and 1 pct_start, but '
                             f'got {pct_start}')
        self.pct_start = pct_start
        # validate anneal_strategy and pick the matching annealing function
        if anneal_strategy not in ['cos', 'linear']:
            raise ValueError('anneal_strategy must by one of "cos" or '
                             f'"linear", instead got {anneal_strategy}')
        elif anneal_strategy == 'cos':
            self.anneal_func = annealing_cos
        elif anneal_strategy == 'linear':
            self.anneal_func = annealing_linear
        self.three_phase = three_phase
        self.momentum_phases = []  # init momentum_phases
        super(OneCycleMomentumUpdaterHook, self).__init__(**kwargs)

    def before_run(self, runner):
        # Seed every param group with 'base_momentum'/'max_momentum' keys and
        # start the run at max_momentum (momentum cycles inversely to lr),
        # then build the phase table used by get_momentum.
        if isinstance(runner.optimizer, dict):
            for k, optim in runner.optimizer.items():
                if ('momentum' not in optim.defaults
                        and 'betas' not in optim.defaults):
                    raise ValueError('optimizer must support momentum with'
                                     'option enabled')
                # Adam-style optimizers expose momentum as beta1
                self.use_beta1 = 'betas' in optim.defaults
                _base_momentum = format_param(k, optim, self._base_momentum)
                _max_momentum = format_param(k, optim, self._max_momentum)
                for group, b_momentum, m_momentum in zip(
                        optim.param_groups, _base_momentum, _max_momentum):
                    if self.use_beta1:
                        _, beta2 = group['betas']
                        group['betas'] = (m_momentum, beta2)
                    else:
                        group['momentum'] = m_momentum
                    group['base_momentum'] = b_momentum
                    group['max_momentum'] = m_momentum
        else:
            optim = runner.optimizer
            if ('momentum' not in optim.defaults
                    and 'betas' not in optim.defaults):
                raise ValueError('optimizer must support momentum with'
                                 'option enabled')
            self.use_beta1 = 'betas' in optim.defaults
            k = type(optim).__name__
            _base_momentum = format_param(k, optim, self._base_momentum)
            _max_momentum = format_param(k, optim, self._max_momentum)
            for group, b_momentum, m_momentum in zip(optim.param_groups,
                                                     _base_momentum,
                                                     _max_momentum):
                if self.use_beta1:
                    _, beta2 = group['betas']
                    group['betas'] = (m_momentum, beta2)
                else:
                    group['momentum'] = m_momentum
                group['base_momentum'] = b_momentum
                group['max_momentum'] = m_momentum

        # Phase table: each entry names the param-group keys to anneal
        # between and the iteration at which the phase ends.
        if self.three_phase:
            self.momentum_phases.append({
                'end_iter':
                float(self.pct_start * runner.max_iters) - 1,
                'start_momentum':
                'max_momentum',
                'end_momentum':
                'base_momentum'
            })
            self.momentum_phases.append({
                'end_iter':
                float(2 * self.pct_start * runner.max_iters) - 2,
                'start_momentum':
                'base_momentum',
                'end_momentum':
                'max_momentum'
            })
            self.momentum_phases.append({
                'end_iter': runner.max_iters - 1,
                'start_momentum': 'max_momentum',
                'end_momentum': 'max_momentum'
            })
        else:
            self.momentum_phases.append({
                'end_iter':
                float(self.pct_start * runner.max_iters) - 1,
                'start_momentum':
                'max_momentum',
                'end_momentum':
                'base_momentum'
            })
            self.momentum_phases.append({
                'end_iter': runner.max_iters - 1,
                'start_momentum': 'base_momentum',
                'end_momentum': 'max_momentum'
            })

    def _set_momentum(self, runner, momentum_groups):
        # Write the computed momenta back into the optimizer param groups
        # (duplicates the base-class method; kept for compatibility).
        if isinstance(runner.optimizer, dict):
            for k, optim in runner.optimizer.items():
                for param_group, mom in zip(optim.param_groups,
                                            momentum_groups[k]):
                    if 'momentum' in param_group.keys():
                        param_group['momentum'] = mom
                    elif 'betas' in param_group.keys():
                        param_group['betas'] = (mom, param_group['betas'][1])
        else:
            for param_group, mom in zip(runner.optimizer.param_groups,
                                        momentum_groups):
                if 'momentum' in param_group.keys():
                    param_group['momentum'] = mom
                elif 'betas' in param_group.keys():
                    param_group['betas'] = (mom, param_group['betas'][1])

    def get_momentum(self, runner, param_group):
        # NOTE: unlike the base class, the second argument here is the whole
        # param_group dict (it carries 'base_momentum'/'max_momentum').
        curr_iter = runner.iter
        start_iter = 0
        for i, phase in enumerate(self.momentum_phases):
            end_iter = phase['end_iter']
            # the last phase always catches the remaining iterations
            if curr_iter <= end_iter or i == len(self.momentum_phases) - 1:
                pct = (curr_iter - start_iter) / (end_iter - start_iter)
                momentum = self.anneal_func(
                    param_group[phase['start_momentum']],
                    param_group[phase['end_momentum']], pct)
                break
            start_iter = end_iter
        return momentum

    def get_regular_momentum(self, runner):
        # Compute the scheduled momentum for every param group of every
        # optimizer (dict of lists for multi-optimizer runners).
        if isinstance(runner.optimizer, dict):
            momentum_groups = {}
            for k, optim in runner.optimizer.items():
                _momentum_group = [
                    self.get_momentum(runner, param_group)
                    for param_group in optim.param_groups
                ]
                momentum_groups.update({k: _momentum_group})
            return momentum_groups
        else:
            momentum_groups = []
            for param_group in runner.optimizer.param_groups:
                momentum_groups.append(self.get_momentum(runner, param_group))
            return momentum_groups
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/hooks/optimizer.py
ADDED
|
@@ -0,0 +1,508 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import copy
|
| 3 |
+
from collections import defaultdict
|
| 4 |
+
from itertools import chain
|
| 5 |
+
|
| 6 |
+
from torch.nn.utils import clip_grad
|
| 7 |
+
|
| 8 |
+
from annotator.uniformer.mmcv.utils import TORCH_VERSION, _BatchNorm, digit_version
|
| 9 |
+
from ..dist_utils import allreduce_grads
|
| 10 |
+
from ..fp16_utils import LossScaler, wrap_fp16_model
|
| 11 |
+
from .hook import HOOKS, Hook
|
| 12 |
+
|
| 13 |
+
try:
|
| 14 |
+
# If PyTorch version >= 1.6.0, torch.cuda.amp.GradScaler would be imported
|
| 15 |
+
# and used; otherwise, auto fp16 will adopt mmcv's implementation.
|
| 16 |
+
from torch.cuda.amp import GradScaler
|
| 17 |
+
except ImportError:
|
| 18 |
+
pass
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
@HOOKS.register_module()
class OptimizerHook(Hook):
    """Standard optimizer hook: zero grads, backward, optional gradient
    clipping, then step.

    Args:
        grad_clip (dict, optional): Keyword arguments forwarded to
            ``torch.nn.utils.clip_grad.clip_grad_norm_``. ``None`` disables
            clipping.
    """

    def __init__(self, grad_clip=None):
        self.grad_clip = grad_clip

    def clip_grads(self, params):
        """Clip gradients of trainable params; return the total norm."""
        trainable = [
            p for p in params if p.requires_grad and p.grad is not None
        ]
        if trainable:
            return clip_grad.clip_grad_norm_(trainable, **self.grad_clip)

    def after_train_iter(self, runner):
        runner.optimizer.zero_grad()
        runner.outputs['loss'].backward()
        if self.grad_clip is not None:
            grad_norm = self.clip_grads(runner.model.parameters())
            if grad_norm is not None:
                # Add grad norm to the logger
                runner.log_buffer.update({'grad_norm': float(grad_norm)},
                                         runner.outputs['num_samples'])
        runner.optimizer.step()
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
@HOOKS.register_module()
class GradientCumulativeOptimizerHook(OptimizerHook):
    """Optimizer Hook implements multi-iters gradient cumulating.

    Args:
        cumulative_iters (int, optional): Num of gradient cumulative iters.
            The optimizer will step every `cumulative_iters` iters.
            Defaults to 1.

    Examples:
        >>> # Use cumulative_iters to simulate a large batch size
        >>> # It is helpful when the hardware cannot handle a large batch size.
        >>> loader = DataLoader(data, batch_size=64)
        >>> optim_hook = GradientCumulativeOptimizerHook(cumulative_iters=4)
        >>> # almost equals to
        >>> loader = DataLoader(data, batch_size=256)
        >>> optim_hook = OptimizerHook()
    """

    def __init__(self, cumulative_iters=1, **kwargs):
        super(GradientCumulativeOptimizerHook, self).__init__(**kwargs)

        assert isinstance(cumulative_iters, int) and cumulative_iters > 0, \
            f'cumulative_iters only accepts positive int, but got ' \
            f'{type(cumulative_iters)} instead.'

        self.cumulative_iters = cumulative_iters
        # number of iters that form complete accumulation windows; computed
        # lazily in _init because it depends on the runner
        self.divisible_iters = 0
        # leftover iters at the end of training (smaller final window)
        self.remainder_iters = 0
        self.initialized = False

    def has_batch_norm(self, module):
        # Recursively check whether any submodule is a BatchNorm layer;
        # BN statistics are computed per (small) physical batch, so
        # accumulation changes results slightly.
        if isinstance(module, _BatchNorm):
            return True
        for m in module.children():
            if self.has_batch_norm(m):
                return True
        return False

    def _init(self, runner):
        # Lazily split the remaining iterations into complete accumulation
        # windows plus a remainder, warning about resume misalignment.
        if runner.iter % self.cumulative_iters != 0:
            runner.logger.warning(
                'Resume iter number is not divisible by cumulative_iters in '
                'GradientCumulativeOptimizerHook, which means the gradient of '
                'some iters is lost and the result may be influenced slightly.'
            )

        if self.has_batch_norm(runner.model) and self.cumulative_iters > 1:
            runner.logger.warning(
                'GradientCumulativeOptimizerHook may slightly decrease '
                'performance if the model has BatchNorm layers.')

        residual_iters = runner.max_iters - runner.iter

        self.divisible_iters = (
            residual_iters // self.cumulative_iters * self.cumulative_iters)
        self.remainder_iters = residual_iters - self.divisible_iters

        self.initialized = True

    def after_train_iter(self, runner):
        if not self.initialized:
            self._init(runner)

        # scale the loss so accumulated grads average over the window;
        # the trailing (shorter) window uses its own size
        if runner.iter < self.divisible_iters:
            loss_factor = self.cumulative_iters
        else:
            loss_factor = self.remainder_iters
        loss = runner.outputs['loss']
        loss = loss / loss_factor
        loss.backward()

        # step only at window boundaries (or at the very last iter)
        if (self.every_n_iters(runner, self.cumulative_iters)
                or self.is_last_iter(runner)):

            if self.grad_clip is not None:
                grad_norm = self.clip_grads(runner.model.parameters())
                if grad_norm is not None:
                    # Add grad norm to the logger
                    runner.log_buffer.update({'grad_norm': float(grad_norm)},
                                             runner.outputs['num_samples'])
            runner.optimizer.step()
            runner.optimizer.zero_grad()
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
if (TORCH_VERSION != 'parrots'
|
| 131 |
+
and digit_version(TORCH_VERSION) >= digit_version('1.6.0')):
|
| 132 |
+
|
| 133 |
+
    @HOOKS.register_module()
    class Fp16OptimizerHook(OptimizerHook):
        """FP16 optimizer hook (using PyTorch's implementation).

        If you are using PyTorch >= 1.6, torch.cuda.amp is used as the backend,
        to take care of the optimization procedure.

        Args:
            loss_scale (float | str | dict): Scale factor configuration.
                If loss_scale is a float, static loss scaling will be used with
                the specified scale. If loss_scale is a string, it must be
                'dynamic', then dynamic loss scaling will be used.
                It can also be a dict containing arguments of GradScalar.
                Defaults to 512. For Pytorch >= 1.6, mmcv uses official
                implementation of GradScaler. If you use a dict version of
                loss_scale to create GradScaler, please refer to:
                https://pytorch.org/docs/stable/amp.html#torch.cuda.amp.GradScaler
                for the parameters.

        Examples:
            >>> loss_scale = dict(
            ...     init_scale=65536.0,
            ...     growth_factor=2.0,
            ...     backoff_factor=0.5,
            ...     growth_interval=2000
            ... )
            >>> optimizer_hook = Fp16OptimizerHook(loss_scale=loss_scale)
        """

        def __init__(self,
                     grad_clip=None,
                     coalesce=True,
                     bucket_size_mb=-1,
                     loss_scale=512.,
                     distributed=True):
            self.grad_clip = grad_clip
            self.coalesce = coalesce
            self.bucket_size_mb = bucket_size_mb
            self.distributed = distributed
            # extra arg passed to scaler.update(); only set for static scaling
            self._scale_update_param = None
            if loss_scale == 'dynamic':
                self.loss_scaler = GradScaler()
            elif isinstance(loss_scale, float):
                self._scale_update_param = loss_scale
                self.loss_scaler = GradScaler(init_scale=loss_scale)
            elif isinstance(loss_scale, dict):
                self.loss_scaler = GradScaler(**loss_scale)
            else:
                raise ValueError('loss_scale must be of type float, dict, or '
                                 f'"dynamic", got {loss_scale}')

        def before_run(self, runner):
            """Preparing steps before Mixed Precision Training."""
            # wrap model mode to fp16
            wrap_fp16_model(runner.model)
            # resume from state dict
            if 'fp16' in runner.meta and 'loss_scaler' in runner.meta['fp16']:
                scaler_state_dict = runner.meta['fp16']['loss_scaler']
                self.loss_scaler.load_state_dict(scaler_state_dict)

        def copy_grads_to_fp32(self, fp16_net, fp32_weights):
            """Copy gradients from fp16 model to fp32 weight copy."""
            for fp32_param, fp16_param in zip(fp32_weights,
                                              fp16_net.parameters()):
                if fp16_param.grad is not None:
                    if fp32_param.grad is None:
                        # allocate the fp32 grad buffer on first use
                        fp32_param.grad = fp32_param.data.new(
                            fp32_param.size())
                    fp32_param.grad.copy_(fp16_param.grad)

        def copy_params_to_fp16(self, fp16_net, fp32_weights):
            """Copy updated params from fp32 weight copy to fp16 model."""
            for fp16_param, fp32_param in zip(fp16_net.parameters(),
                                              fp32_weights):
                fp16_param.data.copy_(fp32_param.data)

        def after_train_iter(self, runner):
            """Backward optimization steps for Mixed Precision Training. For
            dynamic loss scaling, please refer to
            https://pytorch.org/docs/stable/amp.html#torch.cuda.amp.GradScaler.

            1. Scale the loss by a scale factor.
            2. Backward the loss to obtain the gradients.
            3. Unscale the optimizer's gradient tensors.
            4. Call optimizer.step() and update scale factor.
            5. Save loss_scaler state_dict for resume purpose.
            """
            # clear grads of last iteration
            runner.model.zero_grad()
            runner.optimizer.zero_grad()

            self.loss_scaler.scale(runner.outputs['loss']).backward()
            # unscale before clipping so the clip threshold applies to the
            # true gradient magnitudes
            self.loss_scaler.unscale_(runner.optimizer)
            # grad clip
            if self.grad_clip is not None:
                grad_norm = self.clip_grads(runner.model.parameters())
                if grad_norm is not None:
                    # Add grad norm to the logger
                    runner.log_buffer.update({'grad_norm': float(grad_norm)},
                                             runner.outputs['num_samples'])
            # backward and update scaler
            self.loss_scaler.step(runner.optimizer)
            self.loss_scaler.update(self._scale_update_param)

            # save state_dict of loss_scaler
            runner.meta.setdefault(
                'fp16', {})['loss_scaler'] = self.loss_scaler.state_dict()
|
| 240 |
+
|
| 241 |
+
@HOOKS.register_module()
|
| 242 |
+
class GradientCumulativeFp16OptimizerHook(GradientCumulativeOptimizerHook,
|
| 243 |
+
Fp16OptimizerHook):
|
| 244 |
+
"""Fp16 optimizer Hook (using PyTorch's implementation) implements
|
| 245 |
+
multi-iters gradient cumulating.
|
| 246 |
+
|
| 247 |
+
If you are using PyTorch >= 1.6, torch.cuda.amp is used as the backend,
|
| 248 |
+
to take care of the optimization procedure.
|
| 249 |
+
"""
|
| 250 |
+
|
| 251 |
+
def __init__(self, *args, **kwargs):
|
| 252 |
+
super(GradientCumulativeFp16OptimizerHook,
|
| 253 |
+
self).__init__(*args, **kwargs)
|
| 254 |
+
|
| 255 |
+
def after_train_iter(self, runner):
|
| 256 |
+
if not self.initialized:
|
| 257 |
+
self._init(runner)
|
| 258 |
+
|
| 259 |
+
if runner.iter < self.divisible_iters:
|
| 260 |
+
loss_factor = self.cumulative_iters
|
| 261 |
+
else:
|
| 262 |
+
loss_factor = self.remainder_iters
|
| 263 |
+
loss = runner.outputs['loss']
|
| 264 |
+
loss = loss / loss_factor
|
| 265 |
+
|
| 266 |
+
self.loss_scaler.scale(loss).backward()
|
| 267 |
+
|
| 268 |
+
if (self.every_n_iters(runner, self.cumulative_iters)
|
| 269 |
+
or self.is_last_iter(runner)):
|
| 270 |
+
|
| 271 |
+
# copy fp16 grads in the model to fp32 params in the optimizer
|
| 272 |
+
self.loss_scaler.unscale_(runner.optimizer)
|
| 273 |
+
|
| 274 |
+
if self.grad_clip is not None:
|
| 275 |
+
grad_norm = self.clip_grads(runner.model.parameters())
|
| 276 |
+
if grad_norm is not None:
|
| 277 |
+
# Add grad norm to the logger
|
| 278 |
+
runner.log_buffer.update(
|
| 279 |
+
{'grad_norm': float(grad_norm)},
|
| 280 |
+
runner.outputs['num_samples'])
|
| 281 |
+
|
| 282 |
+
# backward and update scaler
|
| 283 |
+
self.loss_scaler.step(runner.optimizer)
|
| 284 |
+
self.loss_scaler.update(self._scale_update_param)
|
| 285 |
+
|
| 286 |
+
# save state_dict of loss_scaler
|
| 287 |
+
runner.meta.setdefault(
|
| 288 |
+
'fp16', {})['loss_scaler'] = self.loss_scaler.state_dict()
|
| 289 |
+
|
| 290 |
+
# clear grads
|
| 291 |
+
runner.model.zero_grad()
|
| 292 |
+
runner.optimizer.zero_grad()
|
| 293 |
+
|
| 294 |
+
else:
|
| 295 |
+
|
| 296 |
+
@HOOKS.register_module()
class Fp16OptimizerHook(OptimizerHook):
    """FP16 optimizer hook (mmcv's implementation).

    The steps of the fp16 optimizer are as follows.

    1. Scale the loss value.
    2. BP in the fp16 model.
    3. Copy gradients from fp16 model to fp32 weights.
    4. Update fp32 weights.
    5. Copy updated parameters from fp32 weights to fp16 model.

    Refer to https://arxiv.org/abs/1710.03740 for more details.

    Args:
        grad_clip (dict, optional): Config for gradient clipping; ``None``
            disables clipping.
        coalesce (bool): Whether to coalesce gradients before all-reduce.
        bucket_size_mb (int): Bucket size (MB) for coalesced all-reduce;
            -1 means a single bucket.
        loss_scale (float | str | dict): Scale factor configuration.
            If loss_scale is a float, static loss scaling will be used with
            the specified scale. If loss_scale is a string, it must be
            'dynamic', then dynamic loss scaling will be used.
            It can also be a dict containing arguments of LossScaler.
            Defaults to 512.
        distributed (bool): Whether gradients should be all-reduced across
            processes.
    """

    def __init__(self,
                 grad_clip=None,
                 coalesce=True,
                 bucket_size_mb=-1,
                 loss_scale=512.,
                 distributed=True):
        self.grad_clip = grad_clip
        self.coalesce = coalesce
        self.bucket_size_mb = bucket_size_mb
        self.distributed = distributed
        # Build the LossScaler from the three accepted config forms.
        if loss_scale == 'dynamic':
            self.loss_scaler = LossScaler(mode='dynamic')
        elif isinstance(loss_scale, float):
            self.loss_scaler = LossScaler(
                init_scale=loss_scale, mode='static')
        elif isinstance(loss_scale, dict):
            self.loss_scaler = LossScaler(**loss_scale)
        else:
            raise ValueError('loss_scale must be of type float, dict, or '
                             f'"dynamic", got {loss_scale}')

    def before_run(self, runner):
        """Preparing steps before Mixed Precision Training.

        1. Make a master copy of fp32 weights for optimization.
        2. Convert the main model from fp32 to fp16.
        """
        # keep a copy of fp32 weights: deep-copy the param groups so the
        # optimizer owns fp32 masters separate from the (soon fp16) model.
        old_groups = runner.optimizer.param_groups
        runner.optimizer.param_groups = copy.deepcopy(
            runner.optimizer.param_groups)
        state = defaultdict(dict)
        # Map each old parameter object to its deep-copied counterpart so
        # existing optimizer state (e.g. momentum buffers) can be re-keyed.
        p_map = {
            old_p: p
            for old_p, p in zip(
                chain(*(g['params'] for g in old_groups)),
                chain(*(g['params']
                        for g in runner.optimizer.param_groups)))
        }
        for k, v in runner.optimizer.state.items():
            state[p_map[k]] = v
        runner.optimizer.state = state
        # convert model to fp16
        wrap_fp16_model(runner.model)
        # resume from state dict, if a loss_scaler snapshot was checkpointed
        if 'fp16' in runner.meta and 'loss_scaler' in runner.meta['fp16']:
            scaler_state_dict = runner.meta['fp16']['loss_scaler']
            self.loss_scaler.load_state_dict(scaler_state_dict)

    def copy_grads_to_fp32(self, fp16_net, fp32_weights):
        """Copy gradients from fp16 model to fp32 weight copy.

        NOTE(review): relies on ``fp16_net.parameters()`` and
        ``fp32_weights`` iterating in the same order — true here because
        both derive from the same param_groups made in ``before_run``.
        """
        for fp32_param, fp16_param in zip(fp32_weights,
                                          fp16_net.parameters()):
            if fp16_param.grad is not None:
                # Lazily allocate the fp32 grad buffer on first use.
                if fp32_param.grad is None:
                    fp32_param.grad = fp32_param.data.new(
                        fp32_param.size())
                fp32_param.grad.copy_(fp16_param.grad)

    def copy_params_to_fp16(self, fp16_net, fp32_weights):
        """Copy updated params from fp32 weight copy to fp16 model."""
        for fp16_param, fp32_param in zip(fp16_net.parameters(),
                                          fp32_weights):
            fp16_param.data.copy_(fp32_param.data)

    def after_train_iter(self, runner):
        """Backward optimization steps for Mixed Precision Training. For
        dynamic loss scaling, please refer to `loss_scaler.py`.

        1. Scale the loss by a scale factor.
        2. Backward the loss to obtain the gradients (fp16).
        3. Copy gradients from the model to the fp32 weight copy.
        4. Scale the gradients back and update the fp32 weight copy.
        5. Copy back the params from fp32 weight copy to the fp16 model.
        6. Save loss_scaler state_dict for resume purpose.
        """
        # clear grads of last iteration
        runner.model.zero_grad()
        runner.optimizer.zero_grad()
        # scale the loss value
        scaled_loss = runner.outputs['loss'] * self.loss_scaler.loss_scale
        scaled_loss.backward()
        # copy fp16 grads in the model to fp32 params in the optimizer

        fp32_weights = []
        for param_group in runner.optimizer.param_groups:
            fp32_weights += param_group['params']
        self.copy_grads_to_fp32(runner.model, fp32_weights)
        # allreduce grads
        if self.distributed:
            allreduce_grads(fp32_weights, self.coalesce,
                            self.bucket_size_mb)

        has_overflow = self.loss_scaler.has_overflow(fp32_weights)
        # if has overflow, skip this iteration (no optimizer step)
        if not has_overflow:
            # scale the gradients back (undo loss scaling) before clipping
            for param in fp32_weights:
                if param.grad is not None:
                    param.grad.div_(self.loss_scaler.loss_scale)
            if self.grad_clip is not None:
                grad_norm = self.clip_grads(fp32_weights)
                if grad_norm is not None:
                    # Add grad norm to the logger
                    runner.log_buffer.update(
                        {'grad_norm': float(grad_norm)},
                        runner.outputs['num_samples'])
            # update fp32 params
            runner.optimizer.step()
            # copy fp32 params to the fp16 model
            self.copy_params_to_fp16(runner.model, fp32_weights)
        # Always tell the scaler whether this step overflowed so it can
        # grow/shrink the dynamic scale.
        self.loss_scaler.update_scale(has_overflow)
        if has_overflow:
            runner.logger.warning('Check overflow, downscale loss scale '
                                  f'to {self.loss_scaler.cur_scale}')

        # save state_dict of loss_scaler so training can resume cleanly
        runner.meta.setdefault(
            'fp16', {})['loss_scaler'] = self.loss_scaler.state_dict()
|
| 437 |
+
|
| 438 |
+
@HOOKS.register_module()
class GradientCumulativeFp16OptimizerHook(GradientCumulativeOptimizerHook,
                                          Fp16OptimizerHook):
    """Fp16 optimizer Hook (using mmcv implementation) implements multi-
    iters gradient cumulating.

    Gradients accumulate over ``cumulative_iters`` iterations; the fp32
    master weights are only stepped (and grads cleared) on the boundary
    iteration or the last iteration of training.
    """

    def __init__(self, *args, **kwargs):
        super(GradientCumulativeFp16OptimizerHook,
              self).__init__(*args, **kwargs)

    def after_train_iter(self, runner):
        """Accumulate scaled fp16 gradients; step fp32 masters on boundaries."""
        if not self.initialized:
            self._init(runner)

        # Normalize the loss so the accumulated gradient matches a single
        # large-batch step; the tail of training may have fewer iterations.
        if runner.iter < self.divisible_iters:
            loss_factor = self.cumulative_iters
        else:
            loss_factor = self.remainder_iters

        loss = runner.outputs['loss']
        loss = loss / loss_factor

        # scale the loss value
        scaled_loss = loss * self.loss_scaler.loss_scale
        scaled_loss.backward()

        if (self.every_n_iters(runner, self.cumulative_iters)
                or self.is_last_iter(runner)):

            # copy fp16 grads in the model to fp32 params in the optimizer
            fp32_weights = []
            for param_group in runner.optimizer.param_groups:
                fp32_weights += param_group['params']
            self.copy_grads_to_fp32(runner.model, fp32_weights)
            # allreduce grads
            if self.distributed:
                allreduce_grads(fp32_weights, self.coalesce,
                                self.bucket_size_mb)

            has_overflow = self.loss_scaler.has_overflow(fp32_weights)
            # if has overflow, skip this iteration (no optimizer step)
            if not has_overflow:
                # scale the gradients back before clipping/stepping
                for param in fp32_weights:
                    if param.grad is not None:
                        param.grad.div_(self.loss_scaler.loss_scale)
                if self.grad_clip is not None:
                    grad_norm = self.clip_grads(fp32_weights)
                    if grad_norm is not None:
                        # Add grad norm to the logger
                        runner.log_buffer.update(
                            {'grad_norm': float(grad_norm)},
                            runner.outputs['num_samples'])
                # update fp32 params
                runner.optimizer.step()
                # copy fp32 params to the fp16 model
                self.copy_params_to_fp16(runner.model, fp32_weights)
            else:
                runner.logger.warning(
                    'Check overflow, downscale loss scale '
                    f'to {self.loss_scaler.cur_scale}')

            self.loss_scaler.update_scale(has_overflow)

            # save state_dict of loss_scaler for resume purpose
            runner.meta.setdefault(
                'fp16', {})['loss_scaler'] = self.loss_scaler.state_dict()

            # clear grads only after a boundary step, so accumulation
            # continues across intermediate iterations
            runner.model.zero_grad()
            runner.optimizer.zero_grad()
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/hooks/profiler.py
ADDED
|
@@ -0,0 +1,180 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import warnings
|
| 3 |
+
from typing import Callable, List, Optional, Union
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
|
| 7 |
+
from ..dist_utils import master_only
|
| 8 |
+
from .hook import HOOKS, Hook
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
@HOOKS.register_module()
class ProfilerHook(Hook):
    """Profiler to analyze performance during training.

    PyTorch Profiler is a tool that allows the collection of the performance
    metrics during the training. More details on Profiler can be found at
    https://pytorch.org/docs/1.8.1/profiler.html#torch.profiler.profile

    Args:
        by_epoch (bool): Profile performance by epoch or by iteration.
            Default: True.
        profile_iters (int): Number of iterations for profiling.
            If ``by_epoch=True``, profile_iters indicates that they are the
            first profile_iters epochs at the beginning of the
            training, otherwise it indicates the first profile_iters
            iterations. Default: 1.
        activities (list[str]): List of activity groups (CPU, CUDA) to use in
            profiling. Default: ['cpu', 'cuda'].
        schedule (dict, optional): Config of generating the callable schedule.
            if schedule is None, profiler will not add step markers into the
            trace and table view. Default: None.
        on_trace_ready (callable, dict): Either a handler or a dict of generate
            handler. Default: None.
        record_shapes (bool): Save information about operator's input shapes.
            Default: False.
        profile_memory (bool): Track tensor memory allocation/deallocation.
            Default: False.
        with_stack (bool): Record source information (file and line number)
            for the ops. Default: False.
        with_flops (bool): Use formula to estimate the FLOPS of specific
            operators (matrix multiplication and 2D convolution).
            Default: False.
        json_trace_path (str, optional): Exports the collected trace in Chrome
            JSON format. Default: None.

    Example:
        >>> runner = ...  # instantiate a Runner
        >>> # tensorboard trace
        >>> trace_config = dict(type='tb_trace', dir_name='work_dir')
        >>> profiler_config = dict(on_trace_ready=trace_config)
        >>> runner.register_profiler_hook(profiler_config)
        >>> runner.run(data_loaders=[trainloader], workflow=[('train', 1)])
    """

    def __init__(self,
                 by_epoch: bool = True,
                 profile_iters: int = 1,
                 activities: List[str] = ['cpu', 'cuda'],
                 schedule: Optional[dict] = None,
                 on_trace_ready: Optional[Union[Callable, dict]] = None,
                 record_shapes: bool = False,
                 profile_memory: bool = False,
                 with_stack: bool = False,
                 with_flops: bool = False,
                 json_trace_path: Optional[str] = None) -> None:
        # torch.profiler only exists in torch >= 1.8.1; fail early with a
        # clear message otherwise.
        try:
            from torch import profiler  # torch version >= 1.8.1
        except ImportError:
            raise ImportError('profiler is the new feature of torch1.8.1, '
                              f'but your version is {torch.__version__}')

        assert isinstance(by_epoch, bool), '``by_epoch`` should be a boolean.'
        self.by_epoch = by_epoch

        if profile_iters < 1:
            raise ValueError('profile_iters should be greater than 0, but got '
                             f'{profile_iters}')
        self.profile_iters = profile_iters

        if not isinstance(activities, list):
            raise ValueError(
                f'activities should be list, but got {type(activities)}')
        # Translate string activity names to torch.profiler enum members.
        self.activities = []
        for activity in activities:
            activity = activity.lower()
            if activity == 'cpu':
                self.activities.append(profiler.ProfilerActivity.CPU)
            elif activity == 'cuda':
                self.activities.append(profiler.ProfilerActivity.CUDA)
            else:
                raise ValueError(
                    f'activity should be "cpu" or "cuda", but got {activity}')

        if schedule is not None:
            self.schedule = profiler.schedule(**schedule)
        else:
            self.schedule = None

        self.on_trace_ready = on_trace_ready
        self.record_shapes = record_shapes
        self.profile_memory = profile_memory
        self.with_stack = with_stack
        self.with_flops = with_flops
        self.json_trace_path = json_trace_path

    @master_only
    def before_run(self, runner):
        # Sanity-check profile_iters against the runner's configured length.
        if self.by_epoch and runner.max_epochs < self.profile_iters:
            raise ValueError('self.profile_iters should not be greater than '
                             f'{runner.max_epochs}')

        if not self.by_epoch and runner.max_iters < self.profile_iters:
            raise ValueError('self.profile_iters should not be greater than '
                             f'{runner.max_iters}')

        # Resolve on_trace_ready into an actual callable handler.
        if callable(self.on_trace_ready):  # handler
            _on_trace_ready = self.on_trace_ready
        elif isinstance(self.on_trace_ready, dict):  # config of handler
            trace_cfg = self.on_trace_ready.copy()
            trace_type = trace_cfg.pop('type')  # log_trace handler
            if trace_type == 'log_trace':

                # Remaining trace_cfg keys are forwarded to
                # key_averages().table() as formatting kwargs.
                def _log_handler(prof):
                    print(prof.key_averages().table(**trace_cfg))

                _on_trace_ready = _log_handler
            elif trace_type == 'tb_trace':  # tensorboard_trace handler
                try:
                    import torch_tb_profiler  # noqa: F401
                except ImportError:
                    raise ImportError('please run "pip install '
                                      'torch-tb-profiler" to install '
                                      'torch_tb_profiler')
                _on_trace_ready = torch.profiler.tensorboard_trace_handler(
                    **trace_cfg)
            else:
                raise ValueError('trace_type should be "log_trace" or '
                                 f'"tb_trace", but got {trace_type}')
        elif self.on_trace_ready is None:
            _on_trace_ready = None  # type: ignore
        else:
            raise ValueError('on_trace_ready should be handler, dict or None, '
                             f'but got {type(self.on_trace_ready)}')

        # NOTE(review): warns unconditionally when max_epochs > 1, even in
        # iteration mode — presumably intentional, since profiling slows
        # every epoch; verify if iter-based runs should be exempt.
        if runner.max_epochs > 1:
            warnings.warn(f'profiler will profile {runner.max_epochs} epochs '
                          'instead of 1 epoch. Since profiler will slow down '
                          'the training, it is recommended to train 1 epoch '
                          'with ProfilerHook and adjust your setting according'
                          ' to the profiler summary. During normal training '
                          '(epoch > 1), you may disable the ProfilerHook.')

        self.profiler = torch.profiler.profile(
            activities=self.activities,
            schedule=self.schedule,
            on_trace_ready=_on_trace_ready,
            record_shapes=self.record_shapes,
            profile_memory=self.profile_memory,
            with_stack=self.with_stack,
            with_flops=self.with_flops)

        # Enter the profiler context manually; matching __exit__ happens in
        # after_train_epoch / after_train_iter once profiling finishes.
        self.profiler.__enter__()
        runner.logger.info('profiler is profiling...')

    @master_only
    def after_train_epoch(self, runner):
        # Epoch mode: stop after the configured number of epochs.
        if self.by_epoch and runner.epoch == self.profile_iters - 1:
            runner.logger.info('profiler may take a few minutes...')
            self.profiler.__exit__(None, None, None)
            if self.json_trace_path is not None:
                self.profiler.export_chrome_trace(self.json_trace_path)

    @master_only
    def after_train_iter(self, runner):
        # Advance the profiler schedule every iteration.
        self.profiler.step()
        # Iteration mode: stop after the configured number of iterations.
        if not self.by_epoch and runner.iter == self.profile_iters - 1:
            runner.logger.info('profiler may take a few minutes...')
            self.profiler.__exit__(None, None, None)
            if self.json_trace_path is not None:
                self.profiler.export_chrome_trace(self.json_trace_path)
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/hooks/sampler_seed.py
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from .hook import HOOKS, Hook
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
@HOOKS.register_module()
class DistSamplerSeedHook(Hook):
    """Data-loading sampler for distributed training.

    When distributed training, it is only useful in conjunction with
    :obj:`EpochBasedRunner`, while :obj:`IterBasedRunner` achieves the same
    purpose with :obj:`IterLoader`.
    """

    def before_epoch(self, runner):
        loader = runner.data_loader
        epoch = runner.epoch
        if hasattr(loader.sampler, 'set_epoch'):
            # Guarded with hasattr: the loader may use a plain
            # `SequentialSampler`, which has no `set_epoch`.
            loader.sampler.set_epoch(epoch)
        elif hasattr(loader.batch_sampler.sampler, 'set_epoch'):
            # PyTorch batch samplers wrap the underlying sampler as an
            # attribute, so reach one level deeper.
            loader.batch_sampler.sampler.set_epoch(epoch)
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/hooks/sync_buffer.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from ..dist_utils import allreduce_params
|
| 3 |
+
from .hook import HOOKS, Hook
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
@HOOKS.register_module()
class SyncBuffersHook(Hook):
    """Synchronize model buffers such as running_mean and running_var in BN at
    the end of each epoch.

    Args:
        distributed (bool): Whether distributed training is used. It is
            effective only for distributed training. Defaults to True.
    """

    def __init__(self, distributed=True):
        # When False, after_epoch is a no-op.
        self.distributed = distributed

    def after_epoch(self, runner):
        """All-reduce model buffers at the end of each epoch."""
        if not self.distributed:
            return
        allreduce_params(runner.model.buffers())
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/runner/priority.py
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from enum import Enum
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
class Priority(Enum):
    """Hook priority levels.

    Lower values run earlier. The defined levels are:

    - ``HIGHEST`` (0)
    - ``VERY_HIGH`` (10)
    - ``HIGH`` (30)
    - ``ABOVE_NORMAL`` (40)
    - ``NORMAL`` (50)
    - ``BELOW_NORMAL`` (60)
    - ``LOW`` (70)
    - ``VERY_LOW`` (90)
    - ``LOWEST`` (100)
    """

    HIGHEST = 0
    VERY_HIGH = 10
    HIGH = 30
    ABOVE_NORMAL = 40
    NORMAL = 50
    BELOW_NORMAL = 60
    LOW = 70
    VERY_LOW = 90
    LOWEST = 100
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def get_priority(priority):
    """Get priority value.

    Args:
        priority (int | str | :obj:`Priority`): Priority. Integers must lie
            in [0, 100]; strings are matched case-insensitively against
            :class:`Priority` member names.

    Returns:
        int: The priority value.

    Raises:
        ValueError: If an integer priority is outside [0, 100].
        KeyError: If a string does not name a :class:`Priority` member.
        TypeError: If ``priority`` is not an int, str or :class:`Priority`.
    """
    if isinstance(priority, int):
        if priority < 0 or priority > 100:
            raise ValueError('priority must be between 0 and 100')
        return priority
    elif isinstance(priority, Priority):
        return priority.value
    elif isinstance(priority, str):
        # Case-insensitive name lookup; raises KeyError for unknown names.
        return Priority[priority.upper()].value
    else:
        # Fixed message: strings are accepted too, the original message
        # ('integer or Priority enum value') omitted them.
        raise TypeError('priority must be an integer, string or Priority '
                        'enum value')
|
FRESCO/src/ControlNet/annotator/uniformer/mmseg/models/__init__.py
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .backbones import * # noqa: F401,F403
|
| 2 |
+
from .builder import (BACKBONES, HEADS, LOSSES, SEGMENTORS, build_backbone,
|
| 3 |
+
build_head, build_loss, build_segmentor)
|
| 4 |
+
from .decode_heads import * # noqa: F401,F403
|
| 5 |
+
from .losses import * # noqa: F401,F403
|
| 6 |
+
from .necks import * # noqa: F401,F403
|
| 7 |
+
from .segmentors import * # noqa: F401,F403
|
| 8 |
+
|
| 9 |
+
__all__ = [
|
| 10 |
+
'BACKBONES', 'HEADS', 'LOSSES', 'SEGMENTORS', 'build_backbone',
|
| 11 |
+
'build_head', 'build_loss', 'build_segmentor'
|
| 12 |
+
]
|
FRESCO/src/ControlNet/annotator/uniformer/mmseg/models/builder.py
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import warnings
|
| 2 |
+
|
| 3 |
+
from annotator.uniformer.mmcv.cnn import MODELS as MMCV_MODELS
|
| 4 |
+
from annotator.uniformer.mmcv.utils import Registry
|
| 5 |
+
|
| 6 |
+
# A single registry backs every model component; child of mmcv's MODELS so
# lookups fall back to mmcv-registered modules.
MODELS = Registry('models', parent=MMCV_MODELS)

# The aliases below all point at the same registry object; the distinct
# names only document intent at call sites.
BACKBONES = MODELS
NECKS = MODELS
HEADS = MODELS
LOSSES = MODELS
SEGMENTORS = MODELS


def build_backbone(cfg):
    """Build backbone from a registry config dict."""
    return BACKBONES.build(cfg)


def build_neck(cfg):
    """Build neck from a registry config dict."""
    return NECKS.build(cfg)


def build_head(cfg):
    """Build head from a registry config dict."""
    return HEADS.build(cfg)


def build_loss(cfg):
    """Build loss from a registry config dict."""
    return LOSSES.build(cfg)


def build_segmentor(cfg, train_cfg=None, test_cfg=None):
    """Build segmentor.

    Args:
        cfg (dict): Segmentor config. May itself contain ``train_cfg`` /
            ``test_cfg`` fields.
        train_cfg (dict, optional): Deprecated; prefer specifying it inside
            ``cfg``. Must not be given in both places.
        test_cfg (dict, optional): Deprecated; same rule as ``train_cfg``.
    """
    if train_cfg is not None or test_cfg is not None:
        warnings.warn(
            'train_cfg and test_cfg is deprecated, '
            'please specify them in model', UserWarning)
    # Reject ambiguous double specification (outer arg AND model field).
    assert cfg.get('train_cfg') is None or train_cfg is None, \
        'train_cfg specified in both outer field and model field '
    assert cfg.get('test_cfg') is None or test_cfg is None, \
        'test_cfg specified in both outer field and model field '
    return SEGMENTORS.build(
        cfg, default_args=dict(train_cfg=train_cfg, test_cfg=test_cfg))
|
FRESCO/src/ControlNet/annotator/uniformer/mmseg/models/decode_heads/__init__.py
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .ann_head import ANNHead
|
| 2 |
+
from .apc_head import APCHead
|
| 3 |
+
from .aspp_head import ASPPHead
|
| 4 |
+
from .cc_head import CCHead
|
| 5 |
+
from .da_head import DAHead
|
| 6 |
+
from .dm_head import DMHead
|
| 7 |
+
from .dnl_head import DNLHead
|
| 8 |
+
from .ema_head import EMAHead
|
| 9 |
+
from .enc_head import EncHead
|
| 10 |
+
from .fcn_head import FCNHead
|
| 11 |
+
from .fpn_head import FPNHead
|
| 12 |
+
from .gc_head import GCHead
|
| 13 |
+
from .lraspp_head import LRASPPHead
|
| 14 |
+
from .nl_head import NLHead
|
| 15 |
+
from .ocr_head import OCRHead
|
| 16 |
+
# from .point_head import PointHead
|
| 17 |
+
from .psa_head import PSAHead
|
| 18 |
+
from .psp_head import PSPHead
|
| 19 |
+
from .sep_aspp_head import DepthwiseSeparableASPPHead
|
| 20 |
+
from .sep_fcn_head import DepthwiseSeparableFCNHead
|
| 21 |
+
from .uper_head import UPerHead
|
| 22 |
+
|
| 23 |
+
__all__ = [
|
| 24 |
+
'FCNHead', 'PSPHead', 'ASPPHead', 'PSAHead', 'NLHead', 'GCHead', 'CCHead',
|
| 25 |
+
'UPerHead', 'DepthwiseSeparableASPPHead', 'ANNHead', 'DAHead', 'OCRHead',
|
| 26 |
+
'EncHead', 'DepthwiseSeparableFCNHead', 'FPNHead', 'EMAHead', 'DNLHead',
|
| 27 |
+
'APCHead', 'DMHead', 'LRASPPHead'
|
| 28 |
+
]
|
FRESCO/src/ControlNet/annotator/uniformer/mmseg/models/decode_heads/ann_head.py
ADDED
|
@@ -0,0 +1,245 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
from annotator.uniformer.mmcv.cnn import ConvModule
|
| 4 |
+
|
| 5 |
+
from ..builder import HEADS
|
| 6 |
+
from ..utils import SelfAttentionBlock as _SelfAttentionBlock
|
| 7 |
+
from .decode_head import BaseDecodeHead
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class PPMConcat(nn.ModuleList):
    """Pyramid Pooling Module that only concatenates per-scale features.

    Each scale adaptively average-pools the input, the pooled maps are
    flattened spatially, and all scales are concatenated along the last
    dimension.

    Args:
        pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
            Module. Default: (1, 3, 6, 8).
    """

    def __init__(self, pool_scales=(1, 3, 6, 8)):
        pools = [nn.AdaptiveAvgPool2d(scale) for scale in pool_scales]
        super(PPMConcat, self).__init__(pools)

    def forward(self, feats):
        """Pool ``feats`` at every scale and concatenate the results.

        Input is (N, C, H, W); output is (N, C, sum(s * s for s in scales)).
        """
        batch, channels = feats.shape[:2]
        pooled = [pool(feats).view(batch, channels, -1) for pool in self]
        return torch.cat(pooled, dim=2)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
class SelfAttentionBlock(_SelfAttentionBlock):
    """Self-attention block tailored for ANN.

    The low-level feature serves as key/value (summarized by a pyramid
    pooling concat) and the high-level feature serves as query.

    Args:
        low_in_channels (int): Input channels of lower level feature,
            which is the key feature for self-attention.
        high_in_channels (int): Input channels of higher level feature,
            which is the query feature for self-attention.
        channels (int): Output channels of key/query transform.
        out_channels (int): Output channels.
        share_key_query (bool): Whether share projection weight between key
            and query projection.
        query_scale (int): The scale of query feature map.
        key_pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
            Module of key feature.
        conv_cfg (dict|None): Config of conv layers.
        norm_cfg (dict|None): Config of norm layers.
        act_cfg (dict|None): Config of activation layers.
    """

    def __init__(self, low_in_channels, high_in_channels, channels,
                 out_channels, share_key_query, query_scale, key_pool_scales,
                 conv_cfg, norm_cfg, act_cfg):
        # Queries are optionally downsampled with max pooling when
        # query_scale > 1; keys are always compressed by pyramid pooling.
        query_downsample = (
            nn.MaxPool2d(kernel_size=query_scale) if query_scale > 1 else None)
        super(SelfAttentionBlock, self).__init__(
            key_in_channels=low_in_channels,
            query_in_channels=high_in_channels,
            channels=channels,
            out_channels=out_channels,
            share_key_query=share_key_query,
            query_downsample=query_downsample,
            key_downsample=PPMConcat(key_pool_scales),
            key_query_num_convs=1,
            key_query_norm=True,
            value_out_num_convs=1,
            value_out_norm=False,
            matmul_norm=True,
            with_out=True,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
class AFNB(nn.Module):
    """Asymmetric Fusion Non-local Block (AFNB).

    Fuses a high-level (query) feature map with a low-level (key/value)
    feature map via asymmetric self-attention, then projects the
    concatenation of the attended context and the high-level input.

    Args:
        low_in_channels (int): Input channels of lower level feature,
            which is the key feature for self-attention.
        high_in_channels (int): Input channels of higher level feature,
            which is the query feature for self-attention.
        channels (int): Output channels of key/query transform.
        out_channels (int): Output channels.
        query_scales (tuple[int]): The scales of query feature map.
            Default: (1,)
        key_pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
            Module of key feature.
        conv_cfg (dict|None): Config of conv layers.
        norm_cfg (dict|None): Config of norm layers.
        act_cfg (dict|None): Config of activation layers.
    """

    def __init__(self, low_in_channels, high_in_channels, channels,
                 out_channels, query_scales, key_pool_scales, conv_cfg,
                 norm_cfg, act_cfg):
        super(AFNB, self).__init__()
        # One attention stage per query scale; projections are NOT shared
        # between key and query since they come from different levels.
        self.stages = nn.ModuleList([
            SelfAttentionBlock(
                low_in_channels=low_in_channels,
                high_in_channels=high_in_channels,
                channels=channels,
                out_channels=out_channels,
                share_key_query=False,
                query_scale=scale,
                key_pool_scales=key_pool_scales,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg) for scale in query_scales
        ])
        # Fuse attended context with the raw high-level feature; no
        # activation so the fusion stays linear.
        self.bottleneck = ConvModule(
            out_channels + high_in_channels,
            out_channels,
            1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=None)

    def forward(self, low_feats, high_feats):
        """Attend ``high_feats`` (query) over ``low_feats`` (key/value)."""
        priors = [stage(high_feats, low_feats) for stage in self.stages]
        context = torch.stack(priors, dim=0).sum(dim=0)
        return self.bottleneck(torch.cat([context, high_feats], 1))
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
class APNB(nn.Module):
    """Asymmetric Pyramid Non-local Block (APNB).

    Single-feature self-attention where key and query projections are
    shared; the attended context is concatenated with the input and
    projected back to ``out_channels``.

    Args:
        in_channels (int): Input channels of key/query feature,
            which is the key feature for self-attention.
        channels (int): Output channels of key/query transform.
        out_channels (int): Output channels.
        query_scales (tuple[int]): The scales of query feature map.
            Default: (1,)
        key_pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
            Module of key feature.
        conv_cfg (dict|None): Config of conv layers.
        norm_cfg (dict|None): Config of norm layers.
        act_cfg (dict|None): Config of activation layers.
    """

    def __init__(self, in_channels, channels, out_channels, query_scales,
                 key_pool_scales, conv_cfg, norm_cfg, act_cfg):
        super(APNB, self).__init__()
        # Key and query come from the same feature map, so their
        # projection weights are shared.
        self.stages = nn.ModuleList([
            SelfAttentionBlock(
                low_in_channels=in_channels,
                high_in_channels=in_channels,
                channels=channels,
                out_channels=out_channels,
                share_key_query=True,
                query_scale=scale,
                key_pool_scales=key_pool_scales,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg) for scale in query_scales
        ])
        self.bottleneck = ConvModule(
            2 * in_channels,
            out_channels,
            1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)

    def forward(self, feats):
        """Self-attend ``feats`` and fuse the context back in."""
        priors = [stage(feats, feats) for stage in self.stages]
        context = torch.stack(priors, dim=0).sum(dim=0)
        return self.bottleneck(torch.cat([context, feats], 1))
|
| 181 |
+
|
| 182 |
+
|
| 183 |
+
@HEADS.register_module()
class ANNHead(BaseDecodeHead):
    """Asymmetric Non-local Neural Networks for Semantic Segmentation.

    This head is the implementation of `ANNNet
    <https://arxiv.org/abs/1908.07678>`_.

    Args:
        project_channels (int): Projection channels for Nonlocal.
        query_scales (tuple[int]): The scales of query feature map.
            Default: (1,)
        key_pool_scales (tuple[int]): The pooling scales of key feature map.
            Default: (1, 3, 6, 8).
    """

    def __init__(self,
                 project_channels,
                 query_scales=(1, ),
                 key_pool_scales=(1, 3, 6, 8),
                 **kwargs):
        super(ANNHead, self).__init__(
            input_transform='multiple_select', **kwargs)
        # Exactly two input levels are expected: (low, high).
        assert len(self.in_channels) == 2
        low_channels, high_channels = self.in_channels
        self.project_channels = project_channels
        # Cross-level fusion: high-level queries attend over low-level
        # keys/values; output keeps the high-level channel count.
        self.fusion = AFNB(
            low_in_channels=low_channels,
            high_in_channels=high_channels,
            out_channels=high_channels,
            channels=project_channels,
            query_scales=query_scales,
            key_pool_scales=key_pool_scales,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        self.bottleneck = ConvModule(
            high_channels,
            self.channels,
            3,
            padding=1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        # Single-feature pyramid non-local block on the bottleneck output.
        self.context = APNB(
            in_channels=self.channels,
            out_channels=self.channels,
            channels=project_channels,
            query_scales=query_scales,
            key_pool_scales=key_pool_scales,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

    def forward(self, inputs):
        """Forward function."""
        low_feats, high_feats = self._transform_inputs(inputs)
        fused = self.fusion(low_feats, high_feats)
        fused = self.dropout(fused)
        fused = self.bottleneck(fused)
        fused = self.context(fused)
        return self.cls_seg(fused)
|
FRESCO/src/ControlNet/annotator/uniformer/mmseg/models/decode_heads/apc_head.py
ADDED
|
@@ -0,0 +1,158 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
import torch.nn.functional as F
|
| 4 |
+
from annotator.uniformer.mmcv.cnn import ConvModule
|
| 5 |
+
|
| 6 |
+
from annotator.uniformer.mmseg.ops import resize
|
| 7 |
+
from ..builder import HEADS
|
| 8 |
+
from .decode_head import BaseDecodeHead
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class ACM(nn.Module):
    """Adaptive Context Module used in APCNet.

    Builds a per-pixel affinity to a small pooled region grid and uses it
    to gather region features back onto the full-resolution map, with a
    residual connection.

    Args:
        pool_scale (int): Pooling scale used in Adaptive Context
            Module to extract region features.
        fusion (bool): Add one conv to fuse residual feature.
        in_channels (int): Input channels.
        channels (int): Channels after modules, before conv_seg.
        conv_cfg (dict | None): Config of conv layers.
        norm_cfg (dict | None): Config of norm layers.
        act_cfg (dict): Config of activation layers.
    """

    def __init__(self, pool_scale, fusion, in_channels, channels, conv_cfg,
                 norm_cfg, act_cfg):
        super(ACM, self).__init__()
        self.pool_scale = pool_scale
        self.fusion = fusion
        self.in_channels = in_channels
        self.channels = channels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        # Channel reduction for the pooled (region) branch.
        self.pooled_redu_conv = ConvModule(
            self.in_channels,
            self.channels,
            1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

        # Channel reduction for the full-resolution branch.
        self.input_redu_conv = ConvModule(
            self.in_channels,
            self.channels,
            1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

        # Transform of the globally pooled feature added before the
        # affinity projection.
        self.global_info = ConvModule(
            self.channels,
            self.channels,
            1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

        # Projects each pixel to pool_scale**2 affinity logits (one per
        # region in the pooled grid).
        self.gla = nn.Conv2d(self.channels, self.pool_scale**2, 1, 1, 0)

        self.residual_conv = ConvModule(
            self.channels,
            self.channels,
            1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

        if self.fusion:
            self.fusion_conv = ConvModule(
                self.channels,
                self.channels,
                1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)

    def forward(self, x):
        """Forward function."""
        pooled_x = F.adaptive_avg_pool2d(x, self.pool_scale)
        # [batch_size, channels, h, w]
        x = self.input_redu_conv(x)
        # [batch_size, channels, pool_scale, pool_scale]
        pooled_x = self.pooled_redu_conv(pooled_x)
        batch_size = x.size(0)
        # [batch_size, pool_scale * pool_scale, channels]
        pooled_x = pooled_x.view(batch_size, self.channels,
                                 -1).permute(0, 2, 1).contiguous()
        # [batch_size, h * w, pool_scale * pool_scale]
        affinity_matrix = self.gla(x + resize(
            self.global_info(F.adaptive_avg_pool2d(x, 1)), size=x.shape[2:])
                                   ).permute(0, 2, 3, 1).reshape(
                                       batch_size, -1, self.pool_scale**2)
        # Fix: ``F.sigmoid`` is deprecated (and removed from newer
        # torch.nn.functional); ``torch.sigmoid`` is the supported
        # equivalent with identical semantics.
        affinity_matrix = torch.sigmoid(affinity_matrix)
        # [batch_size, h * w, channels]
        z_out = torch.matmul(affinity_matrix, pooled_x)
        # [batch_size, channels, h * w]
        z_out = z_out.permute(0, 2, 1).contiguous()
        # [batch_size, channels, h, w]
        z_out = z_out.view(batch_size, self.channels, x.size(2), x.size(3))
        z_out = self.residual_conv(z_out)
        z_out = F.relu(z_out + x)
        if self.fusion:
            z_out = self.fusion_conv(z_out)

        return z_out
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
@HEADS.register_module()
class APCHead(BaseDecodeHead):
    """Adaptive Pyramid Context Network for Semantic Segmentation.

    This head is the implementation of
    `APCNet <https://openaccess.thecvf.com/content_CVPR_2019/papers/\
He_Adaptive_Pyramid_Context_Network_for_Semantic_Segmentation_\
CVPR_2019_paper.pdf>`_.

    Args:
        pool_scales (tuple[int]): Pooling scales used in Adaptive Context
            Module. Default: (1, 2, 3, 6).
        fusion (bool): Add one conv to fuse residual feature.
    """

    def __init__(self, pool_scales=(1, 2, 3, 6), fusion=True, **kwargs):
        super(APCHead, self).__init__(**kwargs)
        assert isinstance(pool_scales, (list, tuple))
        self.pool_scales = pool_scales
        self.fusion = fusion
        # One Adaptive Context Module per pyramid scale.
        self.acm_modules = nn.ModuleList([
            ACM(scale,
                self.fusion,
                self.in_channels,
                self.channels,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg) for scale in self.pool_scales
        ])
        # Fuses the raw input with every ACM output.
        self.bottleneck = ConvModule(
            self.in_channels + len(pool_scales) * self.channels,
            self.channels,
            3,
            padding=1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

    def forward(self, inputs):
        """Forward function."""
        x = self._transform_inputs(inputs)
        branch_outs = [x] + [acm(x) for acm in self.acm_modules]
        fused = self.bottleneck(torch.cat(branch_outs, dim=1))
        return self.cls_seg(fused)
|
FRESCO/src/ControlNet/annotator/uniformer/mmseg/models/decode_heads/aspp_head.py
ADDED
|
@@ -0,0 +1,107 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
from annotator.uniformer.mmcv.cnn import ConvModule
|
| 4 |
+
|
| 5 |
+
from annotator.uniformer.mmseg.ops import resize
|
| 6 |
+
from ..builder import HEADS
|
| 7 |
+
from .decode_head import BaseDecodeHead
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class ASPPModule(nn.ModuleList):
    """Atrous Spatial Pyramid Pooling (ASPP) Module.

    Args:
        dilations (tuple[int]): Dilation rate of each layer.
        in_channels (int): Input channels.
        channels (int): Channels after modules, before conv_seg.
        conv_cfg (dict|None): Config of conv layers.
        norm_cfg (dict|None): Config of norm layers.
        act_cfg (dict): Config of activation layers.
    """

    def __init__(self, dilations, in_channels, channels, conv_cfg, norm_cfg,
                 act_cfg):
        super(ASPPModule, self).__init__()
        self.dilations = dilations
        self.in_channels = in_channels
        self.channels = channels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        for dilation in dilations:
            # dilation == 1 degenerates to a plain 1x1 conv; larger
            # dilations use padded 3x3 atrous convs so the spatial size
            # is preserved.
            is_unit = dilation == 1
            self.append(
                ConvModule(
                    self.in_channels,
                    self.channels,
                    1 if is_unit else 3,
                    dilation=dilation,
                    padding=0 if is_unit else dilation,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg))

    def forward(self, x):
        """Apply every atrous branch to ``x`` and return their outputs."""
        return [branch(x) for branch in self]
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
@HEADS.register_module()
class ASPPHead(BaseDecodeHead):
    """Rethinking Atrous Convolution for Semantic Image Segmentation.

    This head is the implementation of `DeepLabV3
    <https://arxiv.org/abs/1706.05587>`_.

    Args:
        dilations (tuple[int]): Dilation rates for ASPP module.
            Default: (1, 6, 12, 18).
    """

    def __init__(self, dilations=(1, 6, 12, 18), **kwargs):
        super(ASPPHead, self).__init__(**kwargs)
        assert isinstance(dilations, (list, tuple))
        self.dilations = dilations
        # Global context branch: 1x1 conv over the globally pooled map.
        self.image_pool = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            ConvModule(
                self.in_channels,
                self.channels,
                1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg))
        self.aspp_modules = ASPPModule(
            dilations,
            self.in_channels,
            self.channels,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        # +1 accounts for the global image-pool branch.
        self.bottleneck = ConvModule(
            (len(dilations) + 1) * self.channels,
            self.channels,
            3,
            padding=1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

    def forward(self, inputs):
        """Forward function."""
        x = self._transform_inputs(inputs)
        global_feat = resize(
            self.image_pool(x),
            size=x.size()[2:],
            mode='bilinear',
            align_corners=self.align_corners)
        branch_outs = [global_feat] + self.aspp_modules(x)
        fused = self.bottleneck(torch.cat(branch_outs, dim=1))
        return self.cls_seg(fused)
|
FRESCO/src/ControlNet/annotator/uniformer/mmseg/models/decode_heads/cascade_decode_head.py
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from abc import ABCMeta, abstractmethod
|
| 2 |
+
|
| 3 |
+
from .decode_head import BaseDecodeHead
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class BaseCascadeDecodeHead(BaseDecodeHead, metaclass=ABCMeta):
    """Base class for cascade decode head used in
    :class:`CascadeEncoderDecoder."""

    def __init__(self, *args, **kwargs):
        super(BaseCascadeDecodeHead, self).__init__(*args, **kwargs)

    @abstractmethod
    def forward(self, inputs, prev_output):
        """Placeholder of forward function."""
        pass

    def forward_train(self, inputs, prev_output, img_metas, gt_semantic_seg,
                      train_cfg):
        """Forward function for training.
        Args:
            inputs (list[Tensor]): List of multi-level img features.
            prev_output (Tensor): The output of previous decode head.
            img_metas (list[dict]): List of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                `mmseg/datasets/pipelines/formatting.py:Collect`.
            gt_semantic_seg (Tensor): Semantic segmentation masks
                used if the architecture supports semantic segmentation task.
            train_cfg (dict): The training config.

        Returns:
            dict[str, Tensor]: a dictionary of loss components
        """
        logits = self.forward(inputs, prev_output)
        return self.losses(logits, gt_semantic_seg)

    def forward_test(self, inputs, prev_output, img_metas, test_cfg):
        """Forward function for testing.

        Args:
            inputs (list[Tensor]): List of multi-level img features.
            prev_output (Tensor): The output of previous decode head.
            img_metas (list[dict]): List of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                `mmseg/datasets/pipelines/formatting.py:Collect`.
            test_cfg (dict): The testing config.

        Returns:
            Tensor: Output segmentation map.
        """
        # Testing simply delegates to the subclass forward; the extra
        # meta/config arguments are part of the decode-head contract.
        return self.forward(inputs, prev_output)
|
FRESCO/src/ControlNet/annotator/uniformer/mmseg/models/decode_heads/cc_head.py
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
|
| 3 |
+
from ..builder import HEADS
|
| 4 |
+
from .fcn_head import FCNHead
|
| 5 |
+
|
| 6 |
+
try:
|
| 7 |
+
from annotator.uniformer.mmcv.ops import CrissCrossAttention
|
| 8 |
+
except ModuleNotFoundError:
|
| 9 |
+
CrissCrossAttention = None
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
@HEADS.register_module()
class CCHead(FCNHead):
    """CCNet: Criss-Cross Attention for Semantic Segmentation.

    This head is the implementation of `CCNet
    <https://arxiv.org/abs/1811.11721>`_.

    Args:
        recurrence (int): Number of recurrence of Criss Cross Attention
            module. Default: 2.
    """

    def __init__(self, recurrence=2, **kwargs):
        # CrissCrossAttention is a compiled op only shipped with
        # mmcv-full; fail loudly if it could not be imported.
        if CrissCrossAttention is None:
            raise RuntimeError('Please install mmcv-full for '
                               'CrissCrossAttention ops')
        super(CCHead, self).__init__(num_convs=2, **kwargs)
        self.recurrence = recurrence
        self.cca = CrissCrossAttention(self.channels)

    def forward(self, inputs):
        """Forward function."""
        x = self._transform_inputs(inputs)
        out = self.convs[0](x)
        # Apply criss-cross attention repeatedly to enlarge the
        # effective receptive field.
        for _ in range(self.recurrence):
            out = self.cca(out)
        out = self.convs[1](out)
        if self.concat_input:
            out = self.conv_cat(torch.cat([x, out], dim=1))
        return self.cls_seg(out)
|
FRESCO/src/ControlNet/annotator/uniformer/mmseg/models/decode_heads/da_head.py
ADDED
|
@@ -0,0 +1,178 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn.functional as F
|
| 3 |
+
from annotator.uniformer.mmcv.cnn import ConvModule, Scale
|
| 4 |
+
from torch import nn
|
| 5 |
+
|
| 6 |
+
from annotator.uniformer.mmseg.core import add_prefix
|
| 7 |
+
from ..builder import HEADS
|
| 8 |
+
from ..utils import SelfAttentionBlock as _SelfAttentionBlock
|
| 9 |
+
from .decode_head import BaseDecodeHead
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class PAM(_SelfAttentionBlock):
    """Position Attention Module (PAM)

    Args:
        in_channels (int): Input channels of key/query feature.
        channels (int): Output channels of key/query transform.
    """

    def __init__(self, in_channels, channels):
        super(PAM, self).__init__(
            key_in_channels=in_channels,
            query_in_channels=in_channels,
            channels=channels,
            out_channels=in_channels,
            share_key_query=False,
            query_downsample=None,
            key_downsample=None,
            key_query_num_convs=1,
            key_query_norm=False,
            value_out_num_convs=1,
            value_out_norm=False,
            matmul_norm=False,
            with_out=False,
            conv_cfg=None,
            norm_cfg=None,
            act_cfg=None)

        # Zero-initialized learnable residual weight: the attention path
        # contributes nothing at the start of training.
        self.gamma = Scale(0)

    def forward(self, x):
        """Self-attend ``x`` spatially and add the scaled residual."""
        attended = super(PAM, self).forward(x, x)
        return self.gamma(attended) + x
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
class CAM(nn.Module):
    """Channel Attention Module (CAM)"""

    def __init__(self):
        super(CAM, self).__init__()
        # Zero-initialized learnable residual weight so the module
        # starts out as an identity mapping.
        self.gamma = Scale(0)

    def forward(self, x):
        """Compute channel-wise attention over ``x`` with a residual."""
        batch, channels, height, width = x.size()
        flat = x.view(batch, channels, -1)
        # Channel-by-channel similarity: [batch, channels, channels].
        energy = torch.bmm(flat, flat.permute(0, 2, 1))
        # Subtract from the row-wise max for numerical stability before
        # the softmax (same trick as log-sum-exp).
        row_max = torch.max(energy, -1, keepdim=True)[0]
        attention = F.softmax(row_max.expand_as(energy) - energy, dim=-1)
        # Re-weight the channels of the flattened input.
        weighted = torch.bmm(attention, flat)
        weighted = weighted.view(batch, channels, height, width)
        return self.gamma(weighted) + x
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
@HEADS.register_module()
class DAHead(BaseDecodeHead):
    """Dual Attention Network for Scene Segmentation.

    This head is the implementation of `DANet
    <https://arxiv.org/abs/1809.02983>`_.

    Args:
        pam_channels (int): The channels of Position Attention Module(PAM).
    """

    def __init__(self, pam_channels, **kwargs):
        super(DAHead, self).__init__(**kwargs)
        self.pam_channels = pam_channels

        def _conv3x3(in_ch):
            # 3x3 conv-norm-act used on both sides of each attention
            # branch; only the input channel count varies.
            return ConvModule(
                in_ch,
                self.channels,
                3,
                padding=1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)

        # Position-attention branch.
        self.pam_in_conv = _conv3x3(self.in_channels)
        self.pam = PAM(self.channels, pam_channels)
        self.pam_out_conv = _conv3x3(self.channels)
        self.pam_conv_seg = nn.Conv2d(
            self.channels, self.num_classes, kernel_size=1)

        # Channel-attention branch.
        self.cam_in_conv = _conv3x3(self.in_channels)
        self.cam = CAM()
        self.cam_out_conv = _conv3x3(self.channels)
        self.cam_conv_seg = nn.Conv2d(
            self.channels, self.num_classes, kernel_size=1)

    def pam_cls_seg(self, feat):
        """PAM feature classification."""
        if self.dropout is not None:
            feat = self.dropout(feat)
        return self.pam_conv_seg(feat)

    def cam_cls_seg(self, feat):
        """CAM feature classification."""
        if self.dropout is not None:
            feat = self.dropout(feat)
        return self.cam_conv_seg(feat)

    def forward(self, inputs):
        """Forward function."""
        x = self._transform_inputs(inputs)

        pam_feat = self.pam_out_conv(self.pam(self.pam_in_conv(x)))
        pam_out = self.pam_cls_seg(pam_feat)

        cam_feat = self.cam_out_conv(self.cam(self.cam_in_conv(x)))
        cam_out = self.cam_cls_seg(cam_feat)

        # The fused prediction sums both branch features before the
        # shared classifier.
        pam_cam_out = self.cls_seg(pam_feat + cam_feat)

        return pam_cam_out, pam_out, cam_out

    def forward_test(self, inputs, img_metas, test_cfg):
        """Forward function for testing, only ``pam_cam`` is used."""
        return self.forward(inputs)[0]

    def losses(self, seg_logit, seg_label):
        """Compute ``pam_cam``, ``pam``, ``cam`` loss."""
        pam_cam_logit, pam_logit, cam_logit = seg_logit
        base_losses = super(DAHead, self).losses
        loss = dict()
        loss.update(add_prefix(base_losses(pam_cam_logit, seg_label),
                               'pam_cam'))
        loss.update(add_prefix(base_losses(pam_logit, seg_label), 'pam'))
        loss.update(add_prefix(base_losses(cam_logit, seg_label), 'cam'))
        return loss
|
FRESCO/src/ControlNet/annotator/uniformer/mmseg/models/decode_heads/decode_head.py
ADDED
|
@@ -0,0 +1,234 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from abc import ABCMeta, abstractmethod
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
import torch.nn as nn
|
| 5 |
+
from annotator.uniformer.mmcv.cnn import normal_init
|
| 6 |
+
from annotator.uniformer.mmcv.runner import auto_fp16, force_fp32
|
| 7 |
+
|
| 8 |
+
from annotator.uniformer.mmseg.core import build_pixel_sampler
|
| 9 |
+
from annotator.uniformer.mmseg.ops import resize
|
| 10 |
+
from ..builder import build_loss
|
| 11 |
+
from ..losses import accuracy
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class BaseDecodeHead(nn.Module, metaclass=ABCMeta):
    """Base class for BaseDecodeHead.

    Concrete decode heads implement :meth:`forward`; this base class
    handles input selection/transformation, the final 1x1 classifier
    (``conv_seg``), optional dropout, optional pixel sampling and the
    segmentation loss.

    Args:
        in_channels (int|Sequence[int]): Input channels.
        channels (int): Channels after modules, before conv_seg.
        num_classes (int): Number of classes.
        dropout_ratio (float): Ratio of dropout layer. Default: 0.1.
        conv_cfg (dict|None): Config of conv layers. Default: None.
        norm_cfg (dict|None): Config of norm layers. Default: None.
        act_cfg (dict): Config of activation layers.
            Default: dict(type='ReLU')
        in_index (int|Sequence[int]): Input feature index. Default: -1
        input_transform (str|None): Transformation type of input features.
            Options: 'resize_concat', 'multiple_select', None.
            'resize_concat': Multiple feature maps will be resize to the
                same size as first one and than concat together.
                Usually used in FCN head of HRNet.
            'multiple_select': Multiple feature maps will be bundle into
                a list and passed into decode head.
            None: Only one select feature map is allowed.
            Default: None.
        loss_decode (dict): Config of decode loss.
            Default: dict(type='CrossEntropyLoss').
        ignore_index (int | None): The label index to be ignored. When using
            masked BCE loss, ignore_index should be set to None. Default: 255
        sampler (dict|None): The config of segmentation map sampler.
            Default: None.
        align_corners (bool): align_corners argument of F.interpolate.
            Default: False.
    """

    def __init__(self,
                 in_channels,
                 channels,
                 *,
                 num_classes,
                 dropout_ratio=0.1,
                 conv_cfg=None,
                 norm_cfg=None,
                 act_cfg=dict(type='ReLU'),
                 in_index=-1,
                 input_transform=None,
                 loss_decode=dict(
                     type='CrossEntropyLoss',
                     use_sigmoid=False,
                     loss_weight=1.0),
                 ignore_index=255,
                 sampler=None,
                 align_corners=False):
        super(BaseDecodeHead, self).__init__()
        # Validates and stores in_channels / in_index / input_transform.
        self._init_inputs(in_channels, in_index, input_transform)
        self.channels = channels
        self.num_classes = num_classes
        self.dropout_ratio = dropout_ratio
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.in_index = in_index
        self.loss_decode = build_loss(loss_decode)
        self.ignore_index = ignore_index
        self.align_corners = align_corners
        if sampler is not None:
            # The sampler may inspect this head, hence context=self.
            self.sampler = build_pixel_sampler(sampler, context=self)
        else:
            self.sampler = None

        # Final per-pixel classifier producing num_classes logits.
        self.conv_seg = nn.Conv2d(channels, num_classes, kernel_size=1)
        if dropout_ratio > 0:
            self.dropout = nn.Dropout2d(dropout_ratio)
        else:
            self.dropout = None
        # Toggled by mmcv's fp16 decorators (auto_fp16 / force_fp32).
        self.fp16_enabled = False

    def extra_repr(self):
        """Extra repr shown in the module's string representation."""
        s = f'input_transform={self.input_transform}, ' \
            f'ignore_index={self.ignore_index}, ' \
            f'align_corners={self.align_corners}'
        return s

    def _init_inputs(self, in_channels, in_index, input_transform):
        """Check and initialize input transforms.

        The in_channels, in_index and input_transform must match.
        Specifically, when input_transform is None, only single feature map
        will be selected. So in_channels and in_index must be of type int.
        When input_transform is not None, in_channels and in_index must be
        list or tuple, with the same length.

        Args:
            in_channels (int|Sequence[int]): Input channels.
            in_index (int|Sequence[int]): Input feature index.
            input_transform (str|None): Transformation type of input features.
                Options: 'resize_concat', 'multiple_select', None.
                'resize_concat': Multiple feature maps will be resize to the
                    same size as first one and than concat together.
                    Usually used in FCN head of HRNet.
                'multiple_select': Multiple feature maps will be bundle into
                    a list and passed into decode head.
                None: Only one select feature map is allowed.
        """

        if input_transform is not None:
            assert input_transform in ['resize_concat', 'multiple_select']
        self.input_transform = input_transform
        self.in_index = in_index
        if input_transform is not None:
            assert isinstance(in_channels, (list, tuple))
            assert isinstance(in_index, (list, tuple))
            assert len(in_channels) == len(in_index)
            if input_transform == 'resize_concat':
                # Concatenation along channels: total equals the sum.
                self.in_channels = sum(in_channels)
            else:
                self.in_channels = in_channels
        else:
            assert isinstance(in_channels, int)
            assert isinstance(in_index, int)
            self.in_channels = in_channels

    def init_weights(self):
        """Initialize weights of classification layer."""
        normal_init(self.conv_seg, mean=0, std=0.01)

    def _transform_inputs(self, inputs):
        """Transform inputs for decoder.

        Args:
            inputs (list[Tensor]): List of multi-level img features.

        Returns:
            Tensor: The transformed inputs
        """

        if self.input_transform == 'resize_concat':
            inputs = [inputs[i] for i in self.in_index]
            # Resize every selected map to the spatial size of the first
            # one, then concatenate along the channel dimension.
            upsampled_inputs = [
                resize(
                    input=x,
                    size=inputs[0].shape[2:],
                    mode='bilinear',
                    align_corners=self.align_corners) for x in inputs
            ]
            inputs = torch.cat(upsampled_inputs, dim=1)
        elif self.input_transform == 'multiple_select':
            inputs = [inputs[i] for i in self.in_index]
        else:
            # No transform: pick the single configured feature map.
            inputs = inputs[self.in_index]

        return inputs

    @auto_fp16()
    @abstractmethod
    def forward(self, inputs):
        """Placeholder of forward function."""
        pass

    def forward_train(self, inputs, img_metas, gt_semantic_seg, train_cfg):
        """Forward function for training.
        Args:
            inputs (list[Tensor]): List of multi-level img features.
            img_metas (list[dict]): List of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                `mmseg/datasets/pipelines/formatting.py:Collect`.
            gt_semantic_seg (Tensor): Semantic segmentation masks
                used if the architecture supports semantic segmentation task.
            train_cfg (dict): The training config.

        Returns:
            dict[str, Tensor]: a dictionary of loss components
        """
        seg_logits = self.forward(inputs)
        losses = self.losses(seg_logits, gt_semantic_seg)
        return losses

    def forward_test(self, inputs, img_metas, test_cfg):
        """Forward function for testing.

        Args:
            inputs (list[Tensor]): List of multi-level img features.
            img_metas (list[dict]): List of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                `mmseg/datasets/pipelines/formatting.py:Collect`.
            test_cfg (dict): The testing config.

        Returns:
            Tensor: Output segmentation map.
        """
        return self.forward(inputs)

    def cls_seg(self, feat):
        """Classify each pixel."""
        if self.dropout is not None:
            feat = self.dropout(feat)
        output = self.conv_seg(feat)
        return output

    @force_fp32(apply_to=('seg_logit', ))
    def losses(self, seg_logit, seg_label):
        """Compute segmentation loss."""
        loss = dict()
        # Upsample logits to label resolution before computing the loss.
        seg_logit = resize(
            input=seg_logit,
            size=seg_label.shape[2:],
            mode='bilinear',
            align_corners=self.align_corners)
        if self.sampler is not None:
            seg_weight = self.sampler.sample(seg_logit, seg_label)
        else:
            seg_weight = None
        # Drop the singleton channel dim: (N, 1, H, W) -> (N, H, W).
        seg_label = seg_label.squeeze(1)
        loss['loss_seg'] = self.loss_decode(
            seg_logit,
            seg_label,
            weight=seg_weight,
            ignore_index=self.ignore_index)
        loss['acc_seg'] = accuracy(seg_logit, seg_label)
        return loss
|
FRESCO/src/ControlNet/annotator/uniformer/mmseg/models/decode_heads/dm_head.py
ADDED
|
@@ -0,0 +1,140 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
import torch.nn.functional as F
|
| 4 |
+
from annotator.uniformer.mmcv.cnn import ConvModule, build_activation_layer, build_norm_layer
|
| 5 |
+
|
| 6 |
+
from ..builder import HEADS
|
| 7 |
+
from .decode_head import BaseDecodeHead
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class DCM(nn.Module):
    """Dynamic Convolutional Module used in DMNet.

    Generates a per-sample depthwise convolution filter from a pooled
    summary of the input and applies it to a channel-reduced version of
    the same input.

    Args:
        filter_size (int): The filter size of generated convolution kernel
            used in Dynamic Convolutional Module.
        fusion (bool): Add one conv to fuse DCM output feature.
        in_channels (int): Input channels.
        channels (int): Channels after modules, before conv_seg.
        conv_cfg (dict | None): Config of conv layers.
        norm_cfg (dict | None): Config of norm layers.
        act_cfg (dict): Config of activation layers.
    """

    def __init__(self, filter_size, fusion, in_channels, channels, conv_cfg,
                 norm_cfg, act_cfg):
        super(DCM, self).__init__()
        self.filter_size = filter_size
        self.fusion = fusion
        self.in_channels = in_channels
        self.channels = channels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        # 1x1 conv that produces the dynamic filter weights from the
        # adaptively pooled input.
        self.filter_gen_conv = nn.Conv2d(self.in_channels, self.channels, 1, 1,
                                         0)

        # Reduces the input to `channels` before the dynamic convolution.
        self.input_redu_conv = ConvModule(
            self.in_channels,
            self.channels,
            1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

        if self.norm_cfg is not None:
            # build_norm_layer returns (name, layer); keep the layer only.
            self.norm = build_norm_layer(self.norm_cfg, self.channels)[1]
        else:
            self.norm = None
        self.activate = build_activation_layer(self.act_cfg)

        if self.fusion:
            self.fusion_conv = ConvModule(
                self.channels,
                self.channels,
                1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)

    def forward(self, x):
        """Forward function."""
        # Pool to (filter_size x filter_size), then project: this yields
        # one filter_size^2 kernel per (sample, channel) pair.
        generated_filter = self.filter_gen_conv(
            F.adaptive_avg_pool2d(x, self.filter_size))
        x = self.input_redu_conv(x)
        b, c, h, w = x.shape
        # [1, b * c, h, w], c = self.channels
        # Folding batch into channels lets a single grouped conv apply a
        # different kernel to every (sample, channel) pair.
        x = x.view(1, b * c, h, w)
        # [b * c, 1, filter_size, filter_size]
        generated_filter = generated_filter.view(b * c, 1, self.filter_size,
                                                 self.filter_size)
        pad = (self.filter_size - 1) // 2
        if (self.filter_size - 1) % 2 == 0:
            p2d = (pad, pad, pad, pad)
        else:
            # Even kernel size: pad asymmetrically (one extra pixel on the
            # left/top) so the output keeps the input's spatial size.
            p2d = (pad + 1, pad, pad + 1, pad)
        x = F.pad(input=x, pad=p2d, mode='constant', value=0)
        # [1, b * c, h, w]
        output = F.conv2d(input=x, weight=generated_filter, groups=b * c)
        # [b, c, h, w]
        output = output.view(b, c, h, w)
        if self.norm is not None:
            output = self.norm(output)
        output = self.activate(output)

        if self.fusion:
            output = self.fusion_conv(output)

        return output
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
@HEADS.register_module()
class DMHead(BaseDecodeHead):
    """Dynamic Multi-scale Filters for Semantic Segmentation.

    This head is the implementation of
    `DMNet <https://openaccess.thecvf.com/content_ICCV_2019/papers/\
He_Dynamic_Multi-Scale_Filters_for_Semantic_Segmentation_\
ICCV_2019_paper.pdf>`_.

    Args:
        filter_sizes (tuple[int]): The size of generated convolutional filters
            used in Dynamic Convolutional Module. Default: (1, 3, 5, 7).
        fusion (bool): Add one conv to fuse DCM output feature.
    """

    def __init__(self, filter_sizes=(1, 3, 5, 7), fusion=False, **kwargs):
        super(DMHead, self).__init__(**kwargs)
        assert isinstance(filter_sizes, (list, tuple))
        self.filter_sizes = filter_sizes
        self.fusion = fusion
        # One dynamic-convolution branch per requested filter size.
        self.dcm_modules = nn.ModuleList([
            DCM(size,
                self.fusion,
                self.in_channels,
                self.channels,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg) for size in self.filter_sizes
        ])
        # Fuses the raw input plus every DCM output back to `channels`.
        self.bottleneck = ConvModule(
            self.in_channels + len(filter_sizes) * self.channels,
            self.channels,
            3,
            padding=1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

    def forward(self, inputs):
        """Forward function."""
        x = self._transform_inputs(inputs)
        # Keep the input itself as the first branch, then append every
        # DCM branch output before channel-wise concatenation.
        branch_outs = [x] + [dcm(x) for dcm in self.dcm_modules]
        output = self.bottleneck(torch.cat(branch_outs, dim=1))
        return self.cls_seg(output)
|
FRESCO/src/ControlNet/annotator/uniformer/mmseg/models/decode_heads/dnl_head.py
ADDED
|
@@ -0,0 +1,131 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
from annotator.uniformer.mmcv.cnn import NonLocal2d
|
| 3 |
+
from torch import nn
|
| 4 |
+
|
| 5 |
+
from ..builder import HEADS
|
| 6 |
+
from .fcn_head import FCNHead
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class DisentangledNonLocal2d(NonLocal2d):
    """Disentangled Non-Local Blocks.

    Extends ``NonLocal2d`` with a temperature-scaled pairwise term and a
    unary (per-position) attention term computed by ``conv_mask``.

    Args:
        temperature (float): Temperature to adjust attention. Default: 0.05
    """

    def __init__(self, *arg, temperature, **kwargs):
        super().__init__(*arg, **kwargs)
        self.temperature = temperature
        # Produces the unary attention map (one scalar per position).
        self.conv_mask = nn.Conv2d(self.in_channels, 1, kernel_size=1)

    def embedded_gaussian(self, theta_x, phi_x):
        """Embedded gaussian with temperature."""

        # NonLocal2d pairwise_weight: [N, HxW, HxW]
        pairwise_weight = torch.matmul(theta_x, phi_x)
        if self.use_scale:
            # theta_x.shape[-1] is `self.inter_channels`
            pairwise_weight /= theta_x.shape[-1]**0.5
        # Lower temperature sharpens the softmax distribution.
        pairwise_weight /= self.temperature
        pairwise_weight = pairwise_weight.softmax(dim=-1)
        return pairwise_weight

    def forward(self, x):
        # x: [N, C, H, W]
        n = x.size(0)

        # g_x: [N, HxW, C]
        g_x = self.g(x).view(n, self.inter_channels, -1)
        g_x = g_x.permute(0, 2, 1)

        # theta_x: [N, HxW, C], phi_x: [N, C, HxW]
        # Projections differ per non-local mode (see NonLocal2d).
        if self.mode == 'gaussian':
            theta_x = x.view(n, self.in_channels, -1)
            theta_x = theta_x.permute(0, 2, 1)
            if self.sub_sample:
                phi_x = self.phi(x).view(n, self.in_channels, -1)
            else:
                phi_x = x.view(n, self.in_channels, -1)
        elif self.mode == 'concatenation':
            theta_x = self.theta(x).view(n, self.inter_channels, -1, 1)
            phi_x = self.phi(x).view(n, self.inter_channels, 1, -1)
        else:
            theta_x = self.theta(x).view(n, self.inter_channels, -1)
            theta_x = theta_x.permute(0, 2, 1)
            phi_x = self.phi(x).view(n, self.inter_channels, -1)

        # subtract mean
        # "Disentangling": whitening the query/key embeddings separates
        # the pairwise term from the unary term (in-place on the views).
        theta_x -= theta_x.mean(dim=-2, keepdim=True)
        phi_x -= phi_x.mean(dim=-1, keepdim=True)

        # Dispatch to the mode-named pairwise function (e.g. the
        # temperature-scaled embedded_gaussian above).
        pairwise_func = getattr(self, self.mode)
        # pairwise_weight: [N, HxW, HxW]
        pairwise_weight = pairwise_func(theta_x, phi_x)

        # y: [N, HxW, C]
        y = torch.matmul(pairwise_weight, g_x)
        # y: [N, C, H, W]
        y = y.permute(0, 2, 1).contiguous().reshape(n, self.inter_channels,
                                                    *x.size()[2:])

        # unary_mask: [N, 1, HxW]
        unary_mask = self.conv_mask(x)
        unary_mask = unary_mask.view(n, 1, -1)
        unary_mask = unary_mask.softmax(dim=-1)
        # unary_x: [N, 1, C]
        unary_x = torch.matmul(unary_mask, g_x)
        # unary_x: [N, C, 1, 1]
        # Broadcasts the same unary context over all spatial positions.
        unary_x = unary_x.permute(0, 2, 1).contiguous().reshape(
            n, self.inter_channels, 1, 1)

        # Residual connection around the combined pairwise + unary term.
        output = x + self.conv_out(y + unary_x)

        return output
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
@HEADS.register_module()
class DNLHead(FCNHead):
    """Disentangled Non-Local Neural Networks.

    This head is the implementation of `DNLNet
    <https://arxiv.org/abs/2006.06668>`_.

    Args:
        reduction (int): Reduction factor of projection transform. Default: 2.
        use_scale (bool): Whether to scale pairwise_weight by
            sqrt(1/inter_channels). Default: False.
        mode (str): The nonlocal mode. Options are 'embedded_gaussian',
            'dot_product'. Default: 'embedded_gaussian.'.
        temperature (float): Temperature to adjust attention. Default: 0.05
    """

    def __init__(self,
                 reduction=2,
                 use_scale=True,
                 mode='embedded_gaussian',
                 temperature=0.05,
                 **kwargs):
        # Two FCN convs sandwich the disentangled non-local block.
        super(DNLHead, self).__init__(num_convs=2, **kwargs)
        self.reduction = reduction
        self.use_scale = use_scale
        self.mode = mode
        self.temperature = temperature
        self.dnl_block = DisentangledNonLocal2d(
            in_channels=self.channels,
            reduction=self.reduction,
            use_scale=self.use_scale,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            mode=self.mode,
            temperature=self.temperature)

    def forward(self, inputs):
        """Forward function: conv -> DNL block -> conv -> classifier."""
        x = self._transform_inputs(inputs)
        out = self.convs[0](x)
        out = self.dnl_block(out)
        out = self.convs[1](out)
        if self.concat_input:
            # Optionally fuse the untouched input back in.
            out = self.conv_cat(torch.cat([x, out], dim=1))
        return self.cls_seg(out)
|