Moyao001 committed on
Commit
e397de7
·
verified ·
1 Parent(s): c9de500

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. FateZero-main/data/negative_reg/car/381-2d1e95f0027f7ccad3ec8977b0b3e3719c761ffc.jpg +3 -0
  2. FateZero-main/data/negative_reg/car/431-903df9625885e56c17b57c1b55972367a194a7af.jpg +3 -0
  3. Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/__init__.py +41 -0
  4. Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/alexnet.py +61 -0
  5. Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/bricks/__init__.py +35 -0
  6. Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/bricks/activation.py +92 -0
  7. Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/bricks/context_block.py +125 -0
  8. Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/bricks/conv.py +44 -0
  9. Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/bricks/conv2d_adaptive_padding.py +62 -0
  10. Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/bricks/conv_module.py +206 -0
  11. Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/bricks/conv_ws.py +148 -0
  12. Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/bricks/depthwise_separable_conv_module.py +96 -0
  13. Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/bricks/drop.py +65 -0
  14. Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/bricks/generalized_attention.py +412 -0
  15. Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/bricks/hsigmoid.py +34 -0
  16. Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/bricks/hswish.py +29 -0
  17. Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/bricks/non_local.py +306 -0
  18. Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/bricks/norm.py +144 -0
  19. Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/bricks/padding.py +36 -0
  20. Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/bricks/plugin.py +88 -0
  21. Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/bricks/registry.py +16 -0
  22. Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/bricks/scale.py +21 -0
  23. Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/bricks/swish.py +25 -0
  24. Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/bricks/transformer.py +595 -0
  25. Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/bricks/upsample.py +84 -0
  26. Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/bricks/wrappers.py +180 -0
  27. Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/builder.py +30 -0
  28. Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/resnet.py +316 -0
  29. Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/utils/__init__.py +19 -0
  30. Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/utils/flops_counter.py +599 -0
  31. Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/utils/fuse_conv_bn.py +59 -0
  32. Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/utils/sync_bn.py +59 -0
  33. Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/utils/weight_init.py +684 -0
  34. Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/vgg.py +175 -0
  35. Text2Video-Zero-main/annotator/uniformer/mmcv/ops/__init__.py +81 -0
  36. Text2Video-Zero-main/annotator/uniformer/mmcv/ops/assign_score_withk.py +123 -0
  37. Text2Video-Zero-main/annotator/uniformer/mmcv/ops/ball_query.py +55 -0
  38. Text2Video-Zero-main/annotator/uniformer/mmcv/ops/box_iou_rotated.py +45 -0
  39. Text2Video-Zero-main/annotator/uniformer/mmcv/ops/cc_attention.py +83 -0
  40. Text2Video-Zero-main/annotator/uniformer/mmcv/ops/correlation.py +196 -0
  41. Text2Video-Zero-main/annotator/uniformer/mmcv/ops/deform_conv.py +405 -0
  42. Text2Video-Zero-main/annotator/uniformer/mmcv/ops/gather_points.py +57 -0
  43. Text2Video-Zero-main/annotator/uniformer/mmcv/ops/multi_scale_deform_attn.py +358 -0
  44. Text2Video-Zero-main/annotator/uniformer/mmcv/ops/roi_pool.py +86 -0
  45. Text2Video-Zero-main/annotator/uniformer/mmcv/ops/saconv.py +145 -0
  46. Text2Video-Zero-main/annotator/uniformer/mmcv/runner/__init__.py +47 -0
  47. Text2Video-Zero-main/annotator/uniformer/mmcv/runner/base_module.py +195 -0
  48. Text2Video-Zero-main/annotator/uniformer/mmcv/runner/base_runner.py +542 -0
  49. Text2Video-Zero-main/annotator/uniformer/mmcv/runner/builder.py +24 -0
  50. Text2Video-Zero-main/annotator/uniformer/mmcv/runner/checkpoint.py +707 -0
FateZero-main/data/negative_reg/car/381-2d1e95f0027f7ccad3ec8977b0b3e3719c761ffc.jpg ADDED

Git LFS Details

  • SHA256: bfd17ad83e0b874017d158959edf3f01b47585847bf96965b461d1974abc3254
  • Pointer size: 130 Bytes
  • Size of remote file: 53.1 kB
FateZero-main/data/negative_reg/car/431-903df9625885e56c17b57c1b55972367a194a7af.jpg ADDED

Git LFS Details

  • SHA256: 2058695ef343c739319095cab7ecf5880ab1a2411a09152f284419509cc0aab0
  • Pointer size: 130 Bytes
  • Size of remote file: 46.1 kB
Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/__init__.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ from .alexnet import AlexNet
3
+ # yapf: disable
4
+ from .bricks import (ACTIVATION_LAYERS, CONV_LAYERS, NORM_LAYERS,
5
+ PADDING_LAYERS, PLUGIN_LAYERS, UPSAMPLE_LAYERS,
6
+ ContextBlock, Conv2d, Conv3d, ConvAWS2d, ConvModule,
7
+ ConvTranspose2d, ConvTranspose3d, ConvWS2d,
8
+ DepthwiseSeparableConvModule, GeneralizedAttention,
9
+ HSigmoid, HSwish, Linear, MaxPool2d, MaxPool3d,
10
+ NonLocal1d, NonLocal2d, NonLocal3d, Scale, Swish,
11
+ build_activation_layer, build_conv_layer,
12
+ build_norm_layer, build_padding_layer, build_plugin_layer,
13
+ build_upsample_layer, conv_ws_2d, is_norm)
14
+ from .builder import MODELS, build_model_from_cfg
15
+ # yapf: enable
16
+ from .resnet import ResNet, make_res_layer
17
+ from .utils import (INITIALIZERS, Caffe2XavierInit, ConstantInit, KaimingInit,
18
+ NormalInit, PretrainedInit, TruncNormalInit, UniformInit,
19
+ XavierInit, bias_init_with_prob, caffe2_xavier_init,
20
+ constant_init, fuse_conv_bn, get_model_complexity_info,
21
+ initialize, kaiming_init, normal_init, trunc_normal_init,
22
+ uniform_init, xavier_init)
23
+ from .vgg import VGG, make_vgg_layer
24
+
25
+ __all__ = [
26
+ 'AlexNet', 'VGG', 'make_vgg_layer', 'ResNet', 'make_res_layer',
27
+ 'constant_init', 'xavier_init', 'normal_init', 'trunc_normal_init',
28
+ 'uniform_init', 'kaiming_init', 'caffe2_xavier_init',
29
+ 'bias_init_with_prob', 'ConvModule', 'build_activation_layer',
30
+ 'build_conv_layer', 'build_norm_layer', 'build_padding_layer',
31
+ 'build_upsample_layer', 'build_plugin_layer', 'is_norm', 'NonLocal1d',
32
+ 'NonLocal2d', 'NonLocal3d', 'ContextBlock', 'HSigmoid', 'Swish', 'HSwish',
33
+ 'GeneralizedAttention', 'ACTIVATION_LAYERS', 'CONV_LAYERS', 'NORM_LAYERS',
34
+ 'PADDING_LAYERS', 'UPSAMPLE_LAYERS', 'PLUGIN_LAYERS', 'Scale',
35
+ 'get_model_complexity_info', 'conv_ws_2d', 'ConvAWS2d', 'ConvWS2d',
36
+ 'fuse_conv_bn', 'DepthwiseSeparableConvModule', 'Linear', 'Conv2d',
37
+ 'ConvTranspose2d', 'MaxPool2d', 'ConvTranspose3d', 'MaxPool3d', 'Conv3d',
38
+ 'initialize', 'INITIALIZERS', 'ConstantInit', 'XavierInit', 'NormalInit',
39
+ 'TruncNormalInit', 'UniformInit', 'KaimingInit', 'PretrainedInit',
40
+ 'Caffe2XavierInit', 'MODELS', 'build_model_from_cfg'
41
+ ]
Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/alexnet.py ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import logging
3
+
4
+ import torch.nn as nn
5
+
6
+
7
class AlexNet(nn.Module):
    """AlexNet backbone.

    Args:
        num_classes (int): Number of classes for the classification head.
            A non-positive value (the default) disables the classifier, in
            which case the network returns raw convolutional features.
    """

    def __init__(self, num_classes=-1):
        super(AlexNet, self).__init__()
        self.num_classes = num_classes
        # Convolutional feature extractor; for a 224x224 input this yields a
        # (256, 6, 6) feature map.
        self.features = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        )
        # The fully-connected head is only built when classification is
        # requested.
        if self.num_classes > 0:
            self.classifier = nn.Sequential(
                nn.Dropout(),
                nn.Linear(256 * 6 * 6, 4096),
                nn.ReLU(inplace=True),
                nn.Dropout(),
                nn.Linear(4096, 4096),
                nn.ReLU(inplace=True),
                nn.Linear(4096, num_classes),
            )

    def init_weights(self, pretrained=None):
        """Initialize weights, optionally from a checkpoint path.

        Args:
            pretrained (str | None): Checkpoint path to load, or ``None`` to
                keep PyTorch's default initialization.

        Raises:
            TypeError: If ``pretrained`` is neither a str nor ``None``.
        """
        if pretrained is None:
            # Keep PyTorch's default initialization.
            return
        if isinstance(pretrained, str):
            from ..runner import load_checkpoint
            load_checkpoint(
                self, pretrained, strict=False, logger=logging.getLogger())
        else:
            raise TypeError('pretrained must be a str or None')

    def forward(self, x):
        """Run the backbone; returns logits or, without a classifier, the
        raw feature map."""
        feats = self.features(x)
        if self.num_classes <= 0:
            return feats
        flat = feats.view(feats.size(0), 256 * 6 * 6)
        return self.classifier(flat)
Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/bricks/__init__.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ from .activation import build_activation_layer
3
+ from .context_block import ContextBlock
4
+ from .conv import build_conv_layer
5
+ from .conv2d_adaptive_padding import Conv2dAdaptivePadding
6
+ from .conv_module import ConvModule
7
+ from .conv_ws import ConvAWS2d, ConvWS2d, conv_ws_2d
8
+ from .depthwise_separable_conv_module import DepthwiseSeparableConvModule
9
+ from .drop import Dropout, DropPath
10
+ from .generalized_attention import GeneralizedAttention
11
+ from .hsigmoid import HSigmoid
12
+ from .hswish import HSwish
13
+ from .non_local import NonLocal1d, NonLocal2d, NonLocal3d
14
+ from .norm import build_norm_layer, is_norm
15
+ from .padding import build_padding_layer
16
+ from .plugin import build_plugin_layer
17
+ from .registry import (ACTIVATION_LAYERS, CONV_LAYERS, NORM_LAYERS,
18
+ PADDING_LAYERS, PLUGIN_LAYERS, UPSAMPLE_LAYERS)
19
+ from .scale import Scale
20
+ from .swish import Swish
21
+ from .upsample import build_upsample_layer
22
+ from .wrappers import (Conv2d, Conv3d, ConvTranspose2d, ConvTranspose3d,
23
+ Linear, MaxPool2d, MaxPool3d)
24
+
25
+ __all__ = [
26
+ 'ConvModule', 'build_activation_layer', 'build_conv_layer',
27
+ 'build_norm_layer', 'build_padding_layer', 'build_upsample_layer',
28
+ 'build_plugin_layer', 'is_norm', 'HSigmoid', 'HSwish', 'NonLocal1d',
29
+ 'NonLocal2d', 'NonLocal3d', 'ContextBlock', 'GeneralizedAttention',
30
+ 'ACTIVATION_LAYERS', 'CONV_LAYERS', 'NORM_LAYERS', 'PADDING_LAYERS',
31
+ 'UPSAMPLE_LAYERS', 'PLUGIN_LAYERS', 'Scale', 'ConvAWS2d', 'ConvWS2d',
32
+ 'conv_ws_2d', 'DepthwiseSeparableConvModule', 'Swish', 'Linear',
33
+ 'Conv2dAdaptivePadding', 'Conv2d', 'ConvTranspose2d', 'MaxPool2d',
34
+ 'ConvTranspose3d', 'MaxPool3d', 'Conv3d', 'Dropout', 'DropPath'
35
+ ]
Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/bricks/activation.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+
6
+ from annotator.uniformer.mmcv.utils import TORCH_VERSION, build_from_cfg, digit_version
7
+ from .registry import ACTIVATION_LAYERS
8
+
9
# Register PyTorch's built-in activation modules so they can be constructed
# from config dicts, e.g. dict(type='ReLU').
for module in [
        nn.ReLU, nn.LeakyReLU, nn.PReLU, nn.RReLU, nn.ReLU6, nn.ELU,
        nn.Sigmoid, nn.Tanh
]:
    ACTIVATION_LAYERS.register_module(module=module)
14
+
15
+
16
@ACTIVATION_LAYERS.register_module(name='Clip')
@ACTIVATION_LAYERS.register_module()
class Clamp(nn.Module):
    """Clamp activation layer.

    Clips every element of the input feature map into the closed interval
    :math:`[min, max]`; see ``torch.clamp()`` for details. Registered under
    both the names ``Clamp`` and ``Clip``.

    Args:
        min (Number | optional): Lower-bound of the range to be clamped to.
            Default to -1.
        max (Number | optional): Upper-bound of the range to be clamped to.
            Default to 1.
    """

    def __init__(self, min=-1., max=1.):
        super(Clamp, self).__init__()
        self.min = min
        self.max = max

    def forward(self, x):
        """Clamp ``x`` element-wise into ``[self.min, self.max]``.

        Args:
            x (torch.Tensor): The input tensor.

        Returns:
            torch.Tensor: Clamped tensor.
        """
        return x.clamp(min=self.min, max=self.max)
46
+
47
+
48
class GELU(nn.Module):
    r"""Gaussian Error Linear Unit activation.

    .. math::
        \text{GELU}(x) = x * \Phi(x)

    where :math:`\Phi(x)` is the cumulative distribution function of the
    standard Gaussian distribution. This stateless module simply delegates
    to ``F.gelu`` and exists as a fallback for PyTorch versions that do not
    ship ``nn.GELU``.

    Shape:
        - Input: :math:`(N, *)` where `*` means, any number of additional
          dimensions
        - Output: :math:`(N, *)`, same shape as the input

    Examples::

        >>> m = nn.GELU()
        >>> input = torch.randn(2)
        >>> output = m(input)
    """

    def forward(self, input):
        # Stateless: forward directly to the functional implementation.
        return F.gelu(input)
72
+
73
+
74
# nn.GELU only exists from PyTorch 1.4 onward; register the local functional
# wrapper above on older versions (and on the parrots framework).
if (TORCH_VERSION == 'parrots'
        or digit_version(TORCH_VERSION) < digit_version('1.4')):
    ACTIVATION_LAYERS.register_module(module=GELU)
else:
    ACTIVATION_LAYERS.register_module(module=nn.GELU)
79
+
80
+
81
def build_activation_layer(cfg):
    """Build an activation layer from a config dict.

    Args:
        cfg (dict): The activation layer config, which should contain:
            - type (str): Layer type.
            - layer args: Args needed to instantiate an activation layer.

    Returns:
        nn.Module: Created activation layer.
    """
    layer = build_from_cfg(cfg, ACTIVATION_LAYERS)
    return layer
Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/bricks/context_block.py ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import torch
3
+ from torch import nn
4
+
5
+ from ..utils import constant_init, kaiming_init
6
+ from .registry import PLUGIN_LAYERS
7
+
8
+
9
def last_zero_init(m):
    """Zero-initialize ``m``, or only its final layer when it is an
    ``nn.Sequential`` (so the block starts as an identity-like residual)."""
    target = m[-1] if isinstance(m, nn.Sequential) else m
    constant_init(target, val=0)
14
+
15
+
16
@PLUGIN_LAYERS.register_module()
class ContextBlock(nn.Module):
    """ContextBlock module in GCNet.

    See 'GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond'
    (https://arxiv.org/abs/1904.11492) for details.

    Args:
        in_channels (int): Channels of the input feature map.
        ratio (float): Ratio of channels of transform bottleneck
        pooling_type (str): Pooling method for context modeling.
            Options are 'att' and 'avg', stand for attention pooling and
            average pooling respectively. Default: 'att'.
        fusion_types (Sequence[str]): Fusion method for feature fusion,
            Options are 'channels_add', 'channel_mul', stand for channelwise
            addition and multiplication respectively. Default: ('channel_add',)
    """

    _abbr_ = 'context_block'

    def __init__(self,
                 in_channels,
                 ratio,
                 pooling_type='att',
                 fusion_types=('channel_add', )):
        super(ContextBlock, self).__init__()
        assert pooling_type in ['avg', 'att']
        assert isinstance(fusion_types, (list, tuple))
        valid_fusion_types = ['channel_add', 'channel_mul']
        assert all(f in valid_fusion_types for f in fusion_types)
        assert len(fusion_types) > 0, 'at least one fusion should be used'
        self.in_channels = in_channels
        self.ratio = ratio
        self.planes = int(in_channels * ratio)
        self.pooling_type = pooling_type
        self.fusion_types = fusion_types
        # Context modeling: either attention pooling (1x1 mask conv + softmax
        # over spatial positions) or plain global average pooling.
        if pooling_type == 'att':
            self.conv_mask = nn.Conv2d(in_channels, 1, kernel_size=1)
            self.softmax = nn.Softmax(dim=2)
        else:
            self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # Transform branches; unused branches are kept as None.
        self.channel_add_conv = (
            self._make_transform() if 'channel_add' in fusion_types else None)
        self.channel_mul_conv = (
            self._make_transform() if 'channel_mul' in fusion_types else None)
        self.reset_parameters()

    def _make_transform(self):
        # 1x1 bottleneck: C -> planes -> C with LayerNorm and ReLU in between.
        return nn.Sequential(
            nn.Conv2d(self.in_channels, self.planes, kernel_size=1),
            nn.LayerNorm([self.planes, 1, 1]),
            nn.ReLU(inplace=True),
            nn.Conv2d(self.planes, self.in_channels, kernel_size=1))

    def reset_parameters(self):
        """Initialize the mask conv with kaiming init and zero-init the last
        layer of each transform branch."""
        if self.pooling_type == 'att':
            kaiming_init(self.conv_mask, mode='fan_in')
            self.conv_mask.inited = True
        for transform in (self.channel_add_conv, self.channel_mul_conv):
            if transform is not None:
                last_zero_init(transform)

    def spatial_pool(self, x):
        """Aggregate the input into a global context vector of shape
        [N, C, 1, 1]."""
        batch, channel, height, width = x.size()
        if self.pooling_type != 'att':
            # [N, C, 1, 1]
            return self.avg_pool(x)
        # [N, 1, C, H * W]
        flat = x.view(batch, channel, height * width).unsqueeze(1)
        # [N, 1, H * W]: attention weights over spatial positions
        attn = self.softmax(
            self.conv_mask(x).view(batch, 1, height * width))
        # [N, 1, C, 1] = [N, 1, C, HW] @ [N, 1, HW, 1]
        context = torch.matmul(flat, attn.unsqueeze(-1))
        # [N, C, 1, 1]
        return context.view(batch, channel, 1, 1)

    def forward(self, x):
        # [N, C, 1, 1] global context vector
        context = self.spatial_pool(x)

        out = x
        if self.channel_mul_conv is not None:
            # Channel-wise gating by the transformed context.
            out = out * torch.sigmoid(self.channel_mul_conv(context))
        if self.channel_add_conv is not None:
            # Channel-wise additive fusion of the transformed context.
            out = out + self.channel_add_conv(context)

        return out
Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/bricks/conv.py ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ from torch import nn
3
+
4
+ from .registry import CONV_LAYERS
5
+
6
# Register PyTorch's native convolutions; the bare 'Conv' alias maps to 2D.
CONV_LAYERS.register_module('Conv1d', module=nn.Conv1d)
CONV_LAYERS.register_module('Conv2d', module=nn.Conv2d)
CONV_LAYERS.register_module('Conv3d', module=nn.Conv3d)
CONV_LAYERS.register_module('Conv', module=nn.Conv2d)


def build_conv_layer(cfg, *args, **kwargs):
    """Build convolution layer.

    Args:
        cfg (None or dict): The conv layer config, which should contain:
            - type (str): Layer type.
            - layer args: Args needed to instantiate an conv layer.
        args (argument list): Arguments passed to the `__init__`
            method of the corresponding conv layer.
        kwargs (keyword arguments): Keyword arguments passed to the `__init__`
            method of the corresponding conv layer.

    Returns:
        nn.Module: Created conv layer.

    Raises:
        TypeError: If ``cfg`` is neither None nor a dict.
        KeyError: If ``cfg`` lacks a ``type`` key or the type is not
            registered in ``CONV_LAYERS``.
    """
    if cfg is None:
        # Default to a plain 2D convolution.
        cfg_ = dict(type='Conv2d')
    else:
        if not isinstance(cfg, dict):
            raise TypeError('cfg must be a dict')
        if 'type' not in cfg:
            raise KeyError('the cfg dict must contain the key "type"')
        # Copy so the caller's config is never mutated by the pop() below.
        cfg_ = cfg.copy()

    layer_type = cfg_.pop('type')
    if layer_type not in CONV_LAYERS:
        # Fixed copy-paste error: this builder resolves conv types, not norm
        # types (the old message said "Unrecognized norm type").
        raise KeyError(f'Unrecognized conv type {layer_type}')
    conv_layer = CONV_LAYERS.get(layer_type)

    # Remaining cfg entries become constructor kwargs of the conv layer.
    layer = conv_layer(*args, **kwargs, **cfg_)

    return layer
Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/bricks/conv2d_adaptive_padding.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import math
3
+
4
+ from torch import nn
5
+ from torch.nn import functional as F
6
+
7
+ from .registry import CONV_LAYERS
8
+
9
+
10
@CONV_LAYERS.register_module()
class Conv2dAdaptivePadding(nn.Conv2d):
    """2D convolution with TensorFlow-style "same" padding.

    Padding is computed per input (if needed) so the input is fully covered
    by the filter and stride: with stride 1 the output has the same spatial
    size as the input; with stride 2 it is halved, and so on.

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the convolving kernel
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to both sides of
            the input. Default: 0
        dilation (int or tuple, optional): Spacing between kernel elements.
            Default: 1
        groups (int, optional): Number of blocked connections from input
            channels to output channels. Default: 1
        bias (bool, optional): If ``True``, adds a learnable bias to the
            output. Default: ``True``
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True):
        # Padding is applied dynamically in forward(), so the underlying
        # nn.Conv2d is built with padding 0 (the `padding` arg is ignored).
        super().__init__(in_channels, out_channels, kernel_size, stride, 0,
                         dilation, groups, bias)

    def forward(self, x):
        in_h, in_w = x.size()[-2:]
        k_h, k_w = self.weight.size()[-2:]
        s_h, s_w = self.stride
        # Target output size under "same" semantics.
        out_h = math.ceil(in_h / s_h)
        out_w = math.ceil(in_w / s_w)
        # Total padding needed per dimension (never negative).
        pad_h = max(
            (out_h - 1) * s_h + (k_h - 1) * self.dilation[0] + 1 - in_h, 0)
        pad_w = max(
            (out_w - 1) * s_w + (k_w - 1) * self.dilation[1] + 1 - in_w, 0)
        if pad_h > 0 or pad_w > 0:
            left = pad_w // 2
            top = pad_h // 2
            # Odd totals put the extra pixel on the right/bottom.
            x = F.pad(x, [left, pad_w - left, top, pad_h - top])
        return F.conv2d(x, self.weight, self.bias, self.stride, self.padding,
                        self.dilation, self.groups)
Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/bricks/conv_module.py ADDED
@@ -0,0 +1,206 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import warnings
3
+
4
+ import torch.nn as nn
5
+
6
+ from annotator.uniformer.mmcv.utils import _BatchNorm, _InstanceNorm
7
+ from ..utils import constant_init, kaiming_init
8
+ from .activation import build_activation_layer
9
+ from .conv import build_conv_layer
10
+ from .norm import build_norm_layer
11
+ from .padding import build_padding_layer
12
+ from .registry import PLUGIN_LAYERS
13
+
14
+
15
@PLUGIN_LAYERS.register_module()
class ConvModule(nn.Module):
    """A conv block that bundles conv/norm/activation layers.

    This block simplifies the usage of convolution layers, which are commonly
    used with a norm layer (e.g., BatchNorm) and activation layer (e.g., ReLU).
    It is based upon three build methods: `build_conv_layer()`,
    `build_norm_layer()` and `build_activation_layer()`.

    Besides, we add some additional features in this module.
    1. Automatically set `bias` of the conv layer.
    2. Spectral norm is supported.
    3. More padding modes are supported. Before PyTorch 1.5, nn.Conv2d only
    supports zero and circular padding, and we add "reflect" padding mode.

    Args:
        in_channels (int): Number of channels in the input feature map.
            Same as that in ``nn._ConvNd``.
        out_channels (int): Number of channels produced by the convolution.
            Same as that in ``nn._ConvNd``.
        kernel_size (int | tuple[int]): Size of the convolving kernel.
            Same as that in ``nn._ConvNd``.
        stride (int | tuple[int]): Stride of the convolution.
            Same as that in ``nn._ConvNd``.
        padding (int | tuple[int]): Zero-padding added to both sides of
            the input. Same as that in ``nn._ConvNd``.
        dilation (int | tuple[int]): Spacing between kernel elements.
            Same as that in ``nn._ConvNd``.
        groups (int): Number of blocked connections from input channels to
            output channels. Same as that in ``nn._ConvNd``.
        bias (bool | str): If specified as `auto`, it will be decided by the
            norm_cfg. Bias will be set as True if `norm_cfg` is None, otherwise
            False. Default: "auto".
        conv_cfg (dict): Config dict for convolution layer. Default: None,
            which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer. Default: None.
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='ReLU').
        inplace (bool): Whether to use inplace mode for activation.
            Default: True.
        with_spectral_norm (bool): Whether use spectral norm in conv module.
            Default: False.
        padding_mode (str): If the `padding_mode` has not been supported by
            current `Conv2d` in PyTorch, we will use our own padding layer
            instead. Currently, we support ['zeros', 'circular'] with official
            implementation and ['reflect'] with our own implementation.
            Default: 'zeros'.
        order (tuple[str]): The order of conv/norm/activation layers. It is a
            sequence of "conv", "norm" and "act". Common examples are
            ("conv", "norm", "act") and ("act", "conv", "norm").
            Default: ('conv', 'norm', 'act').
    """

    _abbr_ = 'conv_block'

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias='auto',
                 conv_cfg=None,
                 norm_cfg=None,
                 act_cfg=dict(type='ReLU'),
                 inplace=True,
                 with_spectral_norm=False,
                 padding_mode='zeros',
                 order=('conv', 'norm', 'act')):
        super(ConvModule, self).__init__()
        assert conv_cfg is None or isinstance(conv_cfg, dict)
        assert norm_cfg is None or isinstance(norm_cfg, dict)
        assert act_cfg is None or isinstance(act_cfg, dict)
        # Padding modes handled natively by nn.Conv2d; any other mode is
        # implemented via an explicit padding layer before the conv.
        official_padding_mode = ['zeros', 'circular']
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.inplace = inplace
        self.with_spectral_norm = with_spectral_norm
        self.with_explicit_padding = padding_mode not in official_padding_mode
        self.order = order
        assert isinstance(self.order, tuple) and len(self.order) == 3
        assert set(order) == set(['conv', 'norm', 'act'])

        self.with_norm = norm_cfg is not None
        self.with_activation = act_cfg is not None
        # if the conv layer is before a norm layer, bias is unnecessary.
        if bias == 'auto':
            bias = not self.with_norm
        self.with_bias = bias

        if self.with_explicit_padding:
            pad_cfg = dict(type=padding_mode)
            self.padding_layer = build_padding_layer(pad_cfg, padding)

        # reset padding to 0 for conv module
        # (the explicit padding layer above already handles it)
        conv_padding = 0 if self.with_explicit_padding else padding
        # build convolution layer
        self.conv = build_conv_layer(
            conv_cfg,
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=conv_padding,
            dilation=dilation,
            groups=groups,
            bias=bias)
        # export the attributes of self.conv to a higher level for convenience
        self.in_channels = self.conv.in_channels
        self.out_channels = self.conv.out_channels
        self.kernel_size = self.conv.kernel_size
        self.stride = self.conv.stride
        self.padding = padding
        self.dilation = self.conv.dilation
        self.transposed = self.conv.transposed
        self.output_padding = self.conv.output_padding
        self.groups = self.conv.groups

        if self.with_spectral_norm:
            self.conv = nn.utils.spectral_norm(self.conv)

        # build normalization layers
        if self.with_norm:
            # norm layer is after conv layer
            if order.index('norm') > order.index('conv'):
                norm_channels = out_channels
            else:
                norm_channels = in_channels
            self.norm_name, norm = build_norm_layer(norm_cfg, norm_channels)
            self.add_module(self.norm_name, norm)
            if self.with_bias:
                if isinstance(norm, (_BatchNorm, _InstanceNorm)):
                    warnings.warn(
                        'Unnecessary conv bias before batch/instance norm')
        else:
            self.norm_name = None

        # build activation layer
        if self.with_activation:
            # Copy before mutation so the caller's (possibly shared default)
            # act_cfg dict is never modified.
            act_cfg_ = act_cfg.copy()
            # nn.Tanh has no 'inplace' argument
            if act_cfg_['type'] not in [
                    'Tanh', 'PReLU', 'Sigmoid', 'HSigmoid', 'Swish'
            ]:
                act_cfg_.setdefault('inplace', inplace)
            self.activate = build_activation_layer(act_cfg_)

        # Use msra init by default
        self.init_weights()

    @property
    def norm(self):
        # Resolve the norm module lazily by its registered submodule name
        # (None when no norm layer was configured).
        if self.norm_name:
            return getattr(self, self.norm_name)
        else:
            return None

    def init_weights(self):
        # 1. It is mainly for customized conv layers with their own
        #    initialization manners by calling their own ``init_weights()``,
        #    and we do not want ConvModule to override the initialization.
        # 2. For customized conv layers without their own initialization
        #    manners (that is, they don't have their own ``init_weights()``)
        #    and PyTorch's conv layers, they will be initialized by
        #    this method with default ``kaiming_init``.
        # Note: For PyTorch's conv layers, they will be overwritten by our
        #    initialization implementation using default ``kaiming_init``.
        if not hasattr(self.conv, 'init_weights'):
            if self.with_activation and self.act_cfg['type'] == 'LeakyReLU':
                nonlinearity = 'leaky_relu'
                a = self.act_cfg.get('negative_slope', 0.01)
            else:
                nonlinearity = 'relu'
                a = 0
            kaiming_init(self.conv, a=a, nonlinearity=nonlinearity)
        if self.with_norm:
            constant_init(self.norm, 1, bias=0)

    def forward(self, x, activate=True, norm=True):
        """Apply conv/norm/activation in ``self.order``.

        Args:
            x (torch.Tensor): Input feature map.
            activate (bool): Whether to apply the activation layer this call.
            norm (bool): Whether to apply the norm layer this call.

        Returns:
            torch.Tensor: Output feature map.
        """
        for layer in self.order:
            if layer == 'conv':
                if self.with_explicit_padding:
                    x = self.padding_layer(x)
                x = self.conv(x)
            elif layer == 'norm' and norm and self.with_norm:
                x = self.norm(x)
            elif layer == 'act' and activate and self.with_activation:
                x = self.activate(x)
        return x
Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/bricks/conv_ws.py ADDED
@@ -0,0 +1,148 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+
6
+ from .registry import CONV_LAYERS
7
+
8
+
9
def conv_ws_2d(input,
               weight,
               bias=None,
               stride=1,
               padding=0,
               dilation=1,
               groups=1,
               eps=1e-5):
    """2D convolution with Weight Standardization.

    The kernel is standardized per output channel (zero mean, unit std,
    with ``eps`` added to the std for numerical stability) before being
    passed to ``F.conv2d``. Remaining arguments match ``F.conv2d``.
    """
    out_channels = weight.size(0)
    flat = weight.view(out_channels, -1)
    mu = flat.mean(dim=1).view(out_channels, 1, 1, 1)
    sigma = flat.std(dim=1).view(out_channels, 1, 1, 1)
    standardized = (weight - mu) / (sigma + eps)
    return F.conv2d(input, standardized, bias, stride, padding, dilation,
                    groups)
23
+
24
+
25
@CONV_LAYERS.register_module('ConvWS')
class ConvWS2d(nn.Conv2d):
    """``nn.Conv2d`` variant with Weight Standardization.

    The stored kernel is standardized (zero mean / unit std per output
    channel) on every forward pass via :func:`conv_ws_2d`; see
    https://arxiv.org/pdf/1903.10520.pdf.

    Args:
        eps (float): Constant added to the std for numerical stability.
            Default: 1e-5. All other arguments are identical to
            ``nn.Conv2d``.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True,
                 eps=1e-5):
        super(ConvWS2d, self).__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias)
        self.eps = eps

    def forward(self, x):
        # Standardization happens in the functional helper so the logic
        # lives in one place.
        return conv_ws_2d(x, self.weight, self.bias, self.stride,
                          self.padding, self.dilation, self.groups, self.eps)
52
+
53
+
54
@CONV_LAYERS.register_module(name='ConvAWS')
class ConvAWS2d(nn.Conv2d):
    """AWS (Adaptive Weight Standardization)

    This is a variant of Weight Standardization
    (https://arxiv.org/pdf/1903.10520.pdf)
    It is used in DetectoRS to avoid NaN
    (https://arxiv.org/pdf/2006.02334.pdf)

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the conv kernel
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to both sides of
            the input. Default: 0
        dilation (int or tuple, optional): Spacing between kernel elements.
            Default: 1
        groups (int, optional): Number of blocked connections from input
            channels to output channels. Default: 1
        bias (bool, optional): If set True, adds a learnable bias to the
            output. Default: True
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True):
        super().__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias)
        # Learned affine (scale/shift) applied after standardization, one
        # value per output channel. Stored as buffers so they travel with
        # the state dict but are not optimized as parameters.
        self.register_buffer('weight_gamma',
                             torch.ones(self.out_channels, 1, 1, 1))
        self.register_buffer('weight_beta',
                             torch.zeros(self.out_channels, 1, 1, 1))

    def _get_weight(self, weight):
        """Standardize ``weight`` per output channel, then apply the
        learned gamma/beta affine transform."""
        weight_flat = weight.view(weight.size(0), -1)
        mean = weight_flat.mean(dim=1).view(-1, 1, 1, 1)
        std = torch.sqrt(weight_flat.var(dim=1) + 1e-5).view(-1, 1, 1, 1)
        weight = (weight - mean) / std
        weight = self.weight_gamma * weight + self.weight_beta
        return weight

    def forward(self, x):
        """Convolve ``x`` with the adaptively standardized kernel."""
        weight = self._get_weight(self.weight)
        return F.conv2d(x, weight, self.bias, self.stride, self.padding,
                        self.dilation, self.groups)

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        """Override default load function.

        AWS overrides the function _load_from_state_dict to recover
        weight_gamma and weight_beta if they are missing. If weight_gamma and
        weight_beta are found in the checkpoint, this function will return
        after super()._load_from_state_dict. Otherwise, it will compute the
        mean and std of the pretrained weights and store them in weight_beta
        and weight_gamma.
        """

        # Sentinel trick: gamma is set to -1 before loading. If the
        # checkpoint contains weight_gamma it overwrites the sentinel,
        # making the mean positive below; otherwise the sentinel survives
        # and we know gamma/beta must be reconstructed from the weights.
        self.weight_gamma.data.fill_(-1)
        local_missing_keys = []
        super()._load_from_state_dict(state_dict, prefix, local_metadata,
                                      strict, local_missing_keys,
                                      unexpected_keys, error_msgs)
        if self.weight_gamma.data.mean() > 0:
            # Checkpoint already carried gamma/beta: forward all missing
            # keys unchanged.
            for k in local_missing_keys:
                missing_keys.append(k)
            return
        # Reconstruct gamma/beta so that _get_weight reproduces the raw
        # pretrained weights (std -> gamma, mean -> beta).
        weight = self.weight.data
        weight_flat = weight.view(weight.size(0), -1)
        mean = weight_flat.mean(dim=1).view(-1, 1, 1, 1)
        std = torch.sqrt(weight_flat.var(dim=1) + 1e-5).view(-1, 1, 1, 1)
        self.weight_beta.data.copy_(mean)
        self.weight_gamma.data.copy_(std)
        # gamma/beta were recovered, so do not report them as missing.
        missing_gamma_beta = [
            k for k in local_missing_keys
            if k.endswith('weight_gamma') or k.endswith('weight_beta')
        ]
        for k in missing_gamma_beta:
            local_missing_keys.remove(k)
        for k in local_missing_keys:
            missing_keys.append(k)
Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/bricks/depthwise_separable_conv_module.py ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import torch.nn as nn
3
+
4
+ from .conv_module import ConvModule
5
+
6
+
7
class DepthwiseSeparableConvModule(nn.Module):
    """Depthwise separable convolution (depthwise conv + 1x1 pointwise conv).

    See https://arxiv.org/pdf/1704.04861.pdf for details.

    Drop-in replacement for a single ConvModule: the conv is split into a
    depthwise ConvModule (``groups=in_channels``) followed by a pointwise
    1x1 ConvModule. Each sub-module gets its own norm/activation layers
    when ``norm_cfg``/``act_cfg`` (or the per-branch overrides) are set.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        kernel_size (int | tuple[int]): Depthwise kernel size.
        stride (int | tuple[int]): Depthwise stride. Default: 1.
        padding (int | tuple[int]): Depthwise zero padding. Default: 0.
        dilation (int | tuple[int]): Depthwise dilation. Default: 1.
        norm_cfg (dict): Shared norm config for both branches. Default: None.
        act_cfg (dict): Shared activation config for both branches.
            Default: dict(type='ReLU').
        dw_norm_cfg / dw_act_cfg: Overrides for the depthwise branch;
            the string ``'default'`` means "inherit the shared config".
        pw_norm_cfg / pw_act_cfg: Same, for the pointwise branch.
        kwargs (optional): Other shared ConvModule arguments.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 norm_cfg=None,
                 act_cfg=dict(type='ReLU'),
                 dw_norm_cfg='default',
                 dw_act_cfg='default',
                 pw_norm_cfg='default',
                 pw_act_cfg='default',
                 **kwargs):
        super(DepthwiseSeparableConvModule, self).__init__()
        assert 'groups' not in kwargs, 'groups should not be specified'

        def _pick(specific, shared):
            # 'default' is a sentinel meaning "inherit the shared config".
            return shared if specific == 'default' else specific

        # Depthwise: one filter per input channel.
        self.depthwise_conv = ConvModule(
            in_channels,
            in_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=in_channels,
            norm_cfg=_pick(dw_norm_cfg, norm_cfg),
            act_cfg=_pick(dw_act_cfg, act_cfg),
            **kwargs)

        # Pointwise: 1x1 conv mixing channels.
        self.pointwise_conv = ConvModule(
            in_channels,
            out_channels,
            1,
            norm_cfg=_pick(pw_norm_cfg, norm_cfg),
            act_cfg=_pick(pw_act_cfg, act_cfg),
            **kwargs)

    def forward(self, x):
        """Apply depthwise then pointwise convolution."""
        return self.pointwise_conv(self.depthwise_conv(x))
Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/bricks/drop.py ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import torch
3
+ import torch.nn as nn
4
+
5
+ from annotator.uniformer.mmcv import build_from_cfg
6
+ from .registry import DROPOUT_LAYERS
7
+
8
+
9
def drop_path(x, drop_prob=0., training=False):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of
    residual blocks).

    We follow the implementation
    https://github.com/rwightman/pytorch-image-models/blob/a2727c1bf78ba0d7b5727f5f95e37fb7f8866b1f/timm/models/layers/drop.py # noqa: E501

    Each sample in the batch is either kept (and rescaled by
    ``1 / keep_prob``) or zeroed entirely. A no-op when ``drop_prob`` is 0
    or the module is in eval mode.
    """
    if drop_prob == 0. or not training:
        return x
    keep_prob = 1 - drop_prob
    # Broadcastable per-sample mask: (N, 1, 1, ...) for any input rank.
    mask_shape = (x.shape[0], ) + (1, ) * (x.ndim - 1)
    mask = torch.rand(mask_shape, dtype=x.dtype, device=x.device)
    mask = mask.add_(keep_prob).floor_()  # binarize: 1 with prob keep_prob
    return x.div(keep_prob) * mask
25
+
26
+
27
@DROPOUT_LAYERS.register_module()
class DropPath(nn.Module):
    """Stochastic-depth module wrapping :func:`drop_path`.

    Zeroes whole samples in the main path of residual blocks with
    probability ``drop_prob`` during training; identity in eval mode.
    Implementation follows
    https://github.com/rwightman/pytorch-image-models/blob/a2727c1bf78ba0d7b5727f5f95e37fb7f8866b1f/timm/models/layers/drop.py # noqa: E501

    Args:
        drop_prob (float): Probability of the path to be zeroed. Default: 0.1
    """

    def __init__(self, drop_prob=0.1):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        # ``self.training`` toggles the stochastic behavior.
        return drop_path(x, self.drop_prob, self.training)
45
+
46
+
47
@DROPOUT_LAYERS.register_module()
class Dropout(nn.Dropout):
    """Registry-friendly wrapper around ``torch.nn.Dropout``.

    Renames ``p`` to ``drop_prob`` so the config interface matches
    ``DropPath``.

    Args:
        drop_prob (float): Probability of an element being zeroed.
            Default: 0.5.
        inplace (bool): Whether to operate in-place. Default: False.
    """

    def __init__(self, drop_prob=0.5, inplace=False):
        super(Dropout, self).__init__(p=drop_prob, inplace=inplace)
61
+
62
+
63
def build_dropout(cfg, default_args=None):
    """Build a dropout layer from config.

    Args:
        cfg (dict): Config with a ``type`` key naming a layer registered
            in ``DROPOUT_LAYERS``.
        default_args (dict, optional): Defaults merged into ``cfg``.

    Returns:
        nn.Module: The constructed dropout layer.
    """
    return build_from_cfg(cfg, DROPOUT_LAYERS, default_args)
Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/bricks/generalized_attention.py ADDED
@@ -0,0 +1,412 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import math
3
+
4
+ import numpy as np
5
+ import torch
6
+ import torch.nn as nn
7
+ import torch.nn.functional as F
8
+
9
+ from ..utils import kaiming_init
10
+ from .registry import PLUGIN_LAYERS
11
+
12
+
13
@PLUGIN_LAYERS.register_module()
class GeneralizedAttention(nn.Module):
    """GeneralizedAttention module.

    See 'An Empirical Study of Spatial Attention Mechanisms in Deep Networks'
    (https://arxiv.org/abs/1904.05873) for details.

    Args:
        in_channels (int): Channels of the input feature map.
        spatial_range (int): The spatial range. -1 indicates no spatial range
            constraint. Default: -1.
        num_heads (int): The head number of empirical_attention module.
            Default: 9.
        position_embedding_dim (int): The position embedding dimension.
            Default: -1.
        position_magnitude (int): A multiplier acting on coord difference.
            Default: 1.
        kv_stride (int): The feature stride acting on key/value feature map.
            Default: 2.
        q_stride (int): The feature stride acting on query feature map.
            Default: 1.
        attention_type (str): A binary indicator string for indicating which
            items in generalized empirical_attention module are used.
            Default: '1111'.

            - '1000' indicates 'query and key content' (appr - appr) item,
            - '0100' indicates 'query content and relative position'
              (appr - position) item,
            - '0010' indicates 'key content only' (bias - appr) item,
            - '0001' indicates 'relative position only' (bias - position) item.
    """

    _abbr_ = 'gen_attention_block'

    def __init__(self,
                 in_channels,
                 spatial_range=-1,
                 num_heads=9,
                 position_embedding_dim=-1,
                 position_magnitude=1,
                 kv_stride=2,
                 q_stride=1,
                 attention_type='1111'):

        super(GeneralizedAttention, self).__init__()

        # hard range means local range for non-local operation
        self.position_embedding_dim = (
            position_embedding_dim
            if position_embedding_dim > 0 else in_channels)

        self.position_magnitude = position_magnitude
        self.num_heads = num_heads
        self.in_channels = in_channels
        self.spatial_range = spatial_range
        self.kv_stride = kv_stride
        self.q_stride = q_stride
        # e.g. '1010' -> [True, False, True, False]
        self.attention_type = [bool(int(_)) for _ in attention_type]
        self.qk_embed_dim = in_channels // num_heads
        out_c = self.qk_embed_dim * num_heads

        if self.attention_type[0] or self.attention_type[1]:
            self.query_conv = nn.Conv2d(
                in_channels=in_channels,
                out_channels=out_c,
                kernel_size=1,
                bias=False)
            # Tag for custom kaiming init in init_weights().
            self.query_conv.kaiming_init = True

        if self.attention_type[0] or self.attention_type[2]:
            self.key_conv = nn.Conv2d(
                in_channels=in_channels,
                out_channels=out_c,
                kernel_size=1,
                bias=False)
            self.key_conv.kaiming_init = True

        self.v_dim = in_channels // num_heads
        self.value_conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=self.v_dim * num_heads,
            kernel_size=1,
            bias=False)
        self.value_conv.kaiming_init = True

        if self.attention_type[1] or self.attention_type[3]:
            # Project sinusoidal position embeddings (x and y separately).
            self.appr_geom_fc_x = nn.Linear(
                self.position_embedding_dim // 2, out_c, bias=False)
            self.appr_geom_fc_x.kaiming_init = True

            self.appr_geom_fc_y = nn.Linear(
                self.position_embedding_dim // 2, out_c, bias=False)
            self.appr_geom_fc_y.kaiming_init = True

        if self.attention_type[2]:
            # Uniform init in [-stdv, stdv].
            stdv = 1.0 / math.sqrt(self.qk_embed_dim * 2)
            appr_bias_value = -2 * stdv * torch.rand(out_c) + stdv
            self.appr_bias = nn.Parameter(appr_bias_value)

        if self.attention_type[3]:
            stdv = 1.0 / math.sqrt(self.qk_embed_dim * 2)
            geom_bias_value = -2 * stdv * torch.rand(out_c) + stdv
            self.geom_bias = nn.Parameter(geom_bias_value)

        self.proj_conv = nn.Conv2d(
            in_channels=self.v_dim * num_heads,
            out_channels=in_channels,
            kernel_size=1,
            bias=True)
        self.proj_conv.kaiming_init = True
        # Residual scale, initialized to 0 so the block starts as identity.
        self.gamma = nn.Parameter(torch.zeros(1))

        if self.spatial_range >= 0:
            # only works when non local is after 3*3 conv
            # NOTE(review): max_len is only defined for 256/512 channels;
            # other channel counts would raise NameError below - confirm
            # intended usage.
            if in_channels == 256:
                max_len = 84
            elif in_channels == 512:
                max_len = 42

            max_len_kv = int((max_len - 1.0) / self.kv_stride + 1)
            # `np.int` was removed in NumPy 1.24; the builtin `int` is the
            # portable spelling of the same integer dtype.
            local_constraint_map = np.ones(
                (max_len, max_len, max_len_kv, max_len_kv), dtype=int)
            for iy in range(max_len):
                for ix in range(max_len):
                    local_constraint_map[
                        iy, ix,
                        max((iy - self.spatial_range) //
                            self.kv_stride, 0):min((iy + self.spatial_range +
                                                    1) // self.kv_stride +
                                                   1, max_len),
                        max((ix - self.spatial_range) //
                            self.kv_stride, 0):min((ix + self.spatial_range +
                                                    1) // self.kv_stride +
                                                   1, max_len)] = 0

            # Kept as uint8 for checkpoint compatibility; converted to bool
            # at the masked_fill_ call site.
            self.local_constraint_map = nn.Parameter(
                torch.from_numpy(local_constraint_map).byte(),
                requires_grad=False)

        if self.q_stride > 1:
            self.q_downsample = nn.AvgPool2d(
                kernel_size=1, stride=self.q_stride)
        else:
            self.q_downsample = None

        if self.kv_stride > 1:
            self.kv_downsample = nn.AvgPool2d(
                kernel_size=1, stride=self.kv_stride)
        else:
            self.kv_downsample = None

        self.init_weights()

    def get_position_embedding(self,
                               h,
                               w,
                               h_kv,
                               w_kv,
                               q_stride,
                               kv_stride,
                               device,
                               dtype,
                               feat_dim,
                               wave_length=1000):
        """Build sinusoidal embeddings of the (x, y) coordinate differences
        between query and key/value positions.

        Returns:
            tuple(Tensor): ``(embedding_x, embedding_y)`` with shapes
            ``(w, w_kv, feat_dim/2)`` and ``(h, h_kv, feat_dim/2)``.
        """
        # the default type of Tensor is float32, leading to type mismatch
        # in fp16 mode. Cast it to support fp16 mode.
        h_idxs = torch.linspace(0, h - 1, h).to(device=device, dtype=dtype)
        h_idxs = h_idxs.view((h, 1)) * q_stride

        w_idxs = torch.linspace(0, w - 1, w).to(device=device, dtype=dtype)
        w_idxs = w_idxs.view((w, 1)) * q_stride

        h_kv_idxs = torch.linspace(0, h_kv - 1, h_kv).to(
            device=device, dtype=dtype)
        h_kv_idxs = h_kv_idxs.view((h_kv, 1)) * kv_stride

        w_kv_idxs = torch.linspace(0, w_kv - 1, w_kv).to(
            device=device, dtype=dtype)
        w_kv_idxs = w_kv_idxs.view((w_kv, 1)) * kv_stride

        # (h, h_kv, 1)
        h_diff = h_idxs.unsqueeze(1) - h_kv_idxs.unsqueeze(0)
        h_diff *= self.position_magnitude

        # (w, w_kv, 1)
        w_diff = w_idxs.unsqueeze(1) - w_kv_idxs.unsqueeze(0)
        w_diff *= self.position_magnitude

        feat_range = torch.arange(0, feat_dim / 4).to(
            device=device, dtype=dtype)

        dim_mat = torch.Tensor([wave_length]).to(device=device, dtype=dtype)
        dim_mat = dim_mat**((4. / feat_dim) * feat_range)
        dim_mat = dim_mat.view((1, 1, -1))

        embedding_x = torch.cat(
            ((w_diff / dim_mat).sin(), (w_diff / dim_mat).cos()), dim=2)

        embedding_y = torch.cat(
            ((h_diff / dim_mat).sin(), (h_diff / dim_mat).cos()), dim=2)

        return embedding_x, embedding_y

    def forward(self, x_input):
        """Apply generalized attention and add the (gamma-scaled) result to
        the input as a residual."""
        num_heads = self.num_heads

        # use empirical_attention
        if self.q_downsample is not None:
            x_q = self.q_downsample(x_input)
        else:
            x_q = x_input
        n, _, h, w = x_q.shape

        if self.kv_downsample is not None:
            x_kv = self.kv_downsample(x_input)
        else:
            x_kv = x_input
        _, _, h_kv, w_kv = x_kv.shape

        if self.attention_type[0] or self.attention_type[1]:
            proj_query = self.query_conv(x_q).view(
                (n, num_heads, self.qk_embed_dim, h * w))
            proj_query = proj_query.permute(0, 1, 3, 2)

        if self.attention_type[0] or self.attention_type[2]:
            proj_key = self.key_conv(x_kv).view(
                (n, num_heads, self.qk_embed_dim, h_kv * w_kv))

        if self.attention_type[1] or self.attention_type[3]:
            position_embed_x, position_embed_y = self.get_position_embedding(
                h, w, h_kv, w_kv, self.q_stride, self.kv_stride,
                x_input.device, x_input.dtype, self.position_embedding_dim)
            # (n, num_heads, w, w_kv, dim)
            position_feat_x = self.appr_geom_fc_x(position_embed_x).\
                view(1, w, w_kv, num_heads, self.qk_embed_dim).\
                permute(0, 3, 1, 2, 4).\
                repeat(n, 1, 1, 1, 1)

            # (n, num_heads, h, h_kv, dim)
            position_feat_y = self.appr_geom_fc_y(position_embed_y).\
                view(1, h, h_kv, num_heads, self.qk_embed_dim).\
                permute(0, 3, 1, 2, 4).\
                repeat(n, 1, 1, 1, 1)

            position_feat_x /= math.sqrt(2)
            position_feat_y /= math.sqrt(2)

        # accelerate for saliency only
        if (np.sum(self.attention_type) == 1) and self.attention_type[2]:
            appr_bias = self.appr_bias.\
                view(1, num_heads, 1, self.qk_embed_dim).\
                repeat(n, 1, 1, 1)

            energy = torch.matmul(appr_bias, proj_key).\
                view(n, num_heads, 1, h_kv * w_kv)

            # The saliency map is identical for every query position, so
            # collapse the query grid to a single cell.
            h = 1
            w = 1
        else:
            # (n, num_heads, h*w, h_kv*w_kv), query before key, 540mb for
            if not self.attention_type[0]:
                energy = torch.zeros(
                    n,
                    num_heads,
                    h,
                    w,
                    h_kv,
                    w_kv,
                    dtype=x_input.dtype,
                    device=x_input.device)

            # attention_type[0]: appr - appr
            # attention_type[1]: appr - position
            # attention_type[2]: bias - appr
            # attention_type[3]: bias - position
            if self.attention_type[0] or self.attention_type[2]:
                if self.attention_type[0] and self.attention_type[2]:
                    appr_bias = self.appr_bias.\
                        view(1, num_heads, 1, self.qk_embed_dim)
                    energy = torch.matmul(proj_query + appr_bias, proj_key).\
                        view(n, num_heads, h, w, h_kv, w_kv)

                elif self.attention_type[0]:
                    energy = torch.matmul(proj_query, proj_key).\
                        view(n, num_heads, h, w, h_kv, w_kv)

                elif self.attention_type[2]:
                    appr_bias = self.appr_bias.\
                        view(1, num_heads, 1, self.qk_embed_dim).\
                        repeat(n, 1, 1, 1)

                    energy += torch.matmul(appr_bias, proj_key).\
                        view(n, num_heads, 1, 1, h_kv, w_kv)

            if self.attention_type[1] or self.attention_type[3]:
                if self.attention_type[1] and self.attention_type[3]:
                    geom_bias = self.geom_bias.\
                        view(1, num_heads, 1, self.qk_embed_dim)

                    proj_query_reshape = (proj_query + geom_bias).\
                        view(n, num_heads, h, w, self.qk_embed_dim)

                    energy_x = torch.matmul(
                        proj_query_reshape.permute(0, 1, 3, 2, 4),
                        position_feat_x.permute(0, 1, 2, 4, 3))
                    energy_x = energy_x.\
                        permute(0, 1, 3, 2, 4).unsqueeze(4)

                    energy_y = torch.matmul(
                        proj_query_reshape,
                        position_feat_y.permute(0, 1, 2, 4, 3))
                    energy_y = energy_y.unsqueeze(5)

                    energy += energy_x + energy_y

                elif self.attention_type[1]:
                    proj_query_reshape = proj_query.\
                        view(n, num_heads, h, w, self.qk_embed_dim)
                    proj_query_reshape = proj_query_reshape.\
                        permute(0, 1, 3, 2, 4)
                    position_feat_x_reshape = position_feat_x.\
                        permute(0, 1, 2, 4, 3)
                    position_feat_y_reshape = position_feat_y.\
                        permute(0, 1, 2, 4, 3)

                    energy_x = torch.matmul(proj_query_reshape,
                                            position_feat_x_reshape)
                    energy_x = energy_x.permute(0, 1, 3, 2, 4).unsqueeze(4)

                    energy_y = torch.matmul(proj_query_reshape,
                                            position_feat_y_reshape)
                    energy_y = energy_y.unsqueeze(5)

                    energy += energy_x + energy_y

                elif self.attention_type[3]:
                    geom_bias = self.geom_bias.\
                        view(1, num_heads, self.qk_embed_dim, 1).\
                        repeat(n, 1, 1, 1)

                    position_feat_x_reshape = position_feat_x.\
                        view(n, num_heads, w * w_kv, self.qk_embed_dim)

                    position_feat_y_reshape = position_feat_y.\
                        view(n, num_heads, h * h_kv, self.qk_embed_dim)

                    energy_x = torch.matmul(position_feat_x_reshape, geom_bias)
                    energy_x = energy_x.view(n, num_heads, 1, w, 1, w_kv)

                    energy_y = torch.matmul(position_feat_y_reshape, geom_bias)
                    energy_y = energy_y.view(n, num_heads, h, 1, h_kv, 1)

                    energy += energy_x + energy_y

            energy = energy.view(n, num_heads, h * w, h_kv * w_kv)

        if self.spatial_range >= 0:
            cur_local_constraint_map = \
                self.local_constraint_map[:h, :w, :h_kv, :w_kv].\
                contiguous().\
                view(1, 1, h*w, h_kv*w_kv)

            # masked_fill_ requires a bool mask in recent PyTorch (uint8
            # masks are deprecated), so cast the stored byte buffer here.
            energy = energy.masked_fill_(
                cur_local_constraint_map.to(torch.bool), float('-inf'))

        attention = F.softmax(energy, 3)

        proj_value = self.value_conv(x_kv)
        proj_value_reshape = proj_value.\
            view((n, num_heads, self.v_dim, h_kv * w_kv)).\
            permute(0, 1, 3, 2)

        out = torch.matmul(attention, proj_value_reshape).\
            permute(0, 1, 3, 2).\
            contiguous().\
            view(n, self.v_dim * self.num_heads, h, w)

        out = self.proj_conv(out)

        # output is downsampled, upsample back to input size
        if self.q_downsample is not None:
            out = F.interpolate(
                out,
                size=x_input.shape[2:],
                mode='bilinear',
                align_corners=False)

        out = self.gamma * out + x_input
        return out

    def init_weights(self):
        """Kaiming-initialize every submodule tagged with ``kaiming_init``."""
        for m in self.modules():
            if hasattr(m, 'kaiming_init') and m.kaiming_init:
                kaiming_init(
                    m,
                    mode='fan_in',
                    nonlinearity='leaky_relu',
                    bias=0,
                    distribution='uniform',
                    a=1)
Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/bricks/hsigmoid.py ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import torch.nn as nn
3
+
4
+ from .registry import ACTIVATION_LAYERS
5
+
6
+
7
@ACTIVATION_LAYERS.register_module()
class HSigmoid(nn.Module):
    """Hard sigmoid activation.

    Computes ``min(max((x + bias) / divisor, min_value), max_value)``;
    with the defaults this is ``min(max((x + 1) / 2, 0), 1)``.

    Args:
        bias (float): Bias of the input feature map. Default: 1.0.
        divisor (float): Divisor of the input feature map. Default: 2.0.
        min_value (float): Lower bound value. Default: 0.0.
        max_value (float): Upper bound value. Default: 1.0.

    Returns:
        Tensor: The output tensor.
    """

    def __init__(self, bias=1.0, divisor=2.0, min_value=0.0, max_value=1.0):
        super(HSigmoid, self).__init__()
        self.bias = bias
        self.divisor = divisor
        assert self.divisor != 0
        self.min_value = min_value
        self.max_value = max_value

    def forward(self, x):
        shifted = (x + self.bias) / self.divisor
        # clamp_ is safe here: `shifted` is a fresh tensor, not the input.
        return shifted.clamp_(self.min_value, self.max_value)
Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/bricks/hswish.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import torch.nn as nn
3
+
4
+ from .registry import ACTIVATION_LAYERS
5
+
6
+
7
@ACTIVATION_LAYERS.register_module()
class HSwish(nn.Module):
    """Hard Swish activation.

    Computes

    .. math::
        Hswish(x) = x * ReLU6(x + 3) / 6

    Args:
        inplace (bool): can optionally do the operation in-place.
            Default: False.

    Returns:
        Tensor: The output tensor.
    """

    def __init__(self, inplace=False):
        super(HSwish, self).__init__()
        # ReLU6 implements the clamped-to-[0, 6] part of hard swish.
        self.act = nn.ReLU6(inplace)

    def forward(self, x):
        return self.act(x + 3) * x / 6
Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/bricks/non_local.py ADDED
@@ -0,0 +1,306 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ from abc import ABCMeta
3
+
4
+ import torch
5
+ import torch.nn as nn
6
+
7
+ from ..utils import constant_init, normal_init
8
+ from .conv_module import ConvModule
9
+ from .registry import PLUGIN_LAYERS
10
+
11
+
12
+ class _NonLocalNd(nn.Module, metaclass=ABCMeta):
13
+ """Basic Non-local module.
14
+
15
+ This module is proposed in
16
+ "Non-local Neural Networks"
17
+ Paper reference: https://arxiv.org/abs/1711.07971
18
+ Code reference: https://github.com/AlexHex7/Non-local_pytorch
19
+
20
+ Args:
21
+ in_channels (int): Channels of the input feature map.
22
+ reduction (int): Channel reduction ratio. Default: 2.
23
+ use_scale (bool): Whether to scale pairwise_weight by
24
+ `1/sqrt(inter_channels)` when the mode is `embedded_gaussian`.
25
+ Default: True.
26
+ conv_cfg (None | dict): The config dict for convolution layers.
27
+ If not specified, it will use `nn.Conv2d` for convolution layers.
28
+ Default: None.
29
+ norm_cfg (None | dict): The config dict for normalization layers.
30
+ Default: None. (This parameter is only applicable to conv_out.)
31
+ mode (str): Options are `gaussian`, `concatenation`,
32
+ `embedded_gaussian` and `dot_product`. Default: embedded_gaussian.
33
+ """
34
+
35
+ def __init__(self,
36
+ in_channels,
37
+ reduction=2,
38
+ use_scale=True,
39
+ conv_cfg=None,
40
+ norm_cfg=None,
41
+ mode='embedded_gaussian',
42
+ **kwargs):
43
+ super(_NonLocalNd, self).__init__()
44
+ self.in_channels = in_channels
45
+ self.reduction = reduction
46
+ self.use_scale = use_scale
47
+ self.inter_channels = max(in_channels // reduction, 1)
48
+ self.mode = mode
49
+
50
+ if mode not in [
51
+ 'gaussian', 'embedded_gaussian', 'dot_product', 'concatenation'
52
+ ]:
53
+ raise ValueError("Mode should be in 'gaussian', 'concatenation', "
54
+ f"'embedded_gaussian' or 'dot_product', but got "
55
+ f'{mode} instead.')
56
+
57
+ # g, theta, phi are defaulted as `nn.ConvNd`.
58
+ # Here we use ConvModule for potential usage.
59
+ self.g = ConvModule(
60
+ self.in_channels,
61
+ self.inter_channels,
62
+ kernel_size=1,
63
+ conv_cfg=conv_cfg,
64
+ act_cfg=None)
65
+ self.conv_out = ConvModule(
66
+ self.inter_channels,
67
+ self.in_channels,
68
+ kernel_size=1,
69
+ conv_cfg=conv_cfg,
70
+ norm_cfg=norm_cfg,
71
+ act_cfg=None)
72
+
73
+ if self.mode != 'gaussian':
74
+ self.theta = ConvModule(
75
+ self.in_channels,
76
+ self.inter_channels,
77
+ kernel_size=1,
78
+ conv_cfg=conv_cfg,
79
+ act_cfg=None)
80
+ self.phi = ConvModule(
81
+ self.in_channels,
82
+ self.inter_channels,
83
+ kernel_size=1,
84
+ conv_cfg=conv_cfg,
85
+ act_cfg=None)
86
+
87
+ if self.mode == 'concatenation':
88
+ self.concat_project = ConvModule(
89
+ self.inter_channels * 2,
90
+ 1,
91
+ kernel_size=1,
92
+ stride=1,
93
+ padding=0,
94
+ bias=False,
95
+ act_cfg=dict(type='ReLU'))
96
+
97
+ self.init_weights(**kwargs)
98
+
99
    def init_weights(self, std=0.01, zeros_init=True):
        """Initialize the embedding convs with a normal distribution.

        Args:
            std (float): Std used by ``normal_init``. Default: 0.01.
            zeros_init (bool): Whether to zero-init the output projection so
                the residual branch initially contributes nothing.
                Default: True.
        """
        if self.mode != 'gaussian':
            for m in [self.g, self.theta, self.phi]:
                normal_init(m.conv, std=std)
        else:
            # theta/phi do not exist in `gaussian` mode.
            normal_init(self.g.conv, std=std)
        if zeros_init:
            # Zero the last learnable layer (the norm when present, so its
            # scale cancels the conv output) to start as an identity mapping.
            if self.conv_out.norm_cfg is None:
                constant_init(self.conv_out.conv, 0)
            else:
                constant_init(self.conv_out.norm, 0)
        else:
            if self.conv_out.norm_cfg is None:
                normal_init(self.conv_out.conv, std=std)
            else:
                normal_init(self.conv_out.norm, std=std)
115
+
116
+ def gaussian(self, theta_x, phi_x):
117
+ # NonLocal1d pairwise_weight: [N, H, H]
118
+ # NonLocal2d pairwise_weight: [N, HxW, HxW]
119
+ # NonLocal3d pairwise_weight: [N, TxHxW, TxHxW]
120
+ pairwise_weight = torch.matmul(theta_x, phi_x)
121
+ pairwise_weight = pairwise_weight.softmax(dim=-1)
122
+ return pairwise_weight
123
+
124
+ def embedded_gaussian(self, theta_x, phi_x):
125
+ # NonLocal1d pairwise_weight: [N, H, H]
126
+ # NonLocal2d pairwise_weight: [N, HxW, HxW]
127
+ # NonLocal3d pairwise_weight: [N, TxHxW, TxHxW]
128
+ pairwise_weight = torch.matmul(theta_x, phi_x)
129
+ if self.use_scale:
130
+ # theta_x.shape[-1] is `self.inter_channels`
131
+ pairwise_weight /= theta_x.shape[-1]**0.5
132
+ pairwise_weight = pairwise_weight.softmax(dim=-1)
133
+ return pairwise_weight
134
+
135
+ def dot_product(self, theta_x, phi_x):
136
+ # NonLocal1d pairwise_weight: [N, H, H]
137
+ # NonLocal2d pairwise_weight: [N, HxW, HxW]
138
+ # NonLocal3d pairwise_weight: [N, TxHxW, TxHxW]
139
+ pairwise_weight = torch.matmul(theta_x, phi_x)
140
+ pairwise_weight /= pairwise_weight.shape[-1]
141
+ return pairwise_weight
142
+
143
+ def concatenation(self, theta_x, phi_x):
144
+ # NonLocal1d pairwise_weight: [N, H, H]
145
+ # NonLocal2d pairwise_weight: [N, HxW, HxW]
146
+ # NonLocal3d pairwise_weight: [N, TxHxW, TxHxW]
147
+ h = theta_x.size(2)
148
+ w = phi_x.size(3)
149
+ theta_x = theta_x.repeat(1, 1, 1, w)
150
+ phi_x = phi_x.repeat(1, 1, h, 1)
151
+
152
+ concat_feature = torch.cat([theta_x, phi_x], dim=1)
153
+ pairwise_weight = self.concat_project(concat_feature)
154
+ n, _, h, w = pairwise_weight.size()
155
+ pairwise_weight = pairwise_weight.view(n, h, w)
156
+ pairwise_weight /= pairwise_weight.shape[-1]
157
+
158
+ return pairwise_weight
159
+
160
    def forward(self, x):
        """Apply non-local attention over the flattened spatial dims.

        Args:
            x (torch.Tensor): Input feature map of shape [N, C, *spatial],
                e.g. [N, C, H], [N, C, H, W] or [N, C, T, H, W].

        Returns:
            torch.Tensor: Output with the same shape as ``x`` (the attended
            features are added back as a residual).
        """
        # Assume `reduction = 1`, then `inter_channels = C`
        # or `inter_channels = C` when `mode="gaussian"`

        # NonLocal1d x: [N, C, H]
        # NonLocal2d x: [N, C, H, W]
        # NonLocal3d x: [N, C, T, H, W]
        n = x.size(0)

        # Value path: flatten spatial dims and move channels last.
        # NonLocal1d g_x: [N, H, C]
        # NonLocal2d g_x: [N, HxW, C]
        # NonLocal3d g_x: [N, TxHxW, C]
        g_x = self.g(x).view(n, self.inter_channels, -1)
        g_x = g_x.permute(0, 2, 1)

        # Query/key paths, laid out per pairwise function:
        # NonLocal1d theta_x: [N, H, C], phi_x: [N, C, H]
        # NonLocal2d theta_x: [N, HxW, C], phi_x: [N, C, HxW]
        # NonLocal3d theta_x: [N, TxHxW, C], phi_x: [N, C, TxHxW]
        if self.mode == 'gaussian':
            # Raw input serves as query/key; no theta/phi embeddings exist.
            theta_x = x.view(n, self.in_channels, -1)
            theta_x = theta_x.permute(0, 2, 1)
            # NOTE(review): `self.sub_sample` is assigned by the NonLocal1d/
            # 2d/3d subclasses after super().__init__; instantiating
            # _NonLocalNd directly in 'gaussian' mode would raise
            # AttributeError here — confirm intended.
            if self.sub_sample:
                phi_x = self.phi(x).view(n, self.in_channels, -1)
            else:
                phi_x = x.view(n, self.in_channels, -1)
        elif self.mode == 'concatenation':
            # Keep trailing singleton axes so `concatenation` can tile them.
            theta_x = self.theta(x).view(n, self.inter_channels, -1, 1)
            phi_x = self.phi(x).view(n, self.inter_channels, 1, -1)
        else:
            theta_x = self.theta(x).view(n, self.inter_channels, -1)
            theta_x = theta_x.permute(0, 2, 1)
            phi_x = self.phi(x).view(n, self.inter_channels, -1)

        # Dispatch to the method named after the mode (validated in __init__).
        pairwise_func = getattr(self, self.mode)
        # NonLocal1d pairwise_weight: [N, H, H]
        # NonLocal2d pairwise_weight: [N, HxW, HxW]
        # NonLocal3d pairwise_weight: [N, TxHxW, TxHxW]
        pairwise_weight = pairwise_func(theta_x, phi_x)

        # Aggregate values with the pairwise weights.
        # NonLocal1d y: [N, H, C]
        # NonLocal2d y: [N, HxW, C]
        # NonLocal3d y: [N, TxHxW, C]
        y = torch.matmul(pairwise_weight, g_x)
        # Restore the original spatial layout.
        # NonLocal1d y: [N, C, H]
        # NonLocal2d y: [N, C, H, W]
        # NonLocal3d y: [N, C, T, H, W]
        y = y.permute(0, 2, 1).contiguous().reshape(n, self.inter_channels,
                                                    *x.size()[2:])

        # Residual connection around the output projection.
        output = x + self.conv_out(y)

        return output
212
+
213
+
214
class NonLocal1d(_NonLocalNd):
    """1D Non-local module.

    Args:
        in_channels (int): Same as `NonLocalND`.
        sub_sample (bool): Whether to apply max pooling after pairwise
            function (Note that the `sub_sample` is applied on spatial only).
            Default: False.
        conv_cfg (None | dict): Same as `NonLocalND`.
            Default: dict(type='Conv1d').
    """

    def __init__(self,
                 in_channels,
                 sub_sample=False,
                 conv_cfg=None,
                 **kwargs):
        # Fix: avoid a mutable dict as default argument (shared across all
        # calls). `None` now falls back to the documented Conv1d config.
        if conv_cfg is None:
            conv_cfg = dict(type='Conv1d')
        super(NonLocal1d, self).__init__(
            in_channels, conv_cfg=conv_cfg, **kwargs)

        self.sub_sample = sub_sample

        if sub_sample:
            max_pool_layer = nn.MaxPool1d(kernel_size=2)
            # Pool the value path; in `gaussian` mode there is no phi
            # embedding, so phi becomes the pooling layer itself.
            self.g = nn.Sequential(self.g, max_pool_layer)
            if self.mode != 'gaussian':
                self.phi = nn.Sequential(self.phi, max_pool_layer)
            else:
                self.phi = max_pool_layer
243
+
244
+
245
@PLUGIN_LAYERS.register_module()
class NonLocal2d(_NonLocalNd):
    """2D Non-local module.

    Args:
        in_channels (int): Same as `NonLocalND`.
        sub_sample (bool): Whether to apply max pooling after pairwise
            function (Note that the `sub_sample` is applied on spatial only).
            Default: False.
        conv_cfg (None | dict): Same as `NonLocalND`.
            Default: dict(type='Conv2d').
    """

    _abbr_ = 'nonlocal_block'

    def __init__(self,
                 in_channels,
                 sub_sample=False,
                 conv_cfg=None,
                 **kwargs):
        # Fix: avoid a mutable dict as default argument (shared across all
        # calls). `None` now falls back to the documented Conv2d config.
        if conv_cfg is None:
            conv_cfg = dict(type='Conv2d')
        super(NonLocal2d, self).__init__(
            in_channels, conv_cfg=conv_cfg, **kwargs)

        self.sub_sample = sub_sample

        if sub_sample:
            max_pool_layer = nn.MaxPool2d(kernel_size=(2, 2))
            # Pool the value path; in `gaussian` mode there is no phi
            # embedding, so phi becomes the pooling layer itself.
            self.g = nn.Sequential(self.g, max_pool_layer)
            if self.mode != 'gaussian':
                self.phi = nn.Sequential(self.phi, max_pool_layer)
            else:
                self.phi = max_pool_layer
277
+
278
+
279
class NonLocal3d(_NonLocalNd):
    """3D Non-local module.

    Args:
        in_channels (int): Same as `NonLocalND`.
        sub_sample (bool): Whether to apply max pooling after pairwise
            function (Note that the `sub_sample` is applied on spatial only).
            Default: False.
        conv_cfg (None | dict): Same as `NonLocalND`.
            Default: dict(type='Conv3d').
    """

    def __init__(self,
                 in_channels,
                 sub_sample=False,
                 conv_cfg=None,
                 **kwargs):
        # Fix: avoid a mutable dict as default argument (shared across all
        # calls). `None` now falls back to the documented Conv3d config.
        if conv_cfg is None:
            conv_cfg = dict(type='Conv3d')
        super(NonLocal3d, self).__init__(
            in_channels, conv_cfg=conv_cfg, **kwargs)
        self.sub_sample = sub_sample

        if sub_sample:
            # Pool only the spatial dims, never the temporal dim.
            max_pool_layer = nn.MaxPool3d(kernel_size=(1, 2, 2))
            self.g = nn.Sequential(self.g, max_pool_layer)
            if self.mode != 'gaussian':
                self.phi = nn.Sequential(self.phi, max_pool_layer)
            else:
                self.phi = max_pool_layer
Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/bricks/norm.py ADDED
@@ -0,0 +1,144 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import inspect
3
+
4
+ import torch.nn as nn
5
+
6
+ from annotator.uniformer.mmcv.utils import is_tuple_of
7
+ from annotator.uniformer.mmcv.utils.parrots_wrapper import SyncBatchNorm, _BatchNorm, _InstanceNorm
8
+ from .registry import NORM_LAYERS
9
+
10
+ NORM_LAYERS.register_module('BN', module=nn.BatchNorm2d)
11
+ NORM_LAYERS.register_module('BN1d', module=nn.BatchNorm1d)
12
+ NORM_LAYERS.register_module('BN2d', module=nn.BatchNorm2d)
13
+ NORM_LAYERS.register_module('BN3d', module=nn.BatchNorm3d)
14
+ NORM_LAYERS.register_module('SyncBN', module=SyncBatchNorm)
15
+ NORM_LAYERS.register_module('GN', module=nn.GroupNorm)
16
+ NORM_LAYERS.register_module('LN', module=nn.LayerNorm)
17
+ NORM_LAYERS.register_module('IN', module=nn.InstanceNorm2d)
18
+ NORM_LAYERS.register_module('IN1d', module=nn.InstanceNorm1d)
19
+ NORM_LAYERS.register_module('IN2d', module=nn.InstanceNorm2d)
20
+ NORM_LAYERS.register_module('IN3d', module=nn.InstanceNorm3d)
21
+
22
+
23
def infer_abbr(class_type):
    """Infer the abbreviation of a norm layer from its class.

    When we build a norm layer with `build_norm_layer()`, we want to preserve
    the norm type in variable names, e.g, self.bn1, self.gn. This method maps
    a class type to such an abbreviation.

    Rule 1: If the class has the property "_abbr_", return the property.
    Rule 2: If the parent class is _BatchNorm, GroupNorm, LayerNorm or
    InstanceNorm, the abbreviation of this layer will be "bn", "gn", "ln" and
    "in" respectively.
    Rule 3: If the class name contains "batch", "group", "layer" or
    "instance", the abbreviation of this layer will be "bn", "gn", "ln" and
    "in" respectively.
    Rule 4: Otherwise, the abbreviation falls back to "norm_layer".

    Args:
        class_type (type): The norm layer type.

    Returns:
        str: The inferred abbreviation.
    """
    if not inspect.isclass(class_type):
        raise TypeError(
            f'class_type must be a type, but got {type(class_type)}')
    if hasattr(class_type, '_abbr_'):
        return class_type._abbr_
    # _InstanceNorm subclasses _BatchNorm, so it must be checked first.
    if issubclass(class_type, _InstanceNorm):
        return 'in'
    if issubclass(class_type, _BatchNorm):
        return 'bn'
    if issubclass(class_type, nn.GroupNorm):
        return 'gn'
    if issubclass(class_type, nn.LayerNorm):
        return 'ln'
    # Fall back to keyword matching on the lowercased class name.
    class_name = class_type.__name__.lower()
    for keyword, abbr in (('batch', 'bn'), ('group', 'gn'), ('layer', 'ln'),
                          ('instance', 'in')):
        if keyword in class_name:
            return abbr
    return 'norm_layer'
70
+
71
+
72
def build_norm_layer(cfg, num_features, postfix=''):
    """Build normalization layer.

    Args:
        cfg (dict): The norm layer config, which should contain:

            - type (str): Layer type.
            - layer args: Args needed to instantiate a norm layer.
            - requires_grad (bool, optional): Whether stop gradient updates.
        num_features (int): Number of input channels.
        postfix (int | str): The postfix to be appended into norm abbreviation
            to create named layer.

    Returns:
        (str, nn.Module): The first element is the layer name consisting of
            abbreviation and postfix, e.g., bn1, gn. The second element is the
            created norm layer.
    """
    if not isinstance(cfg, dict):
        raise TypeError('cfg must be a dict')
    if 'type' not in cfg:
        raise KeyError('the cfg dict must contain the key "type"')
    # Work on a copy so the caller's config dict is never mutated.
    cfg_ = cfg.copy()

    layer_type = cfg_.pop('type')
    if layer_type not in NORM_LAYERS:
        raise KeyError(f'Unrecognized norm type {layer_type}')

    norm_layer = NORM_LAYERS.get(layer_type)
    abbr = infer_abbr(norm_layer)

    assert isinstance(postfix, (int, str))
    name = abbr + str(postfix)

    # `requires_grad` is a build-time switch, not a layer kwarg.
    requires_grad = cfg_.pop('requires_grad', True)
    cfg_.setdefault('eps', 1e-5)
    if layer_type != 'GN':
        layer = norm_layer(num_features, **cfg_)
        # Legacy SyncBN needs to know the per-process GPU count; newer
        # torch versions no longer expose this hook, hence the hasattr.
        if layer_type == 'SyncBN' and hasattr(layer, '_specify_ddp_gpu_num'):
            layer._specify_ddp_gpu_num(1)
    else:
        # GroupNorm takes `num_channels` instead of a positional
        # `num_features` and requires `num_groups` in the config.
        assert 'num_groups' in cfg_
        layer = norm_layer(num_channels=num_features, **cfg_)

    for param in layer.parameters():
        param.requires_grad = requires_grad

    return name, layer
120
+
121
+
122
def is_norm(layer, exclude=None):
    """Check whether a layer is a normalization layer.

    Args:
        layer (nn.Module): The layer to be checked.
        exclude (type | tuple[type]): Types to be excluded.

    Returns:
        bool: Whether the layer is a norm layer (and not excluded).
    """
    if exclude is not None:
        exclude = exclude if isinstance(exclude, tuple) else (exclude, )
        if not is_tuple_of(exclude, type):
            raise TypeError(
                f'"exclude" must be either None or type or a tuple of types, '
                f'but got {type(exclude)}: {exclude}')
        if isinstance(layer, exclude):
            return False

    return isinstance(layer,
                      (_BatchNorm, _InstanceNorm, nn.GroupNorm, nn.LayerNorm))
Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/bricks/padding.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import torch.nn as nn
3
+
4
+ from .registry import PADDING_LAYERS
5
+
6
+ PADDING_LAYERS.register_module('zero', module=nn.ZeroPad2d)
7
+ PADDING_LAYERS.register_module('reflect', module=nn.ReflectionPad2d)
8
+ PADDING_LAYERS.register_module('replicate', module=nn.ReplicationPad2d)
9
+
10
+
11
def build_padding_layer(cfg, *args, **kwargs):
    """Build padding layer.

    Args:
        cfg (None or dict): The padding layer config, which should contain:
            - type (str): Layer type.
            - layer args: Args needed to instantiate a padding layer.

    Returns:
        nn.Module: Created padding layer.
    """
    if not isinstance(cfg, dict):
        raise TypeError('cfg must be a dict')
    if 'type' not in cfg:
        raise KeyError('the cfg dict must contain the key "type"')

    # Pop `type` from a copy so the caller's config dict stays untouched.
    cfg_ = cfg.copy()
    padding_type = cfg_.pop('type')
    if padding_type not in PADDING_LAYERS:
        raise KeyError(f'Unrecognized padding type {padding_type}.')
    padding_cls = PADDING_LAYERS.get(padding_type)

    return padding_cls(*args, **kwargs, **cfg_)
Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/bricks/plugin.py ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import inspect
2
+ import platform
3
+
4
+ from .registry import PLUGIN_LAYERS
5
+
6
+ if platform.system() == 'Windows':
7
+ import regex as re
8
+ else:
9
+ import re
10
+
11
+
12
def infer_abbr(class_type):
    """Infer abbreviation from the class name.

    This method will infer the abbreviation to map class types to
    abbreviations.

    Rule 1: If the class has the property "_abbr_", return the property.
    Rule 2: Otherwise, the abbreviation falls back to snake case of class
    name, e.g. the abbreviation of ``FancyBlock`` will be ``fancy_block``.

    Args:
        class_type (type): The norm layer type.

    Returns:
        str: The inferred abbreviation.
    """
    if not inspect.isclass(class_type):
        raise TypeError(
            f'class_type must be a type, but got {type(class_type)}')
    if hasattr(class_type, '_abbr_'):
        return class_type._abbr_

    # Snake-case fallback, modified from the `inflection` library
    # <https://inflection.readthedocs.io/en/latest/#inflection.underscore>:
    # split runs of capitals from a following capitalized word, then split
    # lower/digit-to-upper boundaries, e.g. "FancyBlock" -> "fancy_block".
    name = re.sub(r'([A-Z]+)([A-Z][a-z])', r'\1_\2', class_type.__name__)
    name = re.sub(r'([a-z\d])([A-Z])', r'\1_\2', name)
    return name.replace('-', '_').lower()
53
+
54
+
55
def build_plugin_layer(cfg, postfix='', **kwargs):
    """Build plugin layer.

    Args:
        cfg (None or dict): cfg should contain:
            type (str): identify plugin layer type.
            layer args: args needed to instantiate a plugin layer.
        postfix (int, str): appended into norm abbreviation to
            create named layer. Default: ''.

    Returns:
        tuple[str, nn.Module]:
            name (str): abbreviation + postfix
            layer (nn.Module): created plugin layer
    """
    if not isinstance(cfg, dict):
        raise TypeError('cfg must be a dict')
    if 'type' not in cfg:
        raise KeyError('the cfg dict must contain the key "type"')
    # Work on a copy so the caller's config dict is never mutated.
    cfg_ = cfg.copy()

    layer_type = cfg_.pop('type')
    if layer_type not in PLUGIN_LAYERS:
        raise KeyError(f'Unrecognized plugin type {layer_type}')

    plugin_layer = PLUGIN_LAYERS.get(layer_type)
    # Derive the snake-case layer name, e.g. 'nonlocal_block1'.
    abbr = infer_abbr(plugin_layer)

    assert isinstance(postfix, (int, str))
    name = abbr + str(postfix)

    layer = plugin_layer(**kwargs, **cfg_)

    return name, layer
Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/bricks/registry.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ from annotator.uniformer.mmcv.utils import Registry
3
+
4
# Registries for interchangeable CNN "bricks"; each layer family is looked
# up by the string `type` key of a config dict.
CONV_LAYERS = Registry('conv layer')
NORM_LAYERS = Registry('norm layer')
ACTIVATION_LAYERS = Registry('activation layer')
PADDING_LAYERS = Registry('padding layer')
UPSAMPLE_LAYERS = Registry('upsample layer')
PLUGIN_LAYERS = Registry('plugin layer')

# Registries for transformer building blocks (see bricks/transformer.py).
DROPOUT_LAYERS = Registry('drop out layers')
POSITIONAL_ENCODING = Registry('position encoding')
ATTENTION = Registry('attention')
FEEDFORWARD_NETWORK = Registry('feed-forward Network')
TRANSFORMER_LAYER = Registry('transformerLayer')
TRANSFORMER_LAYER_SEQUENCE = Registry('transformer-layers sequence')
Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/bricks/scale.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import torch
3
+ import torch.nn as nn
4
+
5
+
6
class Scale(nn.Module):
    """Scale the input by a single learnable factor.

    The factor is a scalar ``nn.Parameter`` initialised to ``scale`` and
    broadcast against inputs of any shape.

    Args:
        scale (float): Initial value of scale factor. Default: 1.0
    """

    def __init__(self, scale=1.0):
        super(Scale, self).__init__()
        self.scale = nn.Parameter(torch.tensor(scale, dtype=torch.float))

    def forward(self, x):
        return self.scale * x
Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/bricks/swish.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import torch
3
+ import torch.nn as nn
4
+
5
+ from .registry import ACTIVATION_LAYERS
6
+
7
+
8
+ @ACTIVATION_LAYERS.register_module()
9
class Swish(nn.Module):
    """Swish activation module.

    Applies the element-wise swish function:

    .. math::
        Swish(x) = x * Sigmoid(x)

    Returns:
        Tensor: The output tensor.
    """

    def forward(self, x):
        return torch.sigmoid(x) * x
Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/bricks/transformer.py ADDED
@@ -0,0 +1,595 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import copy
3
+ import warnings
4
+
5
+ import torch
6
+ import torch.nn as nn
7
+
8
+ from annotator.uniformer.mmcv import ConfigDict, deprecated_api_warning
9
+ from annotator.uniformer.mmcv.cnn import Linear, build_activation_layer, build_norm_layer
10
+ from annotator.uniformer.mmcv.runner.base_module import BaseModule, ModuleList, Sequential
11
+ from annotator.uniformer.mmcv.utils import build_from_cfg
12
+ from .drop import build_dropout
13
+ from .registry import (ATTENTION, FEEDFORWARD_NETWORK, POSITIONAL_ENCODING,
14
+ TRANSFORMER_LAYER, TRANSFORMER_LAYER_SEQUENCE)
15
+
16
+ # Avoid BC-breaking of importing MultiScaleDeformableAttention from this file
17
+ try:
18
+ from annotator.uniformer.mmcv.ops.multi_scale_deform_attn import MultiScaleDeformableAttention # noqa F401
19
+ warnings.warn(
20
+ ImportWarning(
21
+ '``MultiScaleDeformableAttention`` has been moved to '
22
+ '``mmcv.ops.multi_scale_deform_attn``, please change original path ' # noqa E501
23
+ '``from annotator.uniformer.mmcv.cnn.bricks.transformer import MultiScaleDeformableAttention`` ' # noqa E501
24
+ 'to ``from annotator.uniformer.mmcv.ops.multi_scale_deform_attn import MultiScaleDeformableAttention`` ' # noqa E501
25
+ ))
26
+
27
+ except ImportError:
28
+ warnings.warn('Fail to import ``MultiScaleDeformableAttention`` from '
29
+ '``mmcv.ops.multi_scale_deform_attn``, '
30
+ 'You should install ``mmcv-full`` if you need this module. ')
31
+
32
+
33
def build_positional_encoding(cfg, default_args=None):
    """Builder for Position Encoding.

    Args:
        cfg (dict): Config with a `type` key registered in
            POSITIONAL_ENCODING plus layer kwargs.
        default_args (dict, optional): Defaults merged into ``cfg``.
    """
    return build_from_cfg(cfg, POSITIONAL_ENCODING, default_args)
36
+
37
+
38
def build_attention(cfg, default_args=None):
    """Builder for attention.

    Args:
        cfg (dict): Config with a `type` key registered in ATTENTION
            plus layer kwargs.
        default_args (dict, optional): Defaults merged into ``cfg``.
    """
    return build_from_cfg(cfg, ATTENTION, default_args)
41
+
42
+
43
def build_feedforward_network(cfg, default_args=None):
    """Builder for feed-forward network (FFN).

    Args:
        cfg (dict): Config with a `type` key registered in
            FEEDFORWARD_NETWORK plus layer kwargs.
        default_args (dict, optional): Defaults merged into ``cfg``.
    """
    return build_from_cfg(cfg, FEEDFORWARD_NETWORK, default_args)
46
+
47
+
48
def build_transformer_layer(cfg, default_args=None):
    """Builder for transformer layer.

    Args:
        cfg (dict): Config with a `type` key registered in
            TRANSFORMER_LAYER plus layer kwargs.
        default_args (dict, optional): Defaults merged into ``cfg``.
    """
    return build_from_cfg(cfg, TRANSFORMER_LAYER, default_args)
51
+
52
+
53
def build_transformer_layer_sequence(cfg, default_args=None):
    """Builder for transformer encoder and transformer decoder.

    Args:
        cfg (dict): Config with a `type` key registered in
            TRANSFORMER_LAYER_SEQUENCE plus layer kwargs.
        default_args (dict, optional): Defaults merged into ``cfg``.
    """
    return build_from_cfg(cfg, TRANSFORMER_LAYER_SEQUENCE, default_args)
56
+
57
+
58
@ATTENTION.register_module()
class MultiheadAttention(BaseModule):
    """A wrapper for ``torch.nn.MultiheadAttention``.

    This module implements MultiheadAttention with identity connection,
    and positional encoding is also passed as input.

    Args:
        embed_dims (int): The embedding dimension.
        num_heads (int): Parallel attention heads.
        attn_drop (float): A Dropout layer on attn_output_weights.
            Default: 0.0.
        proj_drop (float): A Dropout layer after `nn.MultiheadAttention`.
            Default: 0.0.
        dropout_layer (obj:`ConfigDict`): The dropout_layer used
            when adding the shortcut.
        init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
            Default: None.
        batch_first (bool): When it is True, Key, Query and Value are shape of
            (batch, n, embed_dim), otherwise (n, batch, embed_dim).
            Default to False.
    """

    def __init__(self,
                 embed_dims,
                 num_heads,
                 attn_drop=0.,
                 proj_drop=0.,
                 dropout_layer=dict(type='Dropout', drop_prob=0.),
                 init_cfg=None,
                 batch_first=False,
                 **kwargs):
        super(MultiheadAttention, self).__init__(init_cfg)
        if 'dropout' in kwargs:
            warnings.warn('The arguments `dropout` in MultiheadAttention '
                          'has been deprecated, now you can separately '
                          'set `attn_drop`(float), proj_drop(float), '
                          'and `dropout_layer`(dict) ')
            attn_drop = kwargs['dropout']
            # Fix: copy before writing. `dropout_layer` may be the shared
            # default dict, and mutating it in place would leak the
            # deprecated `dropout` value into every later instantiation.
            dropout_layer = dict(dropout_layer)
            dropout_layer['drop_prob'] = kwargs.pop('dropout')

        self.embed_dims = embed_dims
        self.num_heads = num_heads
        self.batch_first = batch_first

        # Remaining **kwargs are forwarded to torch's MultiheadAttention.
        self.attn = nn.MultiheadAttention(embed_dims, num_heads, attn_drop,
                                          **kwargs)

        self.proj_drop = nn.Dropout(proj_drop)
        self.dropout_layer = build_dropout(
            dropout_layer) if dropout_layer else nn.Identity()

    @deprecated_api_warning({'residual': 'identity'},
                            cls_name='MultiheadAttention')
    def forward(self,
                query,
                key=None,
                value=None,
                identity=None,
                query_pos=None,
                key_pos=None,
                attn_mask=None,
                key_padding_mask=None,
                **kwargs):
        """Forward function for `MultiheadAttention`.

        **kwargs allow passing a more general data flow when combining
        with other operations in `transformerlayer`.

        Args:
            query (Tensor): The input query with shape [num_queries, bs,
                embed_dims] if self.batch_first is False, else
                [bs, num_queries embed_dims].
            key (Tensor): The key tensor with shape [num_keys, bs,
                embed_dims] if self.batch_first is False, else
                [bs, num_keys, embed_dims] .
                If None, the ``query`` will be used. Defaults to None.
            value (Tensor): The value tensor with same shape as `key`.
                Same in `nn.MultiheadAttention.forward`. Defaults to None.
                If None, the `key` will be used.
            identity (Tensor): This tensor, with the same shape as x,
                will be used for the identity link.
                If None, `x` will be used. Defaults to None.
            query_pos (Tensor): The positional encoding for query, with
                the same shape as `x`. If not None, it will
                be added to `x` before forward function. Defaults to None.
            key_pos (Tensor): The positional encoding for `key`, with the
                same shape as `key`. Defaults to None. If not None, it will
                be added to `key` before forward function. If None, and
                `query_pos` has the same shape as `key`, then `query_pos`
                will be used for `key_pos`. Defaults to None.
            attn_mask (Tensor): ByteTensor mask with shape [num_queries,
                num_keys]. Same in `nn.MultiheadAttention.forward`.
                Defaults to None.
            key_padding_mask (Tensor): ByteTensor with shape [bs, num_keys].
                Defaults to None.

        Returns:
            Tensor: forwarded results with shape
                [num_queries, bs, embed_dims]
                if self.batch_first is False, else
                [bs, num_queries embed_dims].
        """

        # Self-attention defaults: missing key/value fall back to the query.
        if key is None:
            key = query
        if value is None:
            value = key
        if identity is None:
            identity = query
        if key_pos is None:
            if query_pos is not None:
                # use query_pos if key_pos is not available
                if query_pos.shape == key.shape:
                    key_pos = query_pos
                else:
                    warnings.warn(f'position encoding of key is'
                                  f'missing in {self.__class__.__name__}.')
        if query_pos is not None:
            query = query + query_pos
        if key_pos is not None:
            key = key + key_pos

        # Because the dataflow('key', 'query', 'value') of
        # ``torch.nn.MultiheadAttention`` is (num_query, batch,
        # embed_dims), We should adjust the shape of dataflow from
        # batch_first (batch, num_query, embed_dims) to num_query_first
        # (num_query ,batch, embed_dims), and recover ``attn_output``
        # from num_query_first to batch_first.
        if self.batch_first:
            query = query.transpose(0, 1)
            key = key.transpose(0, 1)
            value = value.transpose(0, 1)

        out = self.attn(
            query=query,
            key=key,
            value=value,
            attn_mask=attn_mask,
            key_padding_mask=key_padding_mask)[0]

        if self.batch_first:
            out = out.transpose(0, 1)

        # Residual connection around projection dropout + shortcut dropout.
        return identity + self.dropout_layer(self.proj_drop(out))
203
+
204
+
205
@FEEDFORWARD_NETWORK.register_module()
class FFN(BaseModule):
    """Implements feed-forward networks (FFNs) with identity connection.

    Args:
        embed_dims (int): The feature dimension. Same as
            `MultiheadAttention`. Defaults: 256.
        feedforward_channels (int): The hidden dimension of FFNs.
            Defaults: 1024.
        num_fcs (int, optional): The number of fully-connected layers in
            FFNs. Default: 2.
        act_cfg (dict, optional): The activation config for FFNs.
            Default: dict(type='ReLU')
        ffn_drop (float, optional): Probability of an element to be
            zeroed in FFN. Default 0.0.
        add_identity (bool, optional): Whether to add the
            identity connection. Default: `True`.
        dropout_layer (obj:`ConfigDict`): The dropout_layer used
            when adding the shortcut.
        init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
            Default: None.
    """

    # NOTE(review): the dict defaults below are shared across all calls;
    # safe only while nothing mutates them — confirm build_activation_layer
    # copies its config.
    @deprecated_api_warning(
        {
            'dropout': 'ffn_drop',
            'add_residual': 'add_identity'
        },
        cls_name='FFN')
    def __init__(self,
                 embed_dims=256,
                 feedforward_channels=1024,
                 num_fcs=2,
                 act_cfg=dict(type='ReLU', inplace=True),
                 ffn_drop=0.,
                 dropout_layer=None,
                 add_identity=True,
                 init_cfg=None,
                 **kwargs):
        super(FFN, self).__init__(init_cfg)
        assert num_fcs >= 2, 'num_fcs should be no less ' \
            f'than 2. got {num_fcs}.'
        self.embed_dims = embed_dims
        self.feedforward_channels = feedforward_channels
        self.num_fcs = num_fcs
        self.act_cfg = act_cfg
        self.activate = build_activation_layer(act_cfg)

        # Hidden stages: (Linear -> activation -> dropout) repeated
        # num_fcs - 1 times, then a final Linear back to embed_dims.
        layers = []
        in_channels = embed_dims
        for _ in range(num_fcs - 1):
            layers.append(
                Sequential(
                    Linear(in_channels, feedforward_channels), self.activate,
                    nn.Dropout(ffn_drop)))
            in_channels = feedforward_channels
        layers.append(Linear(feedforward_channels, embed_dims))
        layers.append(nn.Dropout(ffn_drop))
        self.layers = Sequential(*layers)
        # Extra dropout applied on the shortcut path only when configured.
        self.dropout_layer = build_dropout(
            dropout_layer) if dropout_layer else torch.nn.Identity()
        self.add_identity = add_identity

    @deprecated_api_warning({'residual': 'identity'}, cls_name='FFN')
    def forward(self, x, identity=None):
        """Forward function for `FFN`.

        The function would add x to the output tensor if residue is None.
        """
        out = self.layers(x)
        if not self.add_identity:
            return self.dropout_layer(out)
        if identity is None:
            identity = x
        return identity + self.dropout_layer(out)
280
+
281
+
282
+ @TRANSFORMER_LAYER.register_module()
283
+ class BaseTransformerLayer(BaseModule):
284
+ """Base `TransformerLayer` for vision transformer.
285
+
286
+ It can be built from `mmcv.ConfigDict` and support more flexible
287
+ customization, for example, using any number of `FFN or LN ` and
288
+ use different kinds of `attention` by specifying a list of `ConfigDict`
289
+ named `attn_cfgs`. It is worth mentioning that it supports `prenorm`
290
+ when you specifying `norm` as the first element of `operation_order`.
291
+ More details about the `prenorm`: `On Layer Normalization in the
292
+ Transformer Architecture <https://arxiv.org/abs/2002.04745>`_ .
293
+
294
+ Args:
295
+ attn_cfgs (list[`mmcv.ConfigDict`] | obj:`mmcv.ConfigDict` | None )):
296
+ Configs for `self_attention` or `cross_attention` modules,
297
+ The order of the configs in the list should be consistent with
298
+ corresponding attentions in operation_order.
299
+ If it is a dict, all of the attention modules in operation_order
300
+ will be built with this config. Default: None.
301
+ ffn_cfgs (list[`mmcv.ConfigDict`] | obj:`mmcv.ConfigDict` | None )):
302
+ Configs for FFN, The order of the configs in the list should be
303
+ consistent with corresponding ffn in operation_order.
304
+ If it is a dict, all of the attention modules in operation_order
305
+ will be built with this config.
306
+ operation_order (tuple[str]): The execution order of operation
307
+ in transformer. Such as ('self_attn', 'norm', 'ffn', 'norm').
308
+ Support `prenorm` when you specifying first element as `norm`.
309
+ Default:None.
310
+ norm_cfg (dict): Config dict for normalization layer.
311
+ Default: dict(type='LN').
312
+ init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
313
+ Default: None.
314
+ batch_first (bool): Key, Query and Value are shape
315
+ of (batch, n, embed_dim)
316
+ or (n, batch, embed_dim). Default to False.
317
+ """
318
+
319
+ def __init__(self,
320
+ attn_cfgs=None,
321
+ ffn_cfgs=dict(
322
+ type='FFN',
323
+ embed_dims=256,
324
+ feedforward_channels=1024,
325
+ num_fcs=2,
326
+ ffn_drop=0.,
327
+ act_cfg=dict(type='ReLU', inplace=True),
328
+ ),
329
+ operation_order=None,
330
+ norm_cfg=dict(type='LN'),
331
+ init_cfg=None,
332
+ batch_first=False,
333
+ **kwargs):
334
+
335
+ deprecated_args = dict(
336
+ feedforward_channels='feedforward_channels',
337
+ ffn_dropout='ffn_drop',
338
+ ffn_num_fcs='num_fcs')
339
+ for ori_name, new_name in deprecated_args.items():
340
+ if ori_name in kwargs:
341
+ warnings.warn(
342
+ f'The arguments `{ori_name}` in BaseTransformerLayer '
343
+ f'has been deprecated, now you should set `{new_name}` '
344
+ f'and other FFN related arguments '
345
+ f'to a dict named `ffn_cfgs`. ')
346
+ ffn_cfgs[new_name] = kwargs[ori_name]
347
+
348
+ super(BaseTransformerLayer, self).__init__(init_cfg)
349
+
350
+ self.batch_first = batch_first
351
+
352
+ assert set(operation_order) & set(
353
+ ['self_attn', 'norm', 'ffn', 'cross_attn']) == \
354
+ set(operation_order), f'The operation_order of' \
355
+ f' {self.__class__.__name__} should ' \
356
+ f'contains all four operation type ' \
357
+ f"{['self_attn', 'norm', 'ffn', 'cross_attn']}"
358
+
359
+ num_attn = operation_order.count('self_attn') + operation_order.count(
360
+ 'cross_attn')
361
+ if isinstance(attn_cfgs, dict):
362
+ attn_cfgs = [copy.deepcopy(attn_cfgs) for _ in range(num_attn)]
363
+ else:
364
+ assert num_attn == len(attn_cfgs), f'The length ' \
365
+ f'of attn_cfg {num_attn} is ' \
366
+ f'not consistent with the number of attention' \
367
+ f'in operation_order {operation_order}.'
368
+
369
+ self.num_attn = num_attn
370
+ self.operation_order = operation_order
371
+ self.norm_cfg = norm_cfg
372
+ self.pre_norm = operation_order[0] == 'norm'
373
+ self.attentions = ModuleList()
374
+
375
+ index = 0
376
+ for operation_name in operation_order:
377
+ if operation_name in ['self_attn', 'cross_attn']:
378
+ if 'batch_first' in attn_cfgs[index]:
379
+ assert self.batch_first == attn_cfgs[index]['batch_first']
380
+ else:
381
+ attn_cfgs[index]['batch_first'] = self.batch_first
382
+ attention = build_attention(attn_cfgs[index])
383
+ # Some custom attentions used as `self_attn`
384
+ # or `cross_attn` can have different behavior.
385
+ attention.operation_name = operation_name
386
+ self.attentions.append(attention)
387
+ index += 1
388
+
389
+ self.embed_dims = self.attentions[0].embed_dims
390
+
391
+ self.ffns = ModuleList()
392
+ num_ffns = operation_order.count('ffn')
393
+ if isinstance(ffn_cfgs, dict):
394
+ ffn_cfgs = ConfigDict(ffn_cfgs)
395
+ if isinstance(ffn_cfgs, dict):
396
+ ffn_cfgs = [copy.deepcopy(ffn_cfgs) for _ in range(num_ffns)]
397
+ assert len(ffn_cfgs) == num_ffns
398
+ for ffn_index in range(num_ffns):
399
+ if 'embed_dims' not in ffn_cfgs[ffn_index]:
400
+ ffn_cfgs['embed_dims'] = self.embed_dims
401
+ else:
402
+ assert ffn_cfgs[ffn_index]['embed_dims'] == self.embed_dims
403
+ self.ffns.append(
404
+ build_feedforward_network(ffn_cfgs[ffn_index],
405
+ dict(type='FFN')))
406
+
407
+ self.norms = ModuleList()
408
+ num_norms = operation_order.count('norm')
409
+ for _ in range(num_norms):
410
+ self.norms.append(build_norm_layer(norm_cfg, self.embed_dims)[1])
411
+
412
+ def forward(self,
413
+ query,
414
+ key=None,
415
+ value=None,
416
+ query_pos=None,
417
+ key_pos=None,
418
+ attn_masks=None,
419
+ query_key_padding_mask=None,
420
+ key_padding_mask=None,
421
+ **kwargs):
422
+ """Forward function for `TransformerDecoderLayer`.
423
+
424
+ **kwargs contains some specific arguments of attentions.
425
+
426
+ Args:
427
+ query (Tensor): The input query with shape
428
+ [num_queries, bs, embed_dims] if
429
+ self.batch_first is False, else
430
+ [bs, num_queries embed_dims].
431
+ key (Tensor): The key tensor with shape [num_keys, bs,
432
+ embed_dims] if self.batch_first is False, else
433
+ [bs, num_keys, embed_dims] .
434
+ value (Tensor): The value tensor with same shape as `key`.
435
+ query_pos (Tensor): The positional encoding for `query`.
436
+ Default: None.
437
+ key_pos (Tensor): The positional encoding for `key`.
438
+ Default: None.
439
+ attn_masks (List[Tensor] | None): 2D Tensor used in
440
+ calculation of corresponding attention. The length of
441
+ it should equal to the number of `attention` in
442
+ `operation_order`. Default: None.
443
+ query_key_padding_mask (Tensor): ByteTensor for `query`, with
444
+ shape [bs, num_queries]. Only used in `self_attn` layer.
445
+ Defaults to None.
446
+ key_padding_mask (Tensor): ByteTensor for `query`, with
447
+ shape [bs, num_keys]. Default: None.
448
+
449
+ Returns:
450
+ Tensor: forwarded results with shape [num_queries, bs, embed_dims].
451
+ """
452
+
453
+ norm_index = 0
454
+ attn_index = 0
455
+ ffn_index = 0
456
+ identity = query
457
+ if attn_masks is None:
458
+ attn_masks = [None for _ in range(self.num_attn)]
459
+ elif isinstance(attn_masks, torch.Tensor):
460
+ attn_masks = [
461
+ copy.deepcopy(attn_masks) for _ in range(self.num_attn)
462
+ ]
463
+ warnings.warn(f'Use same attn_mask in all attentions in '
464
+ f'{self.__class__.__name__} ')
465
+ else:
466
+ assert len(attn_masks) == self.num_attn, f'The length of ' \
467
+ f'attn_masks {len(attn_masks)} must be equal ' \
468
+ f'to the number of attention in ' \
469
+ f'operation_order {self.num_attn}'
470
+
471
+ for layer in self.operation_order:
472
+ if layer == 'self_attn':
473
+ temp_key = temp_value = query
474
+ query = self.attentions[attn_index](
475
+ query,
476
+ temp_key,
477
+ temp_value,
478
+ identity if self.pre_norm else None,
479
+ query_pos=query_pos,
480
+ key_pos=query_pos,
481
+ attn_mask=attn_masks[attn_index],
482
+ key_padding_mask=query_key_padding_mask,
483
+ **kwargs)
484
+ attn_index += 1
485
+ identity = query
486
+
487
+ elif layer == 'norm':
488
+ query = self.norms[norm_index](query)
489
+ norm_index += 1
490
+
491
+ elif layer == 'cross_attn':
492
+ query = self.attentions[attn_index](
493
+ query,
494
+ key,
495
+ value,
496
+ identity if self.pre_norm else None,
497
+ query_pos=query_pos,
498
+ key_pos=key_pos,
499
+ attn_mask=attn_masks[attn_index],
500
+ key_padding_mask=key_padding_mask,
501
+ **kwargs)
502
+ attn_index += 1
503
+ identity = query
504
+
505
+ elif layer == 'ffn':
506
+ query = self.ffns[ffn_index](
507
+ query, identity if self.pre_norm else None)
508
+ ffn_index += 1
509
+
510
+ return query
511
+
512
+
513
+ @TRANSFORMER_LAYER_SEQUENCE.register_module()
514
+ class TransformerLayerSequence(BaseModule):
515
+ """Base class for TransformerEncoder and TransformerDecoder in vision
516
+ transformer.
517
+
518
+ As base-class of Encoder and Decoder in vision transformer.
519
+ Support customization such as specifying different kind
520
+ of `transformer_layer` in `transformer_coder`.
521
+
522
+ Args:
523
+ transformerlayer (list[obj:`mmcv.ConfigDict`] |
524
+ obj:`mmcv.ConfigDict`): Config of transformerlayer
525
+ in TransformerCoder. If it is obj:`mmcv.ConfigDict`,
526
+ it would be repeated `num_layer` times to a
527
+ list[`mmcv.ConfigDict`]. Default: None.
528
+ num_layers (int): The number of `TransformerLayer`. Default: None.
529
+ init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
530
+ Default: None.
531
+ """
532
+
533
+ def __init__(self, transformerlayers=None, num_layers=None, init_cfg=None):
534
+ super(TransformerLayerSequence, self).__init__(init_cfg)
535
+ if isinstance(transformerlayers, dict):
536
+ transformerlayers = [
537
+ copy.deepcopy(transformerlayers) for _ in range(num_layers)
538
+ ]
539
+ else:
540
+ assert isinstance(transformerlayers, list) and \
541
+ len(transformerlayers) == num_layers
542
+ self.num_layers = num_layers
543
+ self.layers = ModuleList()
544
+ for i in range(num_layers):
545
+ self.layers.append(build_transformer_layer(transformerlayers[i]))
546
+ self.embed_dims = self.layers[0].embed_dims
547
+ self.pre_norm = self.layers[0].pre_norm
548
+
549
+ def forward(self,
550
+ query,
551
+ key,
552
+ value,
553
+ query_pos=None,
554
+ key_pos=None,
555
+ attn_masks=None,
556
+ query_key_padding_mask=None,
557
+ key_padding_mask=None,
558
+ **kwargs):
559
+ """Forward function for `TransformerCoder`.
560
+
561
+ Args:
562
+ query (Tensor): Input query with shape
563
+ `(num_queries, bs, embed_dims)`.
564
+ key (Tensor): The key tensor with shape
565
+ `(num_keys, bs, embed_dims)`.
566
+ value (Tensor): The value tensor with shape
567
+ `(num_keys, bs, embed_dims)`.
568
+ query_pos (Tensor): The positional encoding for `query`.
569
+ Default: None.
570
+ key_pos (Tensor): The positional encoding for `key`.
571
+ Default: None.
572
+ attn_masks (List[Tensor], optional): Each element is 2D Tensor
573
+ which is used in calculation of corresponding attention in
574
+ operation_order. Default: None.
575
+ query_key_padding_mask (Tensor): ByteTensor for `query`, with
576
+ shape [bs, num_queries]. Only used in self-attention
577
+ Default: None.
578
+ key_padding_mask (Tensor): ByteTensor for `query`, with
579
+ shape [bs, num_keys]. Default: None.
580
+
581
+ Returns:
582
+ Tensor: results with shape [num_queries, bs, embed_dims].
583
+ """
584
+ for layer in self.layers:
585
+ query = layer(
586
+ query,
587
+ key,
588
+ value,
589
+ query_pos=query_pos,
590
+ key_pos=key_pos,
591
+ attn_masks=attn_masks,
592
+ query_key_padding_mask=query_key_padding_mask,
593
+ key_padding_mask=key_padding_mask,
594
+ **kwargs)
595
+ return query
Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/bricks/upsample.py ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import torch.nn as nn
3
+ import torch.nn.functional as F
4
+
5
+ from ..utils import xavier_init
6
+ from .registry import UPSAMPLE_LAYERS
7
+
8
+ UPSAMPLE_LAYERS.register_module('nearest', module=nn.Upsample)
9
+ UPSAMPLE_LAYERS.register_module('bilinear', module=nn.Upsample)
10
+
11
+
12
+ @UPSAMPLE_LAYERS.register_module(name='pixel_shuffle')
13
+ class PixelShufflePack(nn.Module):
14
+ """Pixel Shuffle upsample layer.
15
+
16
+ This module packs `F.pixel_shuffle()` and a nn.Conv2d module together to
17
+ achieve a simple upsampling with pixel shuffle.
18
+
19
+ Args:
20
+ in_channels (int): Number of input channels.
21
+ out_channels (int): Number of output channels.
22
+ scale_factor (int): Upsample ratio.
23
+ upsample_kernel (int): Kernel size of the conv layer to expand the
24
+ channels.
25
+ """
26
+
27
+ def __init__(self, in_channels, out_channels, scale_factor,
28
+ upsample_kernel):
29
+ super(PixelShufflePack, self).__init__()
30
+ self.in_channels = in_channels
31
+ self.out_channels = out_channels
32
+ self.scale_factor = scale_factor
33
+ self.upsample_kernel = upsample_kernel
34
+ self.upsample_conv = nn.Conv2d(
35
+ self.in_channels,
36
+ self.out_channels * scale_factor * scale_factor,
37
+ self.upsample_kernel,
38
+ padding=(self.upsample_kernel - 1) // 2)
39
+ self.init_weights()
40
+
41
+ def init_weights(self):
42
+ xavier_init(self.upsample_conv, distribution='uniform')
43
+
44
+ def forward(self, x):
45
+ x = self.upsample_conv(x)
46
+ x = F.pixel_shuffle(x, self.scale_factor)
47
+ return x
48
+
49
+
50
+ def build_upsample_layer(cfg, *args, **kwargs):
51
+ """Build upsample layer.
52
+
53
+ Args:
54
+ cfg (dict): The upsample layer config, which should contain:
55
+
56
+ - type (str): Layer type.
57
+ - scale_factor (int): Upsample ratio, which is not applicable to
58
+ deconv.
59
+ - layer args: Args needed to instantiate a upsample layer.
60
+ args (argument list): Arguments passed to the ``__init__``
61
+ method of the corresponding conv layer.
62
+ kwargs (keyword arguments): Keyword arguments passed to the
63
+ ``__init__`` method of the corresponding conv layer.
64
+
65
+ Returns:
66
+ nn.Module: Created upsample layer.
67
+ """
68
+ if not isinstance(cfg, dict):
69
+ raise TypeError(f'cfg must be a dict, but got {type(cfg)}')
70
+ if 'type' not in cfg:
71
+ raise KeyError(
72
+ f'the cfg dict must contain the key "type", but got {cfg}')
73
+ cfg_ = cfg.copy()
74
+
75
+ layer_type = cfg_.pop('type')
76
+ if layer_type not in UPSAMPLE_LAYERS:
77
+ raise KeyError(f'Unrecognized upsample type {layer_type}')
78
+ else:
79
+ upsample = UPSAMPLE_LAYERS.get(layer_type)
80
+
81
+ if upsample is nn.Upsample:
82
+ cfg_['mode'] = layer_type
83
+ layer = upsample(*args, **kwargs, **cfg_)
84
+ return layer
Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/bricks/wrappers.py ADDED
@@ -0,0 +1,180 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ r"""Modified from https://github.com/facebookresearch/detectron2/blob/master/detectron2/layers/wrappers.py # noqa: E501
3
+
4
+ Wrap some nn modules to support empty tensor input. Currently, these wrappers
5
+ are mainly used in mask heads like fcn_mask_head and maskiou_heads since mask
6
+ heads are trained on only positive RoIs.
7
+ """
8
+ import math
9
+
10
+ import torch
11
+ import torch.nn as nn
12
+ from torch.nn.modules.utils import _pair, _triple
13
+
14
+ from .registry import CONV_LAYERS, UPSAMPLE_LAYERS
15
+
16
if torch.__version__ == 'parrots':
    # The parrots framework reports a non-numeric version string; keep it
    # verbatim so callers can special-case it.
    TORCH_VERSION = torch.__version__
else:
    # torch.__version__ may look like '1.3.1+cu92'; only the first two
    # numeric components are needed for comparison.
    _major_minor = torch.__version__.split('.')[:2]
    TORCH_VERSION = tuple(int(part) for part in _major_minor)
22
+
23
+
24
+ def obsolete_torch_version(torch_version, version_threshold):
25
+ return torch_version == 'parrots' or torch_version <= version_threshold
26
+
27
+
28
class NewEmptyTensorOp(torch.autograd.Function):
    """Autograd op producing an empty tensor of a requested shape while
    keeping the graph connected to its input (gradients flow back with the
    input's original shape)."""

    @staticmethod
    def forward(ctx, x, new_shape):
        # Remember the input shape so backward can size the gradient.
        ctx.shape = x.shape
        return x.new_empty(new_shape)

    @staticmethod
    def backward(ctx, grad):
        # Second return is None: ``new_shape`` is a non-tensor argument.
        return NewEmptyTensorOp.apply(grad, ctx.shape), None
39
+
40
+
41
@CONV_LAYERS.register_module('Conv', force=True)
class Conv2d(nn.Conv2d):
    """``nn.Conv2d`` wrapper that tolerates empty-batch inputs on old
    PyTorch versions (<= 1.4) by emitting a correctly-shaped empty output."""

    def forward(self, x):
        if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 4)):
            # Standard conv output-size formula, per spatial dimension.
            spatial = [
                (dim + 2 * p - (d * (k - 1) + 1)) // s + 1
                for dim, k, p, s, d in zip(x.shape[-2:], self.kernel_size,
                                           self.padding, self.stride,
                                           self.dilation)
            ]
            empty = NewEmptyTensorOp.apply(
                x, [x.shape[0], self.out_channels] + spatial)
            if not self.training:
                return empty
            # Touch every parameter so DDP does not warn about unused ones.
            dummy = sum(p.view(-1)[0] for p in self.parameters()) * 0.0
            return empty + dummy

        return super().forward(x)
60
+
61
+
62
@CONV_LAYERS.register_module('Conv3d', force=True)
class Conv3d(nn.Conv3d):
    """``nn.Conv3d`` wrapper that tolerates empty-batch inputs on old
    PyTorch versions (<= 1.4) by emitting a correctly-shaped empty output."""

    def forward(self, x):
        if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 4)):
            # Standard conv output-size formula over the 3 spatial dims.
            spatial = [
                (dim + 2 * p - (d * (k - 1) + 1)) // s + 1
                for dim, k, p, s, d in zip(x.shape[-3:], self.kernel_size,
                                           self.padding, self.stride,
                                           self.dilation)
            ]
            empty = NewEmptyTensorOp.apply(
                x, [x.shape[0], self.out_channels] + spatial)
            if not self.training:
                return empty
            # Touch every parameter so DDP does not warn about unused ones.
            dummy = sum(p.view(-1)[0] for p in self.parameters()) * 0.0
            return empty + dummy

        return super().forward(x)
81
+
82
+
83
@CONV_LAYERS.register_module()
@CONV_LAYERS.register_module('deconv')
@UPSAMPLE_LAYERS.register_module('deconv', force=True)
class ConvTranspose2d(nn.ConvTranspose2d):
    """``nn.ConvTranspose2d`` wrapper that tolerates empty-batch inputs on
    old PyTorch versions (<= 1.4)."""

    def forward(self, x):
        if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 4)):
            # Transposed-conv output-size formula, per spatial dimension.
            spatial = [
                (dim - 1) * s - 2 * p + (d * (k - 1) + 1) + op
                for dim, k, p, s, d, op in zip(
                    x.shape[-2:], self.kernel_size, self.padding,
                    self.stride, self.dilation, self.output_padding)
            ]
            empty = NewEmptyTensorOp.apply(
                x, [x.shape[0], self.out_channels] + spatial)
            if not self.training:
                return empty
            # Touch every parameter so DDP does not warn about unused ones.
            dummy = sum(p.view(-1)[0] for p in self.parameters()) * 0.0
            return empty + dummy

        return super().forward(x)
104
+
105
+
106
@CONV_LAYERS.register_module()
@CONV_LAYERS.register_module('deconv3d')
@UPSAMPLE_LAYERS.register_module('deconv3d', force=True)
class ConvTranspose3d(nn.ConvTranspose3d):
    """``nn.ConvTranspose3d`` wrapper that tolerates empty-batch inputs on
    old PyTorch versions (<= 1.4)."""

    def forward(self, x):
        if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 4)):
            # Transposed-conv output-size formula over the 3 spatial dims.
            spatial = [
                (dim - 1) * s - 2 * p + (d * (k - 1) + 1) + op
                for dim, k, p, s, d, op in zip(
                    x.shape[-3:], self.kernel_size, self.padding,
                    self.stride, self.dilation, self.output_padding)
            ]
            empty = NewEmptyTensorOp.apply(
                x, [x.shape[0], self.out_channels] + spatial)
            if not self.training:
                return empty
            # Touch every parameter so DDP does not warn about unused ones.
            dummy = sum(p.view(-1)[0] for p in self.parameters()) * 0.0
            return empty + dummy

        return super().forward(x)
127
+
128
+
129
class MaxPool2d(nn.MaxPool2d):
    """``nn.MaxPool2d`` wrapper that supports empty-batch inputs on PyTorch
    versions (<= 1.9) that cannot infer the output shape themselves."""

    def forward(self, x):
        # PyTorch 1.9 does not support empty tensor inference yet.
        if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 9)):
            out_shape = list(x.shape[:2])
            for dim, k, p, s, d in zip(x.shape[-2:],
                                       _pair(self.kernel_size),
                                       _pair(self.padding),
                                       _pair(self.stride),
                                       _pair(self.dilation)):
                size = (dim + 2 * p - (d * (k - 1) + 1)) / s + 1
                out_shape.append(
                    math.ceil(size) if self.ceil_mode else math.floor(size))
            return NewEmptyTensorOp.apply(x, out_shape)

        return super().forward(x)
145
+
146
+
147
class MaxPool3d(nn.MaxPool3d):
    """``nn.MaxPool3d`` wrapper that supports empty-batch inputs on PyTorch
    versions (<= 1.9) that cannot infer the output shape themselves."""

    def forward(self, x):
        # PyTorch 1.9 does not support empty tensor inference yet.
        if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 9)):
            out_shape = list(x.shape[:2])
            for dim, k, p, s, d in zip(x.shape[-3:],
                                       _triple(self.kernel_size),
                                       _triple(self.padding),
                                       _triple(self.stride),
                                       _triple(self.dilation)):
                size = (dim + 2 * p - (d * (k - 1) + 1)) / s + 1
                out_shape.append(
                    math.ceil(size) if self.ceil_mode else math.floor(size))
            return NewEmptyTensorOp.apply(x, out_shape)

        return super().forward(x)
164
+
165
+
166
class Linear(torch.nn.Linear):
    """``torch.nn.Linear`` wrapper that supports empty inputs on PyTorch
    versions before 1.6 (empty-tensor forward landed in 1.6)."""

    def forward(self, x):
        if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 5)):
            empty = NewEmptyTensorOp.apply(x, [x.shape[0], self.out_features])
            if not self.training:
                return empty
            # Touch every parameter so DDP does not warn about unused ones.
            dummy = sum(p.view(-1)[0] for p in self.parameters()) * 0.0
            return empty + dummy

        return super().forward(x)
Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/builder.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ from ..runner import Sequential
3
+ from ..utils import Registry, build_from_cfg
4
+
5
+
6
+ def build_model_from_cfg(cfg, registry, default_args=None):
7
+ """Build a PyTorch model from config dict(s). Different from
8
+ ``build_from_cfg``, if cfg is a list, a ``nn.Sequential`` will be built.
9
+
10
+ Args:
11
+ cfg (dict, list[dict]): The config of modules, is is either a config
12
+ dict or a list of config dicts. If cfg is a list, a
13
+ the built modules will be wrapped with ``nn.Sequential``.
14
+ registry (:obj:`Registry`): A registry the module belongs to.
15
+ default_args (dict, optional): Default arguments to build the module.
16
+ Defaults to None.
17
+
18
+ Returns:
19
+ nn.Module: A built nn module.
20
+ """
21
+ if isinstance(cfg, list):
22
+ modules = [
23
+ build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg
24
+ ]
25
+ return Sequential(*modules)
26
+ else:
27
+ return build_from_cfg(cfg, registry, default_args)
28
+
29
+
30
+ MODELS = Registry('model', build_func=build_model_from_cfg)
Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/resnet.py ADDED
@@ -0,0 +1,316 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import logging
3
+
4
+ import torch.nn as nn
5
+ import torch.utils.checkpoint as cp
6
+
7
+ from .utils import constant_init, kaiming_init
8
+
9
+
10
def conv3x3(in_planes, out_planes, stride=1, dilation=1):
    """Return a bias-free 3x3 convolution whose padding equals its dilation,
    so the spatial size is preserved when stride is 1."""
    conv_kwargs = dict(
        kernel_size=3,
        stride=stride,
        padding=dilation,
        dilation=dilation,
        bias=False)
    return nn.Conv2d(in_planes, out_planes, **conv_kwargs)
20
+
21
+
22
class BasicBlock(nn.Module):
    """Basic residual block: two 3x3 convs with BN/ReLU and an identity
    (or ``downsample``) shortcut, as used by ResNet-18/34."""

    expansion = 1

    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False):
        super(BasicBlock, self).__init__()
        assert style in ['pytorch', 'caffe']
        self.conv1 = conv3x3(inplanes, planes, stride, dilation)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        # Gradient checkpointing is not implemented for the basic block.
        assert not with_cp

    def forward(self, x):
        # Shortcut branch: identity, or ``downsample`` when shapes change.
        shortcut = x if self.downsample is None else self.downsample(x)

        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))

        out += shortcut
        return self.relu(out)
62
+
63
+
64
class Bottleneck(nn.Module):
    """Bottleneck residual block (1x1 -> 3x3 -> 1x1 convs).

    If style is "pytorch", the stride-two layer is the 3x3 conv layer; if
    it is "caffe", the stride-two layer is the first 1x1 conv layer.
    """

    expansion = 4

    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False):
        super(Bottleneck, self).__init__()
        assert style in ['pytorch', 'caffe']
        if style == 'pytorch':
            conv1_stride, conv2_stride = 1, stride
        else:
            conv1_stride, conv2_stride = stride, 1
        # NOTE: module creation order is kept (conv1, conv2, bn1, bn2,
        # conv3, bn3) so seeded parameter initialization is reproducible.
        self.conv1 = nn.Conv2d(
            inplanes, planes, kernel_size=1, stride=conv1_stride, bias=False)
        self.conv2 = nn.Conv2d(
            planes,
            planes,
            kernel_size=3,
            stride=conv2_stride,
            padding=dilation,
            dilation=dilation,
            bias=False)

        self.bn1 = nn.BatchNorm2d(planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(
            planes, planes * self.expansion, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        self.with_cp = with_cp

    def forward(self, x):

        def _inner_forward(inp):
            out = self.relu(self.bn1(self.conv1(inp)))
            out = self.relu(self.bn2(self.conv2(out)))
            out = self.bn3(self.conv3(out))
            shortcut = inp if self.downsample is None \
                else self.downsample(inp)
            return out + shortcut

        # Optionally trade compute for memory via gradient checkpointing.
        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)

        return self.relu(out)
141
+
142
+
143
def make_res_layer(block,
                   inplanes,
                   planes,
                   blocks,
                   stride=1,
                   dilation=1,
                   style='pytorch',
                   with_cp=False):
    """Stack ``blocks`` residual blocks of type ``block`` into one
    ``nn.Sequential`` stage.

    Only the first block may change stride/channels; it receives a
    downsample (1x1 conv + BN) shortcut when the stride is not 1 or the
    channel count changes.
    """
    downsample = None
    if stride != 1 or inplanes != planes * block.expansion:
        downsample = nn.Sequential(
            nn.Conv2d(
                inplanes,
                planes * block.expansion,
                kernel_size=1,
                stride=stride,
                bias=False),
            nn.BatchNorm2d(planes * block.expansion),
        )

    # First block carries the stride and downsample shortcut.
    layers = [
        block(
            inplanes,
            planes,
            stride,
            dilation,
            downsample,
            style=style,
            with_cp=with_cp)
    ]
    inplanes = planes * block.expansion
    # Remaining blocks keep the resolution (stride 1, no downsample).
    layers.extend(
        block(inplanes, planes, 1, dilation, style=style, with_cp=with_cp)
        for _ in range(1, blocks))

    return nn.Sequential(*layers)
179
+
180
+
181
class ResNet(nn.Module):
    """ResNet backbone.

    Args:
        depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
        num_stages (int): Resnet stages, normally 4.
        strides (Sequence[int]): Strides of the first block of each stage.
        dilations (Sequence[int]): Dilation of each stage.
        out_indices (Sequence[int]): Output from which stages.
        style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
            layer is the 3x3 conv layer, otherwise the stride-two layer is
            the first 1x1 conv layer.
        frozen_stages (int): Stages to be frozen (all param fixed). -1 means
            not freezing any parameters.
        bn_eval (bool): Whether to set BN layers as eval mode, namely, freeze
            running stats (mean and var).
        bn_frozen (bool): Whether to freeze weight and bias of BN layers.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed.
    """

    # Maps depth -> (block type, number of blocks per stage).
    arch_settings = {
        18: (BasicBlock, (2, 2, 2, 2)),
        34: (BasicBlock, (3, 4, 6, 3)),
        50: (Bottleneck, (3, 4, 6, 3)),
        101: (Bottleneck, (3, 4, 23, 3)),
        152: (Bottleneck, (3, 8, 36, 3))
    }

    def __init__(self,
                 depth,
                 num_stages=4,
                 strides=(1, 2, 2, 2),
                 dilations=(1, 1, 1, 1),
                 out_indices=(0, 1, 2, 3),
                 style='pytorch',
                 frozen_stages=-1,
                 bn_eval=True,
                 bn_frozen=False,
                 with_cp=False):
        super(ResNet, self).__init__()
        if depth not in self.arch_settings:
            raise KeyError(f'invalid depth {depth} for resnet')
        assert num_stages >= 1 and num_stages <= 4
        block, stage_blocks = self.arch_settings[depth]
        # Truncate the architecture to the requested number of stages.
        stage_blocks = stage_blocks[:num_stages]
        assert len(strides) == len(dilations) == num_stages
        assert max(out_indices) < num_stages

        self.out_indices = out_indices
        self.style = style
        self.frozen_stages = frozen_stages
        self.bn_eval = bn_eval
        self.bn_frozen = bn_frozen
        self.with_cp = with_cp

        # Stem: 7x7 stride-2 conv + BN + ReLU + stride-2 max pool.
        self.inplanes = 64
        self.conv1 = nn.Conv2d(
            3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        # Residual stages, registered as ``layer1`` .. ``layer{num_stages}``.
        self.res_layers = []
        for i, num_blocks in enumerate(stage_blocks):
            stride = strides[i]
            dilation = dilations[i]
            # Channel width doubles at each stage: 64, 128, 256, 512.
            planes = 64 * 2**i
            res_layer = make_res_layer(
                block,
                self.inplanes,
                planes,
                num_blocks,
                stride=stride,
                dilation=dilation,
                style=self.style,
                with_cp=with_cp)
            self.inplanes = planes * block.expansion
            layer_name = f'layer{i + 1}'
            self.add_module(layer_name, res_layer)
            self.res_layers.append(layer_name)

        # Number of channels of the last stage's output.
        self.feat_dim = block.expansion * 64 * 2**(len(stage_blocks) - 1)

    def init_weights(self, pretrained=None):
        # Load a checkpoint when ``pretrained`` is a path/URL string;
        # otherwise apply Kaiming/constant init to conv/BN layers.
        if isinstance(pretrained, str):
            logger = logging.getLogger()
            from ..runner import load_checkpoint
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, nn.BatchNorm2d):
                    constant_init(m, 1)
        else:
            raise TypeError('pretrained must be a str or None')

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        outs = []
        for i, layer_name in enumerate(self.res_layers):
            res_layer = getattr(self, layer_name)
            x = res_layer(x)
            if i in self.out_indices:
                outs.append(x)
        # Return a bare tensor for a single output, a tuple otherwise.
        if len(outs) == 1:
            return outs[0]
        else:
            return tuple(outs)

    def train(self, mode=True):
        super(ResNet, self).train(mode)
        # Optionally keep BN in eval mode (frozen running statistics).
        if self.bn_eval:
            for m in self.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eval()
                    if self.bn_frozen:
                        for params in m.parameters():
                            params.requires_grad = False
        # Freeze the stem and the first ``frozen_stages`` residual stages.
        if mode and self.frozen_stages >= 0:
            for param in self.conv1.parameters():
                param.requires_grad = False
            for param in self.bn1.parameters():
                param.requires_grad = False
            self.bn1.eval()
            self.bn1.weight.requires_grad = False
            self.bn1.bias.requires_grad = False
            for i in range(1, self.frozen_stages + 1):
                mod = getattr(self, f'layer{i}')
                mod.eval()
                for param in mod.parameters():
                    param.requires_grad = False
Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/utils/__init__.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ from .flops_counter import get_model_complexity_info
3
+ from .fuse_conv_bn import fuse_conv_bn
4
+ from .sync_bn import revert_sync_batchnorm
5
+ from .weight_init import (INITIALIZERS, Caffe2XavierInit, ConstantInit,
6
+ KaimingInit, NormalInit, PretrainedInit,
7
+ TruncNormalInit, UniformInit, XavierInit,
8
+ bias_init_with_prob, caffe2_xavier_init,
9
+ constant_init, initialize, kaiming_init, normal_init,
10
+ trunc_normal_init, uniform_init, xavier_init)
11
+
12
+ __all__ = [
13
+ 'get_model_complexity_info', 'bias_init_with_prob', 'caffe2_xavier_init',
14
+ 'constant_init', 'kaiming_init', 'normal_init', 'trunc_normal_init',
15
+ 'uniform_init', 'xavier_init', 'fuse_conv_bn', 'initialize',
16
+ 'INITIALIZERS', 'ConstantInit', 'XavierInit', 'NormalInit',
17
+ 'TruncNormalInit', 'UniformInit', 'KaimingInit', 'PretrainedInit',
18
+ 'Caffe2XavierInit', 'revert_sync_batchnorm'
19
+ ]
Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/utils/flops_counter.py ADDED
@@ -0,0 +1,599 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Modified from flops-counter.pytorch by Vladislav Sovrasov
2
+ # original repo: https://github.com/sovrasov/flops-counter.pytorch
3
+
4
+ # MIT License
5
+
6
+ # Copyright (c) 2018 Vladislav Sovrasov
7
+
8
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
9
+ # of this software and associated documentation files (the "Software"), to deal
10
+ # in the Software without restriction, including without limitation the rights
11
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12
+ # copies of the Software, and to permit persons to whom the Software is
13
+ # furnished to do so, subject to the following conditions:
14
+
15
+ # The above copyright notice and this permission notice shall be included in
16
+ # all copies or substantial portions of the Software.
17
+
18
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24
+ # SOFTWARE.
25
+
26
+ import sys
27
+ from functools import partial
28
+
29
+ import numpy as np
30
+ import torch
31
+ import torch.nn as nn
32
+
33
+ import annotator.uniformer.mmcv as mmcv
34
+
35
+
36
def get_model_complexity_info(model,
                              input_shape,
                              print_per_layer_stat=True,
                              as_strings=True,
                              input_constructor=None,
                              flush=False,
                              ost=sys.stdout):
    """Compute FLOPs and parameter counts of a model.

    The model is temporarily instrumented with forward hooks, run once on a
    dummy batch (or on the input produced by ``input_constructor``), and the
    accumulated statistics are returned. Supported layer types are the ones
    listed by :func:`get_modules_mapping`.

    Args:
        model (nn.Module): Model to analyse.
        input_shape (tuple): Shape of a single input sample (no batch dim).
        print_per_layer_stat (bool): Print a per-layer breakdown to ``ost``.
            Default: True.
        as_strings (bool): Return human-readable strings instead of raw
            floats. Default: True.
        input_constructor (None | callable): Optional callable mapping
            ``input_shape`` to a dict of keyword inputs for the model.
            Default: None.
        flush (bool): Same as in :func:`print`. Default: False.
        ost (stream): Same as the ``file`` param of :func:`print`.
            Default: sys.stdout.

    Returns:
        tuple[float | str]: ``(flops, params)`` as strings when
        ``as_strings`` is True, otherwise as floats.
    """
    assert type(input_shape) is tuple
    assert len(input_shape) >= 1
    assert isinstance(model, nn.Module)

    flops_model = add_flops_counting_methods(model)
    flops_model.eval()
    flops_model.start_flops_count()

    if input_constructor:
        kwargs = input_constructor(input_shape)
        _ = flops_model(**kwargs)
    else:
        try:
            reference = next(flops_model.parameters())
            dummy = torch.ones(()).new_empty(
                (1, *input_shape),
                dtype=reference.dtype,
                device=reference.device)
        except StopIteration:
            # Parameter-free models (`nn.ReLU()`, `nn.AvgPool2d`, ...) fall
            # back to a default CPU float tensor.
            dummy = torch.ones(()).new_empty((1, *input_shape))

        _ = flops_model(dummy)

    flops_count, params_count = flops_model.compute_average_flops_cost()
    if print_per_layer_stat:
        print_model_with_flops(
            flops_model, flops_count, params_count, ost=ost, flush=flush)
    flops_model.stop_flops_count()

    if as_strings:
        return flops_to_string(flops_count), params_to_string(params_count)

    return flops_count, params_count
116
+
117
+
118
def flops_to_string(flops, units='GFLOPs', precision=2):
    """Convert a FLOPs number into a human-readable string.

    One multiply-add counts as one FLOP.

    Args:
        flops (float): FLOPs number to be converted.
        units (str | None): Target unit: 'GFLOPs', 'MFLOPs', 'KFLOPs' or
            'FLOPs'. ``None`` picks the largest unit that fits.
            Default: 'GFLOPs'.
        precision (int): Digits after the decimal point. Default: 2.

    Returns:
        str: The converted FLOPs number with units.

    Examples:
        >>> flops_to_string(1e9)
        '1.0 GFLOPs'
        >>> flops_to_string(2e5, 'MFLOPs')
        '0.2 MFLOPs'
        >>> flops_to_string(3e-9, None)
        '3e-09 FLOPs'
    """
    if units is None:
        # Auto-select: first power-of-1000 bucket the value reaches.
        for suffix, exp in (('GFLOPs', 9), ('MFLOPs', 6), ('KFLOPs', 3)):
            if flops // 10**exp > 0:
                return str(round(flops / 10.**exp, precision)) + ' ' + suffix
        return str(flops) + ' FLOPs'

    exp_by_unit = {'GFLOPs': 9, 'MFLOPs': 6, 'KFLOPs': 3}
    if units in exp_by_unit:
        exp = exp_by_unit[units]
        return str(round(flops / 10.**exp, precision)) + ' ' + units
    # Unknown unit names degrade to raw FLOPs, as upstream does.
    return str(flops) + ' FLOPs'
159
+
160
+
161
def params_to_string(num_params, units=None, precision=2):
    """Convert a parameter count into a human-readable string.

    Args:
        num_params (float): Parameter number to be converted.
        units (str | None): Target unit: 'M', 'K' or ''. ``None`` picks the
            largest unit that fits. Default: None.
        precision (int): Digits after the decimal point. Default: 2.

    Returns:
        str: The converted parameter number with units.

    Examples:
        >>> params_to_string(1e9)
        '1000.0 M'
        >>> params_to_string(2e5)
        '200.0 k'
        >>> params_to_string(3e-9)
        '3e-09'
    """
    if units == 'M':
        return str(round(num_params / 10.**6, precision)) + ' M'
    if units == 'K':
        return str(round(num_params / 10.**3, precision)) + ' K'
    if units is None:
        # Auto-select; note the lowercase ' k' suffix kept for
        # backward compatibility with existing output parsers.
        if num_params // 10**6 > 0:
            return str(round(num_params / 10**6, precision)) + ' M'
        if num_params // 10**3:
            return str(round(num_params / 10**3, precision)) + ' k'
    return str(num_params)
196
+
197
+
198
def print_model_with_flops(model,
                           total_flops,
                           total_params,
                           units='GFLOPs',
                           precision=3,
                           ost=sys.stdout,
                           flush=False):
    """Print a model with FLOPs for each layer.

    Temporarily swaps every submodule's ``extra_repr`` for a version that
    prepends parameter/FLOPs counts, prints the model, then restores the
    original representations.

    Args:
        model (nn.Module): The model to be printed.
        total_flops (float): Total FLOPs of the model.
        total_params (float): Total parameter counts of the model.
        units (str | None): Converted FLOPs units. Default: 'GFLOPs'.
        precision (int): Digit number after the decimal point. Default: 3.
        ost (stream): same as `file` param in :func:`print`.
            Default: sys.stdout.
        flush (bool): same as that in :func:`print`. Default: False.

    Example:
        >>> model = ExampleModel()
        >>> x = (3, 16, 16)
        to print the complexity information state for each layer, you can use
        >>> get_model_complexity_info(model, x)
        or directly use
        >>> print_model_with_flops(model, 4579784.0, 37361)
    """

    def accumulate_params(self):
        # Supported leaves report their own stored count; containers sum
        # over children recursively.
        if is_supported_instance(self):
            return self.__params__
        else:
            sum = 0
            for m in self.children():
                sum += m.accumulate_params()
            return sum

    def accumulate_flops(self):
        # FLOPs are averaged over the number of forward batches recorded
        # on the *root* model (closure over `model`).
        if is_supported_instance(self):
            return self.__flops__ / model.__batch_counter__
        else:
            sum = 0
            for m in self.children():
                sum += m.accumulate_flops()
            return sum

    def flops_repr(self):
        # Per-module line: params, share of params, FLOPs, share of FLOPs,
        # then the module's original extra_repr.
        accumulated_num_params = self.accumulate_params()
        accumulated_flops_cost = self.accumulate_flops()
        return ', '.join([
            params_to_string(
                accumulated_num_params, units='M', precision=precision),
            '{:.3%} Params'.format(accumulated_num_params / total_params),
            flops_to_string(
                accumulated_flops_cost, units=units, precision=precision),
            '{:.3%} FLOPs'.format(accumulated_flops_cost / total_flops),
            self.original_extra_repr()
        ])

    def add_extra_repr(m):
        # Bind the helpers as methods via the descriptor protocol and swap
        # in the FLOPs-aware extra_repr, keeping the original for later
        # restoration.
        m.accumulate_flops = accumulate_flops.__get__(m)
        m.accumulate_params = accumulate_params.__get__(m)
        flops_extra_repr = flops_repr.__get__(m)
        if m.extra_repr != flops_extra_repr:
            m.original_extra_repr = m.extra_repr
            m.extra_repr = flops_extra_repr
            assert m.extra_repr != m.original_extra_repr

    def del_extra_repr(m):
        # Undo everything add_extra_repr installed.
        if hasattr(m, 'original_extra_repr'):
            m.extra_repr = m.original_extra_repr
            del m.original_extra_repr
        if hasattr(m, 'accumulate_flops'):
            del m.accumulate_flops

    model.apply(add_extra_repr)
    print(model, file=ost, flush=flush)
    model.apply(del_extra_repr)
305
+
306
+
307
def get_model_parameters_number(model):
    """Count the trainable parameters of a model.

    Args:
        model (nn.Module): The model whose parameters are counted.

    Returns:
        int: Number of elements across all parameters that require grad.
    """
    trainable = (p.numel() for p in model.parameters() if p.requires_grad)
    return sum(trainable)
318
+
319
+
320
def add_flops_counting_methods(net_main_module):
    """Attach the flops-counting API (start/stop/reset/compute) to a module."""
    # adding additional methods to the existing module object,
    # this is done this way so that each function has access to self object
    # (`__get__` binds the module-level functions as methods of this
    # particular instance).
    net_main_module.start_flops_count = start_flops_count.__get__(
        net_main_module)
    net_main_module.stop_flops_count = stop_flops_count.__get__(
        net_main_module)
    net_main_module.reset_flops_count = reset_flops_count.__get__(
        net_main_module)
    net_main_module.compute_average_flops_cost = compute_average_flops_cost.__get__(  # noqa: E501
        net_main_module)

    # Start from a clean slate so stale counters never leak in.
    net_main_module.reset_flops_count()

    return net_main_module
335
+
336
+
337
def compute_average_flops_cost(self):
    """Compute average FLOPs cost.

    Available after ``add_flops_counting_methods()`` has been called on the
    net object.

    Returns:
        tuple: mean FLOPs consumption per image, and the total trainable
        parameter count.
    """
    total_flops = sum(module.__flops__ for module in self.modules()
                      if is_supported_instance(module))
    params_sum = get_model_parameters_number(self)
    return total_flops / self.__batch_counter__, params_sum
353
+
354
+
355
def start_flops_count(self):
    """Activate the computation of mean flops consumption per image.

    Available after ``add_flops_counting_methods()`` is called on a desired
    net object. It should be called before running the network.
    """
    add_batch_counter_hook_function(self)

    def add_flops_counter_hook_function(module):
        # Attach the type-specific FLOPs hook exactly once per module.
        if is_supported_instance(module):
            if hasattr(module, '__flops_handle__'):
                return
            handle = module.register_forward_hook(
                get_modules_mapping()[type(module)])
            module.__flops_handle__ = handle

    # The original wrapped the callable in `functools.partial` with no bound
    # arguments — a no-op; apply the hook installer directly.
    self.apply(add_flops_counter_hook_function)
376
+
377
+
378
def stop_flops_count(self):
    """Stop computing the mean flops consumption per image.

    A method to stop computing the mean flops consumption per image, which will
    be available after ``add_flops_counting_methods()`` is called on a desired
    net object. It can be called to pause the computation whenever.
    """
    # Remove the root batch counter and every per-module FLOPs hook;
    # accumulated counters stay intact until reset_flops_count().
    remove_batch_counter_hook_function(self)
    self.apply(remove_flops_counter_hook_function)
387
+
388
+
389
def reset_flops_count(self):
    """Reset statistics computed so far.

    A method to Reset computed statistics, which will be available after
    `add_flops_counting_methods()` is called on a desired net object.
    """
    # Zero the batch counter on the root and (re)create __flops__/__params__
    # on every supported submodule.
    add_batch_counter_variables_or_reset(self)
    self.apply(add_flops_counter_variable_or_reset)
397
+
398
+
399
+ # ---- Internal functions
400
def empty_flops_counter_hook(module, input, output):
    """Hook for modules whose FLOPs are counted as zero.

    Still touches ``__flops__`` so downstream accumulation can rely on the
    attribute existing.
    """
    module.__flops__ += 0
402
+
403
+
404
def upsample_flops_counter_hook(module, input, output):
    """Count one FLOP per element of ``output[0]``.

    NOTE(review): upstream indexes ``output[0]`` and then treats its first
    dimension as the batch size — preserved verbatim here; confirm against
    how Upsample outputs are structured before changing it.
    """
    first = output[0]
    module.__flops__ += int(np.prod(first.shape))
411
+
412
+
413
def relu_flops_counter_hook(module, input, output):
    """Count one FLOP per activated output element."""
    module.__flops__ += int(output.numel())
416
+
417
+
418
def linear_flops_counter_hook(module, input, output):
    """Count FLOPs for a linear layer.

    One multiply-add per (input element, output feature) pair; pytorch has
    already validated the dimensions by the time this hook runs.
    """
    in_tensor = input[0]
    out_features = output.shape[-1]
    module.__flops__ += int(np.prod(in_tensor.shape) * out_features)
423
+
424
+
425
def pool_flops_counter_hook(module, input, output):
    """Count one FLOP per input element for pooling layers."""
    module.__flops__ += int(np.prod(input[0].shape))
428
+
429
+
430
def norm_flops_counter_hook(module, input, output):
    """Count FLOPs for normalization layers.

    One FLOP per input element; doubled when the layer applies a learned
    scale and shift (``affine`` / ``elementwise_affine``).
    """
    elements = np.prod(input[0].shape)
    has_affine = (getattr(module, 'affine', False)
                  or getattr(module, 'elementwise_affine', False))
    module.__flops__ += int(2 * elements if has_affine else elements)
438
+
439
+
440
def deconv_flops_counter_hook(conv_module, input, output):
    """Accumulate FLOPs for a 2D transposed convolution.

    One multiply-add counts as one FLOP. The conv term is driven by the
    *input* spatial size (each input position fires the kernel once); the
    bias term adds one FLOP per output element.
    """
    # Can have multiple inputs, getting the first one
    input = input[0]

    batch_size = input.shape[0]
    input_height, input_width = input.shape[2:]

    kernel_height, kernel_width = conv_module.kernel_size
    in_channels = conv_module.in_channels
    out_channels = conv_module.out_channels
    groups = conv_module.groups

    filters_per_channel = out_channels // groups
    conv_per_position_flops = (
        kernel_height * kernel_width * in_channels * filters_per_channel)

    active_elements_count = batch_size * input_height * input_width
    overall_conv_flops = conv_per_position_flops * active_elements_count
    bias_flops = 0
    if conv_module.bias is not None:
        output_height, output_width = output.shape[2:]
        # BUGFIX: upstream multiplied output_height * output_height, which
        # over/under-counts bias FLOPs for non-square outputs.
        bias_flops = out_channels * batch_size * output_height * output_width
    overall_flops = overall_conv_flops + bias_flops

    conv_module.__flops__ += int(overall_flops)
465
+
466
+
467
def conv_flops_counter_hook(conv_module, input, output):
    """Accumulate FLOPs for a regular convolution (1d/2d/3d).

    One multiply-add per kernel application counts as one FLOP; bias adds
    one FLOP per output element.
    """
    in_tensor = input[0]  # hooks may receive several inputs; use the first
    batch_size = in_tensor.shape[0]

    spatial_positions = int(np.prod(list(output.shape[2:])))
    kernel_volume = int(np.prod(list(conv_module.kernel_size)))
    filters_per_channel = conv_module.out_channels // conv_module.groups

    flops_per_position = (
        kernel_volume * conv_module.in_channels * filters_per_channel)
    active_elements = batch_size * spatial_positions

    total_flops = flops_per_position * active_elements
    if conv_module.bias is not None:
        total_flops += conv_module.out_channels * active_elements

    conv_module.__flops__ += int(total_flops)
496
+
497
+
498
def batch_counter_hook(module, input, output):
    """Track how many samples have flowed through the root module.

    The batch size is taken as ``len()`` of the first positional input;
    modules invoked without positional inputs are assumed to see one sample.
    """
    batch_size = 1
    if len(input) > 0:
        # Can have multiple inputs, getting the first one
        batch_size = len(input[0])
    else:
        # Removed a dead `pass` statement that preceded this warning.
        print('Warning! No positional inputs found for a module, '
              'assuming batch size is 1.')
    module.__batch_counter__ += batch_size
509
+
510
+
511
def add_batch_counter_variables_or_reset(module):
    """(Re)initialise the count of forward batches seen by ``module``."""

    module.__batch_counter__ = 0
514
+
515
+
516
def add_batch_counter_hook_function(module):
    """Attach the batch-counting forward hook once (idempotent)."""
    if hasattr(module, '__batch_counter_handle__'):
        # Already instrumented; don't stack duplicate hooks.
        return

    module.__batch_counter_handle__ = module.register_forward_hook(
        batch_counter_hook)
522
+
523
+
524
def remove_batch_counter_hook_function(module):
    """Detach the batch-counting hook from ``module`` if it was attached."""
    if hasattr(module, '__batch_counter_handle__'):
        handle = module.__batch_counter_handle__
        handle.remove()
        del module.__batch_counter_handle__
528
+
529
+
530
def add_flops_counter_variable_or_reset(module):
    """(Re)initialise ``__flops__`` and ``__params__`` on supported modules.

    Emits a warning when the attributes already exist, since user code (or a
    previous count) is about to be clobbered.
    """
    if is_supported_instance(module):
        if hasattr(module, '__flops__') or hasattr(module, '__params__'):
            # BUGFIX: the original message concatenated the module name with
            # no surrounding spaces/punctuation, producing garbled output.
            print('Warning: variables __flops__ or __params__ are already '
                  'defined for the module ' + type(module).__name__ +
                  '. ptflops can affect your code!')
        module.__flops__ = 0
        module.__params__ = get_model_parameters_number(module)
538
+
539
+
540
def is_supported_instance(module):
    """Whether a FLOPs-counting hook exists for this exact module type.

    Exact ``type`` match on purpose: subclasses may have different costs.
    """
    return type(module) in get_modules_mapping()
544
+
545
+
546
def remove_flops_counter_hook_function(module):
    """Detach the FLOPs hook from a supported module, if one is attached."""
    if is_supported_instance(module) and hasattr(module, '__flops_handle__'):
        module.__flops_handle__.remove()
        del module.__flops_handle__
551
+
552
+
553
def get_modules_mapping():
    """Map supported module types to their FLOPs-counting hook functions.

    Built lazily (a function, not a module constant) so that the
    ``mmcv.cnn.bricks`` wrapper classes are resolved at call time.
    """
    return {
        # convolutions
        nn.Conv1d: conv_flops_counter_hook,
        nn.Conv2d: conv_flops_counter_hook,
        mmcv.cnn.bricks.Conv2d: conv_flops_counter_hook,
        nn.Conv3d: conv_flops_counter_hook,
        mmcv.cnn.bricks.Conv3d: conv_flops_counter_hook,
        # activations
        nn.ReLU: relu_flops_counter_hook,
        nn.PReLU: relu_flops_counter_hook,
        nn.ELU: relu_flops_counter_hook,
        nn.LeakyReLU: relu_flops_counter_hook,
        nn.ReLU6: relu_flops_counter_hook,
        # poolings
        nn.MaxPool1d: pool_flops_counter_hook,
        nn.AvgPool1d: pool_flops_counter_hook,
        nn.AvgPool2d: pool_flops_counter_hook,
        nn.MaxPool2d: pool_flops_counter_hook,
        mmcv.cnn.bricks.MaxPool2d: pool_flops_counter_hook,
        nn.MaxPool3d: pool_flops_counter_hook,
        mmcv.cnn.bricks.MaxPool3d: pool_flops_counter_hook,
        nn.AvgPool3d: pool_flops_counter_hook,
        nn.AdaptiveMaxPool1d: pool_flops_counter_hook,
        nn.AdaptiveAvgPool1d: pool_flops_counter_hook,
        nn.AdaptiveMaxPool2d: pool_flops_counter_hook,
        nn.AdaptiveAvgPool2d: pool_flops_counter_hook,
        nn.AdaptiveMaxPool3d: pool_flops_counter_hook,
        nn.AdaptiveAvgPool3d: pool_flops_counter_hook,
        # normalizations
        nn.BatchNorm1d: norm_flops_counter_hook,
        nn.BatchNorm2d: norm_flops_counter_hook,
        nn.BatchNorm3d: norm_flops_counter_hook,
        nn.GroupNorm: norm_flops_counter_hook,
        nn.InstanceNorm1d: norm_flops_counter_hook,
        nn.InstanceNorm2d: norm_flops_counter_hook,
        nn.InstanceNorm3d: norm_flops_counter_hook,
        nn.LayerNorm: norm_flops_counter_hook,
        # FC
        nn.Linear: linear_flops_counter_hook,
        mmcv.cnn.bricks.Linear: linear_flops_counter_hook,
        # Upscale
        nn.Upsample: upsample_flops_counter_hook,
        # Deconvolution
        nn.ConvTranspose2d: deconv_flops_counter_hook,
        mmcv.cnn.bricks.ConvTranspose2d: deconv_flops_counter_hook,
    }
Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/utils/fuse_conv_bn.py ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import torch
3
+ import torch.nn as nn
4
+
5
+
6
+ def _fuse_conv_bn(conv, bn):
7
+ """Fuse conv and bn into one module.
8
+
9
+ Args:
10
+ conv (nn.Module): Conv to be fused.
11
+ bn (nn.Module): BN to be fused.
12
+
13
+ Returns:
14
+ nn.Module: Fused module.
15
+ """
16
+ conv_w = conv.weight
17
+ conv_b = conv.bias if conv.bias is not None else torch.zeros_like(
18
+ bn.running_mean)
19
+
20
+ factor = bn.weight / torch.sqrt(bn.running_var + bn.eps)
21
+ conv.weight = nn.Parameter(conv_w *
22
+ factor.reshape([conv.out_channels, 1, 1, 1]))
23
+ conv.bias = nn.Parameter((conv_b - bn.running_mean) * factor + bn.bias)
24
+ return conv
25
+
26
+
27
def fuse_conv_bn(module):
    """Recursively fuse conv and bn in a module.

    During inference, the functionary of batch norm layers is turned off
    but only the mean and var alone channels are used, which exposes the
    chance to fuse it with the preceding conv layers to save computations and
    simplify network structures.

    Args:
        module (nn.Module): Module to be fused (mutated in place).

    Returns:
        nn.Module: Fused module.
    """
    last_conv = None
    last_conv_name = None

    # Walk direct children in registration order: a BN is fused into the
    # most recently seen Conv2d sibling.
    for name, child in module.named_children():
        if isinstance(child,
                      (nn.modules.batchnorm._BatchNorm, nn.SyncBatchNorm)):
            if last_conv is None:  # only fuse BN that is after Conv
                continue
            fused_conv = _fuse_conv_bn(last_conv, child)
            module._modules[last_conv_name] = fused_conv
            # To reduce changes, set BN as Identity instead of deleting it.
            module._modules[name] = nn.Identity()
            last_conv = None
        elif isinstance(child, nn.Conv2d):
            last_conv = child
            last_conv_name = name
        else:
            # Recurse into containers (Sequential, blocks, ...).
            fuse_conv_bn(child)
    return module
Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/utils/sync_bn.py ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+
3
+ import annotator.uniformer.mmcv as mmcv
4
+
5
+
6
class _BatchNormXd(torch.nn.modules.batchnorm._BatchNorm):
    """A general BatchNorm layer without input dimension check.

    Reproduced from @kapily's work:
    (https://github.com/pytorch/pytorch/issues/41081#issuecomment-783961547)
    The only difference between BatchNorm1d, BatchNorm2d, BatchNorm3d, etc
    is `_check_input_dim` that is designed for tensor sanity checks.
    The check has been bypassed in this class for the convenience of converting
    SyncBatchNorm.
    """

    def _check_input_dim(self, input):
        # Deliberately a no-op: accept input of any dimensionality.
        return
19
+
20
+
21
def revert_sync_batchnorm(module):
    """Helper function to convert all `SyncBatchNorm` (SyncBN) and
    `mmcv.ops.sync_bn.SyncBatchNorm`(MMSyncBN) layers in the model to
    `BatchNormXd` layers.

    Adapted from @kapily's work:
    (https://github.com/pytorch/pytorch/issues/41081#issuecomment-783961547)

    Args:
        module (nn.Module): The module containing `SyncBatchNorm` layers.

    Returns:
        module_output: The converted module with `BatchNormXd` layers.
    """
    module_output = module
    # Also handle MMCV's own SyncBatchNorm when the compiled ops exist.
    module_checklist = [torch.nn.modules.batchnorm.SyncBatchNorm]
    if hasattr(mmcv, 'ops'):
        module_checklist.append(mmcv.ops.SyncBatchNorm)
    if isinstance(module, tuple(module_checklist)):
        module_output = _BatchNormXd(module.num_features, module.eps,
                                     module.momentum, module.affine,
                                     module.track_running_stats)
        if module.affine:
            # no_grad() may not be needed here but
            # just to be consistent with `convert_sync_batchnorm()`
            with torch.no_grad():
                module_output.weight = module.weight
                module_output.bias = module.bias
        # Share (not copy) running statistics with the source module.
        module_output.running_mean = module.running_mean
        module_output.running_var = module.running_var
        module_output.num_batches_tracked = module.num_batches_tracked
        module_output.training = module.training
        # qconfig exists in quantized models
        if hasattr(module, 'qconfig'):
            module_output.qconfig = module.qconfig
    for name, child in module.named_children():
        module_output.add_module(name, revert_sync_batchnorm(child))
    # Drop the local reference to the (possibly replaced) input module.
    del module
    return module_output
Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/utils/weight_init.py ADDED
@@ -0,0 +1,684 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import copy
3
+ import math
4
+ import warnings
5
+
6
+ import numpy as np
7
+ import torch
8
+ import torch.nn as nn
9
+ from torch import Tensor
10
+
11
+ from annotator.uniformer.mmcv.utils import Registry, build_from_cfg, get_logger, print_log
12
+
13
+ INITIALIZERS = Registry('initializer')
14
+
15
+
16
def update_init_info(module, init_info):
    """Update the `_params_init_info` in the module if the value of parameters
    are changed.

    Args:
        module (obj:`nn.Module`): The module of PyTorch with a user-defined
            attribute `_params_init_info` which records the initialization
            information.
        init_info (str): The string that describes the initialization.
    """
    assert hasattr(
        module,
        '_params_init_info'), f'Can not find `_params_init_info` in {module}'
    # `_params_init_info` is keyed by the Parameter objects themselves
    # (tensor identity), so every current parameter must already be known.
    for name, param in module.named_parameters():

        assert param in module._params_init_info, (
            f'Find a new :obj:`Parameter` '
            f'named `{name}` during executing the '
            f'`init_weights` of '
            f'`{module.__class__.__name__}`. '
            f'Please do not add or '
            f'replace parameters during executing '
            f'the `init_weights`. ')

        # The parameter has been changed during executing the
        # `init_weights` of module
        # (the mean is used as a cheap change-detection fingerprint; the
        # `!=` on 0-dim tensors yields a 0-dim bool tensor, which is truthy
        # in this `if`).
        mean_value = param.data.mean()
        if module._params_init_info[param]['tmp_mean_value'] != mean_value:
            module._params_init_info[param]['init_info'] = init_info
            module._params_init_info[param]['tmp_mean_value'] = mean_value
46
+
47
+
48
def constant_init(module, val, bias=0):
    """Fill ``module.weight`` with ``val`` and ``module.bias`` with ``bias``.

    Modules lacking either attribute (or with it set to ``None``) are left
    untouched.
    """
    if getattr(module, 'weight', None) is not None:
        nn.init.constant_(module.weight, val)
    if getattr(module, 'bias', None) is not None:
        nn.init.constant_(module.bias, bias)
53
+
54
+
55
def xavier_init(module, gain=1, bias=0, distribution='normal'):
    """Xavier-initialize ``module.weight``; fill ``module.bias`` constant."""
    assert distribution in ['uniform', 'normal']
    if getattr(module, 'weight', None) is not None:
        init_fn = (nn.init.xavier_uniform_ if distribution == 'uniform'
                   else nn.init.xavier_normal_)
        init_fn(module.weight, gain=gain)
    if getattr(module, 'bias', None) is not None:
        nn.init.constant_(module.bias, bias)
64
+
65
+
66
def normal_init(module, mean=0, std=1, bias=0):
    """Initialize ``module.weight`` from N(mean, std^2); bias to a constant."""
    if getattr(module, 'weight', None) is not None:
        nn.init.normal_(module.weight, mean, std)
    if getattr(module, 'bias', None) is not None:
        nn.init.constant_(module.bias, bias)
71
+
72
+
73
def trunc_normal_init(module: nn.Module,
                      mean: float = 0,
                      std: float = 1,
                      a: float = -2,
                      b: float = 2,
                      bias: float = 0) -> None:
    """Initialize ``module.weight`` from a truncated normal on [a, b];
    fill ``module.bias`` with a constant."""
    if getattr(module, 'weight', None) is not None:
        trunc_normal_(module.weight, mean, std, a, b)  # type: ignore
    if getattr(module, 'bias', None) is not None:
        nn.init.constant_(module.bias, bias)  # type: ignore
83
+
84
+
85
def uniform_init(module, a=0, b=1, bias=0):
    """Draw ``module.weight`` from U(a, b); fill bias with ``bias``."""
    weight = getattr(module, 'weight', None)
    if weight is not None:
        nn.init.uniform_(weight, a, b)
    module_bias = getattr(module, 'bias', None)
    if module_bias is not None:
        nn.init.constant_(module_bias, bias)
90
+
91
+
92
def kaiming_init(module,
                 a=0,
                 mode='fan_out',
                 nonlinearity='relu',
                 bias=0,
                 distribution='normal'):
    """Apply Kaiming (He) initialization to ``module.weight``.

    Args:
        module: any object with optional ``weight``/``bias`` attributes.
        a: negative slope of the rectifier (used by ``leaky_relu``).
        mode: ``'fan_in'`` or ``'fan_out'``.
        nonlinearity: nonlinearity name passed to the PyTorch initializer.
        bias: constant used to fill ``module.bias``.
        distribution: ``'uniform'`` or ``'normal'`` Kaiming variant.
    """
    assert distribution in ['uniform', 'normal']
    weight = getattr(module, 'weight', None)
    if weight is not None:
        init_fn = (nn.init.kaiming_uniform_
                   if distribution == 'uniform' else nn.init.kaiming_normal_)
        init_fn(weight, a=a, mode=mode, nonlinearity=nonlinearity)
    module_bias = getattr(module, 'bias', None)
    if module_bias is not None:
        nn.init.constant_(module_bias, bias)
108
+
109
+
110
def caffe2_xavier_init(module, bias=0):
    """Caffe2-style Xavier initialization.

    `XavierFill` in Caffe2 corresponds to `kaiming_uniform_` in PyTorch
    with ``a=1``, ``mode='fan_in'`` and a leaky-relu nonlinearity.
    Acknowledgment to FAIR's internal code.
    """
    kaiming_init(
        module,
        a=1,
        mode='fan_in',
        nonlinearity='leaky_relu',
        bias=bias,
        distribution='uniform')
120
+
121
+
122
def bias_init_with_prob(prior_prob):
    """Return the conv/fc bias value whose sigmoid equals ``prior_prob``.

    Derived from sigmoid(b) = p  =>  b = -log((1 - p) / p).
    """
    return float(-np.log((1 - prior_prob) / prior_prob))
126
+
127
+
128
+ def _get_bases_name(m):
129
+ return [b.__name__ for b in m.__class__.__bases__]
130
+
131
+
132
class BaseInit(object):
    """Base class of the weight initializers.

    Resolves the arguments shared by every concrete initializer.

    Args:
        bias (int | float): the value to fill the bias. Defaults to 0.
        bias_prob (float, optional): the probability for bias
            initialization; when given, the bias value is derived from it
            via ``bias_init_with_prob`` and overrides ``bias``.
            Defaults to None.
        layer (str | list[str], optional): the layer(s) that will be
            initialized. Defaults to None (empty filter).
    """

    def __init__(self, *, bias=0, bias_prob=None, layer=None):
        # Flipped to True by ``_initialize`` when used in override mode,
        # where the whole named submodule is targeted.
        self.wholemodule = False
        if not isinstance(bias, (int, float)):
            raise TypeError(f'bias must be a number, but got a {type(bias)}')

        if bias_prob is not None and not isinstance(bias_prob, float):
            # Single-line message: the original backslash-continued f-string
            # embedded a run of indentation spaces inside the error text.
            raise TypeError(
                f'bias_prob type must be float, but got {type(bias_prob)}')

        if layer is not None:
            if not isinstance(layer, (str, list)):
                raise TypeError(f'layer must be a str or a list of str, '
                                f'but got a {type(layer)}')
        else:
            layer = []

        # A bias probability, when provided, takes precedence over ``bias``.
        if bias_prob is not None:
            self.bias = bias_init_with_prob(bias_prob)
        else:
            self.bias = bias
        # Normalize the layer filter to a list of class names.
        self.layer = [layer] if isinstance(layer, str) else layer

    def _get_init_info(self):
        """Return a human-readable summary used for init bookkeeping."""
        return f'{self.__class__.__name__}, bias={self.bias}'
160
+
161
+
162
@INITIALIZERS.register_module(name='Constant')
class ConstantInit(BaseInit):
    """Initialize module parameters with constant values.

    Args:
        val (int | float): the value to fill the weights in the module with
        bias (int | float): the value to fill the bias. Defaults to 0.
        bias_prob (float, optional): the probability for bias initialization.
            Defaults to None.
        layer (str | list[str], optional): the layer will be initialized.
            Defaults to None.
    """

    def __init__(self, val, **kwargs):
        super().__init__(**kwargs)
        self.val = val

    def __call__(self, module):

        def init(m):
            # Override mode fills every submodule; otherwise only layers
            # whose class (or base-class) name matches the filter.
            if self.wholemodule:
                constant_init(m, self.val, self.bias)
                return
            matched = {m.__class__.__name__, *_get_bases_name(m)}
            if matched & set(self.layer):
                constant_init(m, self.val, self.bias)

        module.apply(init)
        if hasattr(module, '_params_init_info'):
            # Record which initializer touched the parameters.
            update_init_info(module, init_info=self._get_init_info())

    def _get_init_info(self):
        return f'{self.__class__.__name__}: val={self.val}, bias={self.bias}'
197
+
198
+
199
@INITIALIZERS.register_module(name='Xavier')
class XavierInit(BaseInit):
    r"""Initialize module parameters with values according to the method
    described in `Understanding the difficulty of training deep feedforward
    neural networks - Glorot, X. & Bengio, Y. (2010).
    <http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf>`_

    Args:
        gain (int | float): an optional scaling factor. Defaults to 1.
        bias (int | float): the value to fill the bias. Defaults to 0.
        bias_prob (float, optional): the probability for bias initialization.
            Defaults to None.
        distribution (str): distribution either be ``'normal'``
            or ``'uniform'``. Defaults to ``'normal'``.
        layer (str | list[str], optional): the layer will be initialized.
            Defaults to None.
    """

    def __init__(self, gain=1, distribution='normal', **kwargs):
        super().__init__(**kwargs)
        self.gain = gain
        self.distribution = distribution

    def __call__(self, module):

        def init(m):
            # Override mode targets every submodule; otherwise only layers
            # matching the class-name filter.
            if self.wholemodule:
                xavier_init(m, self.gain, self.bias, self.distribution)
                return
            matched = {m.__class__.__name__, *_get_bases_name(m)}
            if matched & set(self.layer):
                xavier_init(m, self.gain, self.bias, self.distribution)

        module.apply(init)
        if hasattr(module, '_params_init_info'):
            update_init_info(module, init_info=self._get_init_info())

    def _get_init_info(self):
        return (f'{self.__class__.__name__}: gain={self.gain}, '
                f'distribution={self.distribution}, bias={self.bias}')
241
+
242
+
243
@INITIALIZERS.register_module(name='Normal')
class NormalInit(BaseInit):
    r"""Initialize module parameters with the values drawn from the normal
    distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`.

    Args:
        mean (int | float):the mean of the normal distribution. Defaults to 0.
        std (int | float): the standard deviation of the normal distribution.
            Defaults to 1.
        bias (int | float): the value to fill the bias. Defaults to 0.
        bias_prob (float, optional): the probability for bias initialization.
            Defaults to None.
        layer (str | list[str], optional): the layer will be initialized.
            Defaults to None.
    """

    def __init__(self, mean=0, std=1, **kwargs):
        super().__init__(**kwargs)
        self.mean = mean
        self.std = std

    def __call__(self, module):

        def init(m):
            # Override mode targets every submodule; otherwise only layers
            # matching the class-name filter.
            if self.wholemodule:
                normal_init(m, self.mean, self.std, self.bias)
                return
            matched = {m.__class__.__name__, *_get_bases_name(m)}
            if matched & set(self.layer):
                normal_init(m, self.mean, self.std, self.bias)

        module.apply(init)
        if hasattr(module, '_params_init_info'):
            update_init_info(module, init_info=self._get_init_info())

    def _get_init_info(self):
        return (f'{self.__class__.__name__}: mean={self.mean},'
                f' std={self.std}, bias={self.bias}')
284
+
285
+
286
@INITIALIZERS.register_module(name='TruncNormal')
class TruncNormalInit(BaseInit):
    r"""Initialize module parameters with the values drawn from the normal
    distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` with values
    outside :math:`[a, b]`.

    Args:
        mean (float): the mean of the normal distribution. Defaults to 0.
        std (float): the standard deviation of the normal distribution.
            Defaults to 1.
        a (float): The minimum cutoff value.
        b (float): The maximum cutoff value.
        bias (float): the value to fill the bias. Defaults to 0.
        bias_prob (float, optional): the probability for bias initialization.
            Defaults to None.
        layer (str | list[str], optional): the layer will be initialized.
            Defaults to None.
    """

    def __init__(self,
                 mean: float = 0,
                 std: float = 1,
                 a: float = -2,
                 b: float = 2,
                 **kwargs) -> None:
        super().__init__(**kwargs)
        self.mean = mean
        self.std = std
        self.a = a
        self.b = b

    def __call__(self, module: nn.Module) -> None:

        def init(m):
            # Override mode targets every submodule; otherwise only layers
            # matching the class-name filter.
            if self.wholemodule:
                trunc_normal_init(m, self.mean, self.std, self.a, self.b,
                                  self.bias)
                return
            matched = {m.__class__.__name__, *_get_bases_name(m)}
            if matched & set(self.layer):
                trunc_normal_init(m, self.mean, self.std, self.a, self.b,
                                  self.bias)

        module.apply(init)
        if hasattr(module, '_params_init_info'):
            update_init_info(module, init_info=self._get_init_info())

    def _get_init_info(self):
        return (f'{self.__class__.__name__}: a={self.a}, b={self.b},'
                f' mean={self.mean}, std={self.std}, bias={self.bias}')
339
+
340
+
341
@INITIALIZERS.register_module(name='Uniform')
class UniformInit(BaseInit):
    r"""Initialize module parameters with values drawn from the uniform
    distribution :math:`\mathcal{U}(a, b)`.

    Args:
        a (int | float): the lower bound of the uniform distribution.
            Defaults to 0.
        b (int | float): the upper bound of the uniform distribution.
            Defaults to 1.
        bias (int | float): the value to fill the bias. Defaults to 0.
        bias_prob (float, optional): the probability for bias initialization.
            Defaults to None.
        layer (str | list[str], optional): the layer will be initialized.
            Defaults to None.
    """

    def __init__(self, a=0, b=1, **kwargs):
        super().__init__(**kwargs)
        self.a = a
        self.b = b

    def __call__(self, module):

        def init(m):
            # Override mode targets every submodule; otherwise only layers
            # matching the class-name filter.
            if self.wholemodule:
                uniform_init(m, self.a, self.b, self.bias)
                return
            matched = {m.__class__.__name__, *_get_bases_name(m)}
            if matched & set(self.layer):
                uniform_init(m, self.a, self.b, self.bias)

        module.apply(init)
        if hasattr(module, '_params_init_info'):
            update_init_info(module, init_info=self._get_init_info())

    def _get_init_info(self):
        return (f'{self.__class__.__name__}: a={self.a},'
                f' b={self.b}, bias={self.bias}')
382
+
383
+
384
@INITIALIZERS.register_module(name='Kaiming')
class KaimingInit(BaseInit):
    r"""Initialize module parameters with the values according to the method
    described in `Delving deep into rectifiers: Surpassing human-level
    performance on ImageNet classification - He, K. et al. (2015).
    <https://www.cv-foundation.org/openaccess/content_iccv_2015/
    papers/He_Delving_Deep_into_ICCV_2015_paper.pdf>`_

    Args:
        a (int | float): the negative slope of the rectifier used after this
            layer (only used with ``'leaky_relu'``). Defaults to 0.
        mode (str): either ``'fan_in'`` or ``'fan_out'``. Choosing
            ``'fan_in'`` preserves the magnitude of the variance of the weights
            in the forward pass. Choosing ``'fan_out'`` preserves the
            magnitudes in the backwards pass. Defaults to ``'fan_out'``.
        nonlinearity (str): the non-linear function (`nn.functional` name),
            recommended to use only with ``'relu'`` or ``'leaky_relu'`` .
            Defaults to 'relu'.
        bias (int | float): the value to fill the bias. Defaults to 0.
        bias_prob (float, optional): the probability for bias initialization.
            Defaults to None.
        distribution (str): distribution either be ``'normal'`` or
            ``'uniform'``. Defaults to ``'normal'``.
        layer (str | list[str], optional): the layer will be initialized.
            Defaults to None.
    """

    def __init__(self,
                 a=0,
                 mode='fan_out',
                 nonlinearity='relu',
                 distribution='normal',
                 **kwargs):
        super().__init__(**kwargs)
        self.a = a
        self.mode = mode
        self.nonlinearity = nonlinearity
        self.distribution = distribution

    def __call__(self, module):

        def init(m):
            # Override mode targets every submodule; otherwise only layers
            # matching the class-name filter.
            if self.wholemodule:
                kaiming_init(m, self.a, self.mode, self.nonlinearity,
                             self.bias, self.distribution)
                return
            matched = {m.__class__.__name__, *_get_bases_name(m)}
            if matched & set(self.layer):
                kaiming_init(m, self.a, self.mode, self.nonlinearity,
                             self.bias, self.distribution)

        module.apply(init)
        if hasattr(module, '_params_init_info'):
            update_init_info(module, init_info=self._get_init_info())

    def _get_init_info(self):
        # NOTE: the stray space in 'distribution =' is kept verbatim to
        # preserve the recorded init-info string byte-for-byte.
        return (f'{self.__class__.__name__}: a={self.a}, mode={self.mode}, '
                f'nonlinearity={self.nonlinearity}, '
                f'distribution ={self.distribution}, bias={self.bias}')
445
+
446
+
447
@INITIALIZERS.register_module(name='Caffe2Xavier')
class Caffe2XavierInit(KaimingInit):
    """Caffe2-style Xavier initialization.

    `XavierFill` in Caffe2 corresponds to `kaiming_uniform_` in PyTorch
    with ``a=1``, ``mode='fan_in'`` and ``nonlinearity='leaky_relu'``.
    Acknowledgment to FAIR's internal code.

    Accepts the same keyword arguments as :class:`BaseInit`
    (``bias``, ``bias_prob``, ``layer``).
    """

    def __init__(self, **kwargs):
        super().__init__(
            a=1,
            mode='fan_in',
            nonlinearity='leaky_relu',
            distribution='uniform',
            **kwargs)

    # The redundant ``__call__`` override that only delegated to
    # ``super().__call__(module)`` has been removed; the inherited
    # ``KaimingInit.__call__`` is used directly with identical behavior.
462
+
463
@INITIALIZERS.register_module(name='Pretrained')
class PretrainedInit(object):
    """Initialize module by loading a pretrained model.

    Args:
        checkpoint (str): the checkpoint file of the pretrained model should
            be load.
        prefix (str, optional): the prefix of a sub-module in the pretrained
            model. it is for loading a part of the pretrained model to
            initialize. For example, if we would like to only load the
            backbone of a detector model, we can set ``prefix='backbone.'``.
            Defaults to None.
        map_location (str): map tensors into proper locations.
    """

    def __init__(self, checkpoint, prefix=None, map_location=None):
        self.checkpoint = checkpoint
        self.prefix = prefix
        self.map_location = map_location

    def __call__(self, module):
        # Imported lazily to avoid a circular import with the runner package.
        from annotator.uniformer.mmcv.runner import (
            _load_checkpoint_with_prefix, load_checkpoint, load_state_dict)
        logger = get_logger('mmcv')
        if self.prefix is None:
            # Load the whole checkpoint straight into ``module``.
            print_log(f'load model from: {self.checkpoint}', logger=logger)
            load_checkpoint(
                module,
                self.checkpoint,
                map_location=self.map_location,
                strict=False,
                logger=logger)
        else:
            # Load only the sub-dict under ``prefix`` (e.g. 'backbone.').
            print_log(
                f'load {self.prefix} in model from: {self.checkpoint}',
                logger=logger)
            state_dict = _load_checkpoint_with_prefix(
                self.prefix, self.checkpoint, map_location=self.map_location)
            load_state_dict(module, state_dict, strict=False, logger=logger)

        if hasattr(module, '_params_init_info'):
            update_init_info(module, init_info=self._get_init_info())

    def _get_init_info(self):
        return f'{self.__class__.__name__}: load from {self.checkpoint}'
509
+
510
+
511
def _initialize(module, cfg, wholemodule=False):
    """Build the initializer described by ``cfg`` and run it on ``module``.

    The ``wholemodule`` flag is for override mode: override entries carry no
    ``layer`` key, so the initializer must target the entire named submodule
    rather than filtering by layer class name.
    """
    initializer = build_from_cfg(cfg, INITIALIZERS)
    initializer.wholemodule = wholemodule
    initializer(module)
518
+
519
+
520
+ def _initialize_override(module, override, cfg):
521
+ if not isinstance(override, (dict, list)):
522
+ raise TypeError(f'override must be a dict or a list of dict, \
523
+ but got {type(override)}')
524
+
525
+ override = [override] if isinstance(override, dict) else override
526
+
527
+ for override_ in override:
528
+
529
+ cp_override = copy.deepcopy(override_)
530
+ name = cp_override.pop('name', None)
531
+ if name is None:
532
+ raise ValueError('`override` must contain the key "name",'
533
+ f'but got {cp_override}')
534
+ # if override only has name key, it means use args in init_cfg
535
+ if not cp_override:
536
+ cp_override.update(cfg)
537
+ # if override has name key and other args except type key, it will
538
+ # raise error
539
+ elif 'type' not in cp_override.keys():
540
+ raise ValueError(
541
+ f'`override` need "type" key, but got {cp_override}')
542
+
543
+ if hasattr(module, name):
544
+ _initialize(getattr(module, name), cp_override, wholemodule=True)
545
+ else:
546
+ raise RuntimeError(f'module did not have attribute {name}, '
547
+ f'but init_cfg is {cp_override}.')
548
+
549
+
550
def initialize(module, init_cfg):
    """Initialize a module.

    Args:
        module (``torch.nn.Module``): the module will be initialized.
        init_cfg (dict | list[dict]): initialization configuration dict to
            define initializer. OpenMMLab has implemented 6 initializers
            including ``Constant``, ``Xavier``, ``Normal``, ``Uniform``,
            ``Kaiming``, and ``Pretrained``.

    Raises:
        TypeError: if ``init_cfg`` is neither a dict nor a list of dict.

    Example:
        >>> module = nn.Linear(2, 3, bias=True)
        >>> init_cfg = dict(type='Constant', layer='Linear', val =1 , bias =2)
        >>> initialize(module, init_cfg)

        >>> module = nn.Sequential(nn.Conv1d(3, 1, 3), nn.Linear(1,2))
        >>> # define key ``'layer'`` for initializing layer with different
        >>> # configuration
        >>> init_cfg = [dict(type='Constant', layer='Conv1d', val=1),
                dict(type='Constant', layer='Linear', val=2)]
        >>> initialize(module, init_cfg)

        >>> # define key``'override'`` to initialize some specific part in
        >>> # module
        >>> class FooNet(nn.Module):
        >>>     def __init__(self):
        >>>         super().__init__()
        >>>         self.feat = nn.Conv2d(3, 16, 3)
        >>>         self.reg = nn.Conv2d(16, 10, 3)
        >>>         self.cls = nn.Conv2d(16, 5, 3)
        >>> model = FooNet()
        >>> init_cfg = dict(type='Constant', val=1, bias=2, layer='Conv2d',
        >>>     override=dict(type='Constant', name='reg', val=3, bias=4))
        >>> initialize(model, init_cfg)

        >>> model = ResNet(depth=50)
        >>> # Initialize weights with the pretrained model.
        >>> init_cfg = dict(type='Pretrained',
                checkpoint='torchvision://resnet50')
        >>> initialize(model, init_cfg)

        >>> # Initialize weights of a sub-module with the specific part of
        >>> # a pretrained model by using "prefix".
        >>> url = 'http://download.openmmlab.com/mmdetection/v2.0/retinanet/'\
        >>>     'retinanet_r50_fpn_1x_coco/'\
        >>>     'retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth'
        >>> init_cfg = dict(type='Pretrained',
                checkpoint=url, prefix='backbone.')
    """
    if not isinstance(init_cfg, (dict, list)):
        # Single-line message: the original backslash-continued f-string
        # embedded a run of indentation spaces in the error text.
        raise TypeError(f'init_cfg must be a dict or a list of dict, '
                        f'but got {type(init_cfg)}')

    if isinstance(init_cfg, dict):
        init_cfg = [init_cfg]

    for cfg in init_cfg:
        # should deeply copy the original config because cfg may be used by
        # other modules, e.g., one init_cfg shared by multiple bottleneck
        # blocks, the expected cfg will be changed after pop and will change
        # the initialization behavior of other modules
        cp_cfg = copy.deepcopy(cfg)
        override = cp_cfg.pop('override', None)
        _initialize(module, cp_cfg)

        if override is not None:
            # The override entries target whole submodules, so the base
            # layer filter must not leak into them.
            cp_cfg.pop('layer', None)
            _initialize_override(module, override, cp_cfg)
        # Otherwise all attributes in module share the same initialization.
620
+
621
+
622
+ def _no_grad_trunc_normal_(tensor: Tensor, mean: float, std: float, a: float,
623
+ b: float) -> Tensor:
624
+ # Method based on
625
+ # https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
626
+ # Modified from
627
+ # https://github.com/pytorch/pytorch/blob/master/torch/nn/init.py
628
+ def norm_cdf(x):
629
+ # Computes standard normal cumulative distribution function
630
+ return (1. + math.erf(x / math.sqrt(2.))) / 2.
631
+
632
+ if (mean < a - 2 * std) or (mean > b + 2 * std):
633
+ warnings.warn(
634
+ 'mean is more than 2 std from [a, b] in nn.init.trunc_normal_. '
635
+ 'The distribution of values may be incorrect.',
636
+ stacklevel=2)
637
+
638
+ with torch.no_grad():
639
+ # Values are generated by using a truncated uniform distribution and
640
+ # then using the inverse CDF for the normal distribution.
641
+ # Get upper and lower cdf values
642
+ lower = norm_cdf((a - mean) / std)
643
+ upper = norm_cdf((b - mean) / std)
644
+
645
+ # Uniformly fill tensor with values from [lower, upper], then translate
646
+ # to [2lower-1, 2upper-1].
647
+ tensor.uniform_(2 * lower - 1, 2 * upper - 1)
648
+
649
+ # Use inverse cdf transform for normal distribution to get truncated
650
+ # standard normal
651
+ tensor.erfinv_()
652
+
653
+ # Transform to proper mean, std
654
+ tensor.mul_(std * math.sqrt(2.))
655
+ tensor.add_(mean)
656
+
657
+ # Clamp to ensure it's in the proper range
658
+ tensor.clamp_(min=a, max=b)
659
+ return tensor
660
+
661
+
662
def trunc_normal_(tensor: Tensor,
                  mean: float = 0.,
                  std: float = 1.,
                  a: float = -2.,
                  b: float = 2.) -> Tensor:
    r"""Fill the input Tensor with values drawn from a truncated normal
    distribution.

    The values are effectively drawn from
    :math:`\mathcal{N}(\text{mean}, \text{std}^2)` with values outside
    :math:`[a, b]` redrawn until they are within the bounds. The method
    works best when :math:`a \leq \text{mean} \leq b`.

    Modified from
    https://github.com/pytorch/pytorch/blob/master/torch/nn/init.py

    Args:
        tensor (``torch.Tensor``): an n-dimensional `torch.Tensor`.
        mean (float): the mean of the normal distribution.
        std (float): the standard deviation of the normal distribution.
        a (float): the minimum cutoff value.
        b (float): the maximum cutoff value.

    Returns:
        ``torch.Tensor``: the same tensor, filled in place.
    """
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
Text2Video-Zero-main/annotator/uniformer/mmcv/cnn/vgg.py ADDED
@@ -0,0 +1,175 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import logging
3
+
4
+ import torch.nn as nn
5
+
6
+ from .utils import constant_init, kaiming_init, normal_init
7
+
8
+
9
def conv3x3(in_planes, out_planes, dilation=1):
    """3x3 convolution with padding.

    Padding equals the dilation so the spatial size is preserved at
    stride 1.
    """
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        dilation=dilation,
        padding=dilation)
17
+
18
+
19
def make_vgg_layer(inplanes,
                   planes,
                   num_blocks,
                   dilation=1,
                   with_bn=False,
                   ceil_mode=False):
    """Build one VGG stage: ``num_blocks`` conv(-BN)-ReLU units followed by
    a 2x2 max pooling layer.

    Returns a plain list of modules (not an ``nn.Sequential``).
    """
    units = []
    in_ch = inplanes
    for _ in range(num_blocks):
        block = [conv3x3(in_ch, planes, dilation)]
        if with_bn:
            block.append(nn.BatchNorm2d(planes))
        block.append(nn.ReLU(inplace=True))
        units.extend(block)
        # After the first conv, subsequent convs map planes -> planes.
        in_ch = planes
    units.append(nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=ceil_mode))
    return units
35
+
36
+
37
class VGG(nn.Module):
    """VGG backbone.

    Args:
        depth (int): Depth of vgg, from {11, 13, 16, 19}.
        with_bn (bool): Use BatchNorm or not.
        num_classes (int): number of classes for classification. A value
            <= 0 disables the classifier head.
        num_stages (int): VGG stages, normally 5.
        dilations (Sequence[int]): Dilation of each stage.
        out_indices (Sequence[int]): Output from which stages.
        frozen_stages (int): Stages to be frozen (all param fixed). -1 means
            not freezing any parameters.
        bn_eval (bool): Whether to set BN layers as eval mode, namely, freeze
            running stats (mean and var).
        bn_frozen (bool): Whether to freeze weight and bias of BN layers.
        ceil_mode (bool): Whether max pooling uses ceil mode.
        with_last_pool (bool): Whether to keep the pooling layer of the
            final stage.
    """

    # Maps depth -> number of conv blocks in each of the 5 stages.
    arch_settings = {
        11: (1, 1, 2, 2, 2),
        13: (2, 2, 2, 2, 2),
        16: (2, 2, 3, 3, 3),
        19: (2, 2, 4, 4, 4)
    }

    def __init__(self,
                 depth,
                 with_bn=False,
                 num_classes=-1,
                 num_stages=5,
                 dilations=(1, 1, 1, 1, 1),
                 out_indices=(0, 1, 2, 3, 4),
                 frozen_stages=-1,
                 bn_eval=True,
                 bn_frozen=False,
                 ceil_mode=False,
                 with_last_pool=True):
        super(VGG, self).__init__()
        if depth not in self.arch_settings:
            raise KeyError(f'invalid depth {depth} for vgg')
        assert num_stages >= 1 and num_stages <= 5
        stage_blocks = self.arch_settings[depth]
        self.stage_blocks = stage_blocks[:num_stages]
        assert len(dilations) == num_stages
        # NOTE(review): forward() compares out_indices against stage indices
        # 0..num_stages-1, so a value equal to num_stages never emits an
        # output -- confirm whether `<` was intended here.
        assert max(out_indices) <= num_stages

        self.num_classes = num_classes
        self.out_indices = out_indices
        self.frozen_stages = frozen_stages
        self.bn_eval = bn_eval
        self.bn_frozen = bn_frozen

        # Build the stages as one flat module list; ``range_sub_modules``
        # records the [start, end) slice of each stage inside it.
        self.inplanes = 3
        start_idx = 0
        vgg_layers = []
        self.range_sub_modules = []
        for i, num_blocks in enumerate(self.stage_blocks):
            # Each block contributes conv + ReLU (+ BN); each stage adds
            # one trailing pooling layer.
            num_modules = num_blocks * (2 + with_bn) + 1
            end_idx = start_idx + num_modules
            dilation = dilations[i]
            # Channel widths double per stage (64, 128, 256, 512) and cap
            # at 512 for the fifth stage.
            planes = 64 * 2**i if i < 4 else 512
            vgg_layer = make_vgg_layer(
                self.inplanes,
                planes,
                num_blocks,
                dilation=dilation,
                with_bn=with_bn,
                ceil_mode=ceil_mode)
            vgg_layers.extend(vgg_layer)
            self.inplanes = planes
            self.range_sub_modules.append([start_idx, end_idx])
            start_idx = end_idx
        if not with_last_pool:
            # Drop the final pooling layer and shrink the last stage's
            # recorded range accordingly.
            vgg_layers.pop(-1)
            self.range_sub_modules[-1][1] -= 1
        self.module_name = 'features'
        self.add_module(self.module_name, nn.Sequential(*vgg_layers))

        if self.num_classes > 0:
            # Classic VGG classifier head; assumes 7x7x512 input features.
            self.classifier = nn.Sequential(
                nn.Linear(512 * 7 * 7, 4096),
                nn.ReLU(True),
                nn.Dropout(),
                nn.Linear(4096, 4096),
                nn.ReLU(True),
                nn.Dropout(),
                nn.Linear(4096, num_classes),
            )

    def init_weights(self, pretrained=None):
        """Initialize weights from a checkpoint path or with defaults.

        Args:
            pretrained (str, optional): checkpoint path. When None, convs
                get Kaiming init, BN layers constant init, and linear
                layers a small normal init.

        Raises:
            TypeError: if ``pretrained`` is neither a str nor None.
        """
        if isinstance(pretrained, str):
            logger = logging.getLogger()
            from ..runner import load_checkpoint
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, nn.BatchNorm2d):
                    constant_init(m, 1)
                elif isinstance(m, nn.Linear):
                    normal_init(m, std=0.01)
        else:
            raise TypeError('pretrained must be a str or None')

    def forward(self, x):
        """Run the backbone; return per-stage features (and logits when the
        classifier head exists).

        Returns a single tensor when only one output is collected,
        otherwise a tuple.
        """
        outs = []
        vgg_layers = getattr(self, self.module_name)
        for i in range(len(self.stage_blocks)):
            # Run exactly the modules belonging to stage ``i``.
            for j in range(*self.range_sub_modules[i]):
                vgg_layer = vgg_layers[j]
                x = vgg_layer(x)
            if i in self.out_indices:
                outs.append(x)
        if self.num_classes > 0:
            x = x.view(x.size(0), -1)
            x = self.classifier(x)
            outs.append(x)
        if len(outs) == 1:
            return outs[0]
        else:
            return tuple(outs)

    def train(self, mode=True):
        """Switch train/eval mode, honoring BN-freezing and frozen stages."""
        super(VGG, self).train(mode)
        if self.bn_eval:
            # Keep BN running stats frozen even in train mode.
            for m in self.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eval()
                    if self.bn_frozen:
                        # Also stop gradients through BN affine params.
                        for params in m.parameters():
                            params.requires_grad = False
        vgg_layers = getattr(self, self.module_name)
        if mode and self.frozen_stages >= 0:
            # Put the first ``frozen_stages`` stages in eval mode and
            # detach their parameters from optimization.
            for i in range(self.frozen_stages):
                for j in range(*self.range_sub_modules[i]):
                    mod = vgg_layers[j]
                    mod.eval()
                    for param in mod.parameters():
                        param.requires_grad = False
Text2Video-Zero-main/annotator/uniformer/mmcv/ops/__init__.py ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ from .assign_score_withk import assign_score_withk
3
+ from .ball_query import ball_query
4
+ from .bbox import bbox_overlaps
5
+ from .border_align import BorderAlign, border_align
6
+ from .box_iou_rotated import box_iou_rotated
7
+ from .carafe import CARAFE, CARAFENaive, CARAFEPack, carafe, carafe_naive
8
+ from .cc_attention import CrissCrossAttention
9
+ from .contour_expand import contour_expand
10
+ from .corner_pool import CornerPool
11
+ from .correlation import Correlation
12
+ from .deform_conv import DeformConv2d, DeformConv2dPack, deform_conv2d
13
+ from .deform_roi_pool import (DeformRoIPool, DeformRoIPoolPack,
14
+ ModulatedDeformRoIPoolPack, deform_roi_pool)
15
+ from .deprecated_wrappers import Conv2d_deprecated as Conv2d
16
+ from .deprecated_wrappers import ConvTranspose2d_deprecated as ConvTranspose2d
17
+ from .deprecated_wrappers import Linear_deprecated as Linear
18
+ from .deprecated_wrappers import MaxPool2d_deprecated as MaxPool2d
19
+ from .focal_loss import (SigmoidFocalLoss, SoftmaxFocalLoss,
20
+ sigmoid_focal_loss, softmax_focal_loss)
21
+ from .furthest_point_sample import (furthest_point_sample,
22
+ furthest_point_sample_with_dist)
23
+ from .fused_bias_leakyrelu import FusedBiasLeakyReLU, fused_bias_leakyrelu
24
+ from .gather_points import gather_points
25
+ from .group_points import GroupAll, QueryAndGroup, grouping_operation
26
+ from .info import (get_compiler_version, get_compiling_cuda_version,
27
+ get_onnxruntime_op_path)
28
+ from .iou3d import boxes_iou_bev, nms_bev, nms_normal_bev
29
+ from .knn import knn
30
+ from .masked_conv import MaskedConv2d, masked_conv2d
31
+ from .modulated_deform_conv import (ModulatedDeformConv2d,
32
+ ModulatedDeformConv2dPack,
33
+ modulated_deform_conv2d)
34
+ from .multi_scale_deform_attn import MultiScaleDeformableAttention
35
+ from .nms import batched_nms, nms, nms_match, nms_rotated, soft_nms
36
+ from .pixel_group import pixel_group
37
+ from .point_sample import (SimpleRoIAlign, point_sample,
38
+ rel_roi_point_to_rel_img_point)
39
+ from .points_in_boxes import (points_in_boxes_all, points_in_boxes_cpu,
40
+ points_in_boxes_part)
41
+ from .points_sampler import PointsSampler
42
+ from .psa_mask import PSAMask
43
+ from .roi_align import RoIAlign, roi_align
44
+ from .roi_align_rotated import RoIAlignRotated, roi_align_rotated
45
+ from .roi_pool import RoIPool, roi_pool
46
+ from .roiaware_pool3d import RoIAwarePool3d
47
+ from .roipoint_pool3d import RoIPointPool3d
48
+ from .saconv import SAConv2d
49
+ from .scatter_points import DynamicScatter, dynamic_scatter
50
+ from .sync_bn import SyncBatchNorm
51
+ from .three_interpolate import three_interpolate
52
+ from .three_nn import three_nn
53
+ from .tin_shift import TINShift, tin_shift
54
+ from .upfirdn2d import upfirdn2d
55
+ from .voxelize import Voxelization, voxelization
56
+
57
+ __all__ = [
58
+ 'bbox_overlaps', 'CARAFE', 'CARAFENaive', 'CARAFEPack', 'carafe',
59
+ 'carafe_naive', 'CornerPool', 'DeformConv2d', 'DeformConv2dPack',
60
+ 'deform_conv2d', 'DeformRoIPool', 'DeformRoIPoolPack',
61
+ 'ModulatedDeformRoIPoolPack', 'deform_roi_pool', 'SigmoidFocalLoss',
62
+ 'SoftmaxFocalLoss', 'sigmoid_focal_loss', 'softmax_focal_loss',
63
+ 'get_compiler_version', 'get_compiling_cuda_version',
64
+ 'get_onnxruntime_op_path', 'MaskedConv2d', 'masked_conv2d',
65
+ 'ModulatedDeformConv2d', 'ModulatedDeformConv2dPack',
66
+ 'modulated_deform_conv2d', 'batched_nms', 'nms', 'soft_nms', 'nms_match',
67
+ 'RoIAlign', 'roi_align', 'RoIPool', 'roi_pool', 'SyncBatchNorm', 'Conv2d',
68
+ 'ConvTranspose2d', 'Linear', 'MaxPool2d', 'CrissCrossAttention', 'PSAMask',
69
+ 'point_sample', 'rel_roi_point_to_rel_img_point', 'SimpleRoIAlign',
70
+ 'SAConv2d', 'TINShift', 'tin_shift', 'assign_score_withk',
71
+ 'box_iou_rotated', 'RoIPointPool3d', 'nms_rotated', 'knn', 'ball_query',
72
+ 'upfirdn2d', 'FusedBiasLeakyReLU', 'fused_bias_leakyrelu',
73
+ 'RoIAlignRotated', 'roi_align_rotated', 'pixel_group', 'QueryAndGroup',
74
+ 'GroupAll', 'grouping_operation', 'contour_expand', 'three_nn',
75
+ 'three_interpolate', 'MultiScaleDeformableAttention', 'BorderAlign',
76
+ 'border_align', 'gather_points', 'furthest_point_sample',
77
+ 'furthest_point_sample_with_dist', 'PointsSampler', 'Correlation',
78
+ 'boxes_iou_bev', 'nms_bev', 'nms_normal_bev', 'Voxelization',
79
+ 'voxelization', 'dynamic_scatter', 'DynamicScatter', 'RoIAwarePool3d',
80
+ 'points_in_boxes_part', 'points_in_boxes_cpu', 'points_in_boxes_all'
81
+ ]
Text2Video-Zero-main/annotator/uniformer/mmcv/ops/assign_score_withk.py ADDED
@@ -0,0 +1,123 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from torch.autograd import Function
2
+
3
+ from ..utils import ext_loader
4
+
5
+ ext_module = ext_loader.load_ext(
6
+ '_ext', ['assign_score_withk_forward', 'assign_score_withk_backward'])
7
+
8
+
9
class AssignScoreWithK(Function):
    r"""Perform weighted sum to generate output features according to scores.
    Modified from `PAConv <https://github.com/CVMI-Lab/PAConv/tree/main/
    scene_seg/lib/paconv_lib/src/gpu>`_.

    This is a memory-efficient CUDA implementation of assign_scores operation,
    which first transform all point features with weight bank, then assemble
    neighbor features with ``knn_idx`` and perform weighted sum of ``scores``.

    See the `paper <https://arxiv.org/pdf/2103.14635.pdf>`_ appendix Sec. D for
    more detailed descriptions.

    Note:
        This implementation assumes using ``neighbor`` kernel input, which is
        (point_features - center_features, point_features).
        See https://github.com/CVMI-Lab/PAConv/blob/main/scene_seg/model/
        pointnet2/paconv.py#L128 for more details.
    """

    @staticmethod
    def forward(ctx,
                scores,
                point_features,
                center_features,
                knn_idx,
                aggregate='sum'):
        """
        Args:
            scores (torch.Tensor): (B, npoint, K, M), predicted scores to
                aggregate weight matrices in the weight bank.
                ``npoint`` is the number of sampled centers.
                ``K`` is the number of queried neighbors.
                ``M`` is the number of weight matrices in the weight bank.
            point_features (torch.Tensor): (B, N, M, out_dim)
                Pre-computed point features to be aggregated.
            center_features (torch.Tensor): (B, N, M, out_dim)
                Pre-computed center features to be aggregated.
            knn_idx (torch.Tensor): (B, npoint, K), index of sampled kNN.
                We assume the first idx in each row is the idx of the center.
            aggregate (str, optional): Aggregation method.
                Can be 'sum', 'avg' or 'max'. Defaults: 'sum'.

        Returns:
            torch.Tensor: (B, out_dim, npoint, K), the aggregated features.
        """
        # Map the aggregation-mode name to the integer flag expected by the
        # underlying CUDA kernel.
        agg = {'sum': 0, 'avg': 1, 'max': 2}

        B, N, M, out_dim = point_features.size()
        _, npoint, K, _ = scores.size()

        # Output is laid out channel-first: (B, out_dim, npoint, K).
        output = point_features.new_zeros((B, out_dim, npoint, K))
        ext_module.assign_score_withk_forward(
            point_features.contiguous(),
            center_features.contiguous(),
            scores.contiguous(),
            knn_idx.contiguous(),
            output,
            B=B,
            N0=N,
            N1=npoint,
            M=M,
            K=K,
            O=out_dim,
            aggregate=agg[aggregate])

        # Keep the inputs (and the aggregation flag) for the backward pass.
        ctx.save_for_backward(output, point_features, center_features, scores,
                              knn_idx)
        ctx.agg = agg[aggregate]

        return output

    @staticmethod
    def backward(ctx, grad_out):
        """
        Args:
            grad_out (torch.Tensor): (B, out_dim, npoint, K)

        Returns:
            grad_scores (torch.Tensor): (B, npoint, K, M)
            grad_point_features (torch.Tensor): (B, N, M, out_dim)
            grad_center_features (torch.Tensor): (B, N, M, out_dim)
        """
        _, point_features, center_features, scores, knn_idx = ctx.saved_tensors

        agg = ctx.agg

        B, N, M, out_dim = point_features.size()
        _, npoint, K, _ = scores.size()

        # Gradient buffers, one per differentiable input.
        grad_point_features = point_features.new_zeros(point_features.shape)
        grad_center_features = center_features.new_zeros(center_features.shape)
        grad_scores = scores.new_zeros(scores.shape)

        ext_module.assign_score_withk_backward(
            grad_out.contiguous(),
            point_features.contiguous(),
            center_features.contiguous(),
            scores.contiguous(),
            knn_idx.contiguous(),
            grad_point_features,
            grad_center_features,
            grad_scores,
            B=B,
            N0=N,
            N1=npoint,
            M=M,
            K=K,
            O=out_dim,
            aggregate=agg)

        # ``knn_idx`` and ``aggregate`` receive no gradient.
        return grad_scores, grad_point_features, \
            grad_center_features, None, None


# Functional alias: ``assign_score_withk(scores, point_features, ...)``.
assign_score_withk = AssignScoreWithK.apply
Text2Video-Zero-main/annotator/uniformer/mmcv/ops/ball_query.py ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import torch
3
+ from torch.autograd import Function
4
+
5
+ from ..utils import ext_loader
6
+
7
+ ext_module = ext_loader.load_ext('_ext', ['ball_query_forward'])
8
+
9
+
10
class BallQuery(Function):
    """Find nearby points in spherical space."""

    @staticmethod
    def forward(ctx, min_radius: float, max_radius: float, sample_num: int,
                xyz: torch.Tensor, center_xyz: torch.Tensor) -> torch.Tensor:
        """
        Args:
            min_radius (float): minimum radius of the balls.
            max_radius (float): maximum radius of the balls.
            sample_num (int): maximum number of features in the balls.
            xyz (Tensor): (B, N, 3) xyz coordinates of the features.
            center_xyz (Tensor): (B, npoint, 3) centers of the ball query.

        Returns:
            Tensor: (B, npoint, nsample) tensor with the indices of
                the features that form the query balls.
        """
        # The extension op requires contiguous memory and a valid radius
        # interval.
        assert center_xyz.is_contiguous()
        assert xyz.is_contiguous()
        assert min_radius < max_radius

        B, N, _ = xyz.size()
        npoint = center_xyz.size(1)
        # Integer index buffer filled in-place by the extension op.
        idx = xyz.new_zeros(B, npoint, sample_num, dtype=torch.int)

        ext_module.ball_query_forward(
            center_xyz,
            xyz,
            idx,
            b=B,
            n=N,
            m=npoint,
            min_radius=min_radius,
            max_radius=max_radius,
            nsample=sample_num)
        # Indices carry no gradient; parrots lacks this autograd API.
        if torch.__version__ != 'parrots':
            ctx.mark_non_differentiable(idx)
        return idx

    @staticmethod
    def backward(ctx, a=None):
        # Pure indexing op: no input receives a gradient.
        return None, None, None, None


# Functional alias: ``ball_query(min_radius, max_radius, nsample, xyz, ...)``.
ball_query = BallQuery.apply
Text2Video-Zero-main/annotator/uniformer/mmcv/ops/box_iou_rotated.py ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ from ..utils import ext_loader
3
+
4
+ ext_module = ext_loader.load_ext('_ext', ['box_iou_rotated'])
5
+
6
+
7
def box_iou_rotated(bboxes1, bboxes2, mode='iou', aligned=False):
    """Return intersection-over-union (Jaccard index) of boxes.

    Both sets of boxes are expected to be in
    (x_center, y_center, width, height, angle) format.

    If ``aligned`` is ``False``, then calculate the ious between each bbox
    of bboxes1 and bboxes2, otherwise the ious between each aligned pair of
    bboxes1 and bboxes2.

    Arguments:
        bboxes1 (Tensor): rotated bboxes 1. \
            It has shape (N, 5), indicating (x, y, w, h, theta) for each row.
            Note that theta is in radian.
        bboxes2 (Tensor): rotated bboxes 2. \
            It has shape (M, 5), indicating (x, y, w, h, theta) for each row.
            Note that theta is in radian.
        mode (str): "iou" (intersection over union) or iof (intersection over
            foreground).
        aligned (bool): If True, IoUs are computed between aligned pairs,
            so bboxes1 and bboxes2 must have the same number of rows.

    Returns:
        ious(Tensor): shape (N, M) if aligned == False else shape (N,)
    """
    assert mode in ['iou', 'iof']
    # Encode the mode as the integer flag expected by the extension op.
    mode_dict = {'iou': 0, 'iof': 1}
    mode_flag = mode_dict[mode]
    rows = bboxes1.size(0)
    cols = bboxes2.size(0)
    if aligned:
        ious = bboxes1.new_zeros(rows)
    else:
        # Flat (N*M,) buffer; reshaped to (N, M) after the op fills it.
        ious = bboxes1.new_zeros((rows * cols))
    bboxes1 = bboxes1.contiguous()
    bboxes2 = bboxes2.contiguous()
    ext_module.box_iou_rotated(
        bboxes1, bboxes2, ious, mode_flag=mode_flag, aligned=aligned)
    if not aligned:
        ious = ious.view(rows, cols)
    return ious
Text2Video-Zero-main/annotator/uniformer/mmcv/ops/cc_attention.py ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+
6
+ from annotator.uniformer.mmcv.cnn import PLUGIN_LAYERS, Scale
7
+
8
+
9
def NEG_INF_DIAG(n, device):
    """Returns a diagonal matrix of size [n, n].

    The diagonal are all "-inf". This is for avoiding calculating the
    overlapped element in the Criss-Cross twice.
    """
    diag_values = torch.full((n, ), float('-inf'), device=device)
    return torch.diag(diag_values)
16
+
17
+
18
@PLUGIN_LAYERS.register_module()
class CrissCrossAttention(nn.Module):
    """Criss-Cross Attention Module.

    .. note::
        Before v1.3.13, we use a CUDA op. Since v1.3.13, we switch
        to a pure PyTorch and equivalent implementation. For more
        details, please refer to https://github.com/open-mmlab/mmcv/pull/1201.

    Speed comparison for one forward pass

    - Input size: [2,512,97,97]
    - Device: 1 NVIDIA GeForce RTX 2080 Ti

    +-----------------------+---------------+------------+---------------+
    |                       |PyTorch version|CUDA version|Relative speed |
    +=======================+===============+============+===============+
    |with torch.no_grad()   |0.00554402 s   |0.0299619 s |5.4x           |
    +-----------------------+---------------+------------+---------------+
    |no with torch.no_grad()|0.00562803 s   |0.0301349 s |5.4x           |
    +-----------------------+---------------+------------+---------------+

    Args:
        in_channels (int): Channels of the input feature map.
    """

    def __init__(self, in_channels):
        super().__init__()
        # Query/key are projected down to C//8 channels; value keeps C.
        self.query_conv = nn.Conv2d(in_channels, in_channels // 8, 1)
        self.key_conv = nn.Conv2d(in_channels, in_channels // 8, 1)
        self.value_conv = nn.Conv2d(in_channels, in_channels, 1)
        # Learnable residual scale, initialized to 0 (presumably so the
        # block starts as an identity mapping — Scale is an mmcv layer).
        self.gamma = Scale(0.)
        self.in_channels = in_channels

    def forward(self, x):
        """forward function of Criss-Cross Attention.

        Args:
            x (Tensor): Input feature. \
                shape (batch_size, in_channels, height, width)
        Returns:
            Tensor: Output of the layer, with shape of \
                (batch_size, in_channels, height, width)
        """
        B, C, H, W = x.size()
        query = self.query_conv(x)
        key = self.key_conv(x)
        value = self.value_conv(x)
        # Column (H-axis) energies; the -inf diagonal masks the center
        # pixel so it is not counted again by the row branch below.
        energy_H = torch.einsum('bchw,bciw->bwhi', query, key) + NEG_INF_DIAG(
            H, query.device)
        energy_H = energy_H.transpose(1, 2)
        # Row (W-axis) energies.
        energy_W = torch.einsum('bchw,bchj->bhwj', query, key)
        # Softmax jointly over the H + W candidate positions.
        attn = F.softmax(
            torch.cat([energy_H, energy_W], dim=-1), dim=-1)  # [B,H,W,(H+W)]
        # Weighted sum of values along the column, then along the row.
        out = torch.einsum('bciw,bhwi->bchw', value, attn[..., :H])
        out += torch.einsum('bchj,bhwj->bchw', value, attn[..., H:])

        # Scaled residual connection.
        out = self.gamma(out) + x
        out = out.contiguous()

        return out

    def __repr__(self):
        s = self.__class__.__name__
        s += f'(in_channels={self.in_channels})'
        return s
Text2Video-Zero-main/annotator/uniformer/mmcv/ops/correlation.py ADDED
@@ -0,0 +1,196 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import torch
3
+ from torch import Tensor, nn
4
+ from torch.autograd import Function
5
+ from torch.autograd.function import once_differentiable
6
+ from torch.nn.modules.utils import _pair
7
+
8
+ from ..utils import ext_loader
9
+
10
+ ext_module = ext_loader.load_ext(
11
+ '_ext', ['correlation_forward', 'correlation_backward'])
12
+
13
+
14
class CorrelationFunction(Function):
    """Autograd wrapper around the ``correlation_forward`` /
    ``correlation_backward`` extension ops used by :class:`Correlation`."""

    @staticmethod
    def forward(ctx,
                input1,
                input2,
                kernel_size=1,
                max_displacement=1,
                stride=1,
                padding=1,
                dilation=1,
                dilation_patch=1):

        # Keep both inputs for the backward pass.
        ctx.save_for_backward(input1, input2)

        # Normalise every scalar hyper-parameter to an (h, w) pair and
        # remember it on the autograd context.
        ctx.kernel_size = _pair(kernel_size)
        ctx.patch_size = max_displacement * 2 + 1
        ctx.stride = _pair(stride)
        ctx.padding = _pair(padding)
        ctx.dilation = _pair(dilation)
        ctx.dilation_patch = _pair(dilation_patch)

        kH, kW = ctx.kernel_size
        patch_size = ctx.patch_size
        dH, dW = ctx.stride
        padH, padW = ctx.padding
        dilationH, dilationW = ctx.dilation
        dilation_patchH, dilation_patchW = ctx.dilation_patch

        # Allocate the (B, patch, patch, oH, oW) output volume.
        output = input1.new_zeros(
            CorrelationFunction._output_size(ctx, input1))

        ext_module.correlation_forward(
            input1,
            input2,
            output,
            kH=kH,
            kW=kW,
            patchH=patch_size,
            patchW=patch_size,
            padH=padH,
            padW=padW,
            dilationH=dilationH,
            dilationW=dilationW,
            dilation_patchH=dilation_patchH,
            dilation_patchW=dilation_patchW,
            dH=dH,
            dW=dW)

        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        input1, input2 = ctx.saved_tensors

        kH, kW = ctx.kernel_size
        patch_size = ctx.patch_size
        padH, padW = ctx.padding
        dilationH, dilationW = ctx.dilation
        dilation_patchH, dilation_patchW = ctx.dilation_patch
        dH, dW = ctx.stride

        # Gradient buffers matching the two inputs.
        grad_input1 = torch.zeros_like(input1)
        grad_input2 = torch.zeros_like(input2)

        ext_module.correlation_backward(
            grad_output,
            input1,
            input2,
            grad_input1,
            grad_input2,
            kH=kH,
            kW=kW,
            patchH=patch_size,
            patchW=patch_size,
            padH=padH,
            padW=padW,
            dilationH=dilationH,
            dilationW=dilationW,
            dilation_patchH=dilation_patchH,
            dilation_patchW=dilation_patchW,
            dH=dH,
            dW=dW)
        # The six integer hyper-parameters receive no gradient.
        return grad_input1, grad_input2, None, None, None, None, None, None

    @staticmethod
    def _output_size(ctx, input1):
        """Compute the (B, patch, patch, oH, oW) output shape."""
        in_h, in_w = input1.size(2), input1.size(3)
        batch_size = input1.size(0)
        kH, kW = ctx.kernel_size
        patch_size = ctx.patch_size
        dH, dW = ctx.stride
        padH, padW = ctx.padding
        dilationH, dilationW = ctx.dilation
        # Effective (dilated) kernel extent.
        dilated_kh = (kH - 1) * dilationH + 1
        dilated_kw = (kW - 1) * dilationW + 1

        out_h = int((in_h + 2 * padH - dilated_kh) / dH + 1)
        out_w = int((in_w + 2 * padW - dilated_kw) / dW + 1)

        return (batch_size, patch_size, patch_size, out_h, out_w)
112
+
113
+
114
class Correlation(nn.Module):
    r"""Correlation operator

    This correlation operator works for optical flow correlation computation.

    There are two batched tensors with shape :math:`(N, C, H, W)`,
    and the correlation output's shape is :math:`(N, max\_displacement \times
    2 + 1, max\_displacement * 2 + 1, H_{out}, W_{out})`

    where

    .. math::
        H_{out} = \left\lfloor\frac{H_{in} + 2 \times padding -
            dilation \times (kernel\_size - 1) - 1}
            {stride} + 1\right\rfloor

    .. math::
        W_{out} = \left\lfloor\frac{W_{in} + 2 \times padding - dilation
            \times (kernel\_size - 1) - 1}
            {stride} + 1\right\rfloor

    the correlation item :math:`(N_i, dy, dx)` is formed by taking the sliding
    window convolution between input1 and shifted input2,

    .. math::
        Corr(N_i, dx, dy) =
        \sum_{c=0}^{C-1}
        input1(N_i, c) \star
        \mathcal{S}(input2(N_i, c), dy, dx)

    where :math:`\star` is the valid 2d sliding window convolution operator,
    and :math:`\mathcal{S}` means shifting the input features (auto-complete
    zero marginal), and :math:`dx, dy` are shifting distance, :math:`dx, dy \in
    [-max\_displacement \times dilation\_patch, max\_displacement \times
    dilation\_patch]`.

    Args:
        kernel_size (int): The size of sliding window i.e. local neighborhood
            representing the center points and involved in correlation
            computation. Defaults to 1.
        max_displacement (int): The radius for computing correlation volume,
            but the actual working space can be dilated by dilation_patch.
            Defaults to 1.
        stride (int): The stride of the sliding blocks in the input spatial
            dimensions. Defaults to 1.
        padding (int): Zero padding added to all four sides of the input1.
            Defaults to 0.
        dilation (int): The spacing of local neighborhood that will involved
            in correlation. Defaults to 1.
        dilation_patch (int): The spacing between position need to compute
            correlation. Defaults to 1.
    """

    def __init__(self,
                 kernel_size: int = 1,
                 max_displacement: int = 1,
                 stride: int = 1,
                 padding: int = 0,
                 dilation: int = 1,
                 dilation_patch: int = 1) -> None:
        super().__init__()
        # Plain attribute storage: every hyper-parameter is forwarded
        # verbatim to CorrelationFunction at call time.
        self.kernel_size = kernel_size
        self.max_displacement = max_displacement
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.dilation_patch = dilation_patch

    def forward(self, input1: Tensor, input2: Tensor) -> Tensor:
        hyper_params = (self.kernel_size, self.max_displacement, self.stride,
                        self.padding, self.dilation, self.dilation_patch)
        return CorrelationFunction.apply(input1, input2, *hyper_params)

    def __repr__(self) -> str:
        attr_names = ('kernel_size', 'max_displacement', 'stride', 'padding',
                      'dilation', 'dilation_patch')
        args = ', '.join(
            f'{name}={getattr(self, name)}' for name in attr_names)
        return f'{self.__class__.__name__}({args})'
Text2Video-Zero-main/annotator/uniformer/mmcv/ops/deform_conv.py ADDED
@@ -0,0 +1,405 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ from typing import Tuple, Union
3
+
4
+ import torch
5
+ import torch.nn as nn
6
+ import torch.nn.functional as F
7
+ from torch import Tensor
8
+ from torch.autograd import Function
9
+ from torch.autograd.function import once_differentiable
10
+ from torch.nn.modules.utils import _pair, _single
11
+
12
+ from annotator.uniformer.mmcv.utils import deprecated_api_warning
13
+ from ..cnn import CONV_LAYERS
14
+ from ..utils import ext_loader, print_log
15
+
16
+ ext_module = ext_loader.load_ext('_ext', [
17
+ 'deform_conv_forward', 'deform_conv_backward_input',
18
+ 'deform_conv_backward_parameters'
19
+ ])
20
+
21
+
22
class DeformConv2dFunction(Function):
    """Autograd wrapper around the ``deform_conv_*`` extension ops;
    used by :class:`DeformConv2d` via the ``deform_conv2d`` alias."""

    @staticmethod
    def symbolic(g,
                 input,
                 offset,
                 weight,
                 stride,
                 padding,
                 dilation,
                 groups,
                 deform_groups,
                 bias=False,
                 im2col_step=32):
        # ONNX export: emit a single custom MMCVDeformConv2d node.
        return g.op(
            'mmcv::MMCVDeformConv2d',
            input,
            offset,
            weight,
            stride_i=stride,
            padding_i=padding,
            dilation_i=dilation,
            groups_i=groups,
            deform_groups_i=deform_groups,
            bias_i=bias,
            im2col_step_i=im2col_step)

    @staticmethod
    def forward(ctx,
                input,
                offset,
                weight,
                stride=1,
                padding=0,
                dilation=1,
                groups=1,
                deform_groups=1,
                bias=False,
                im2col_step=32):
        """Run the deformable-convolution forward extension op.

        ``input`` must be a 4D (B, C, H, W) tensor; ``bias`` must be False.
        """
        if input is not None and input.dim() != 4:
            raise ValueError(
                f'Expected 4D tensor as input, got {input.dim()}D tensor \
                instead.')
        assert bias is False, 'Only support bias is False.'
        ctx.stride = _pair(stride)
        ctx.padding = _pair(padding)
        ctx.dilation = _pair(dilation)
        ctx.groups = groups
        ctx.deform_groups = deform_groups
        ctx.im2col_step = im2col_step

        # When pytorch version >= 1.6.0, amp is adopted for fp16 mode;
        # amp won't cast the type of model (float32), but "offset" is cast
        # to float16 by nn.Conv2d automatically, leading to the type
        # mismatch with input (when it is float32) or weight.
        # The flag for whether to use fp16 or amp is the type of "offset",
        # we cast weight and input to temporarily support fp16 and amp
        # whatever the pytorch version is.
        input = input.type_as(offset)
        weight = weight.type_as(input)
        ctx.save_for_backward(input, offset, weight)

        output = input.new_empty(
            DeformConv2dFunction._output_size(ctx, input, weight))

        ctx.bufs_ = [input.new_empty(0), input.new_empty(0)]  # columns, ones

        # The CUDA kernel processes the batch in chunks of im2col_step
        # samples, so the batch size must be divisible by the step.
        cur_im2col_step = min(ctx.im2col_step, input.size(0))
        assert (input.size(0) %
                cur_im2col_step) == 0, 'im2col step must divide batchsize'
        ext_module.deform_conv_forward(
            input,
            weight,
            offset,
            output,
            ctx.bufs_[0],
            ctx.bufs_[1],
            kW=weight.size(3),
            kH=weight.size(2),
            dW=ctx.stride[1],
            dH=ctx.stride[0],
            padW=ctx.padding[1],
            padH=ctx.padding[0],
            dilationW=ctx.dilation[1],
            dilationH=ctx.dilation[0],
            group=ctx.groups,
            deformable_group=ctx.deform_groups,
            im2col_step=cur_im2col_step)
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        """Compute gradients w.r.t. input, offset and weight as needed."""
        input, offset, weight = ctx.saved_tensors

        grad_input = grad_offset = grad_weight = None

        cur_im2col_step = min(ctx.im2col_step, input.size(0))
        assert (input.size(0) % cur_im2col_step
                ) == 0, 'batch size must be divisible by im2col_step'

        grad_output = grad_output.contiguous()
        # Input and offset gradients are produced by a single fused op.
        if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]:
            grad_input = torch.zeros_like(input)
            grad_offset = torch.zeros_like(offset)
            ext_module.deform_conv_backward_input(
                input,
                offset,
                grad_output,
                grad_input,
                grad_offset,
                weight,
                ctx.bufs_[0],
                kW=weight.size(3),
                kH=weight.size(2),
                dW=ctx.stride[1],
                dH=ctx.stride[0],
                padW=ctx.padding[1],
                padH=ctx.padding[0],
                dilationW=ctx.dilation[1],
                dilationH=ctx.dilation[0],
                group=ctx.groups,
                deformable_group=ctx.deform_groups,
                im2col_step=cur_im2col_step)

        if ctx.needs_input_grad[2]:
            grad_weight = torch.zeros_like(weight)
            ext_module.deform_conv_backward_parameters(
                input,
                offset,
                grad_output,
                grad_weight,
                ctx.bufs_[0],
                ctx.bufs_[1],
                kW=weight.size(3),
                kH=weight.size(2),
                dW=ctx.stride[1],
                dH=ctx.stride[0],
                padW=ctx.padding[1],
                padH=ctx.padding[0],
                dilationW=ctx.dilation[1],
                dilationH=ctx.dilation[0],
                group=ctx.groups,
                deformable_group=ctx.deform_groups,
                scale=1,
                im2col_step=cur_im2col_step)

        # Only the three tensor inputs receive gradients.
        return grad_input, grad_offset, grad_weight, \
            None, None, None, None, None, None, None

    @staticmethod
    def _output_size(ctx, input, weight):
        """Compute the (B, C_out, H_out, W_out) output shape."""
        channels = weight.size(0)
        output_size = (input.size(0), channels)
        for d in range(input.dim() - 2):
            in_size = input.size(d + 2)
            pad = ctx.padding[d]
            # Effective (dilated) kernel extent along this dimension.
            kernel = ctx.dilation[d] * (weight.size(d + 2) - 1) + 1
            stride_ = ctx.stride[d]
            output_size += ((in_size + (2 * pad) - kernel) // stride_ + 1, )
        if not all(map(lambda s: s > 0, output_size)):
            raise ValueError(
                'convolution input is too small (output would be ' +
                'x'.join(map(str, output_size)) + ')')
        return output_size


# Functional alias: ``deform_conv2d(input, offset, weight, ...)``.
deform_conv2d = DeformConv2dFunction.apply
190
+
191
+
192
class DeformConv2d(nn.Module):
    r"""Deformable 2D convolution.

    Applies a deformable 2D convolution over an input signal composed of
    several input planes. DeformConv2d was described in the paper
    `Deformable Convolutional Networks
    <https://arxiv.org/pdf/1703.06211.pdf>`_

    Note:
        The argument ``im2col_step`` was added in version 1.3.17, which means
        number of samples processed by the ``im2col_cuda_kernel`` per call.
        It enables users to define ``batch_size`` and ``im2col_step`` more
        flexibly and solved `issue mmcv#1440
        <https://github.com/open-mmlab/mmcv/issues/1440>`_.

    Args:
        in_channels (int): Number of channels in the input image.
        out_channels (int): Number of channels produced by the convolution.
        kernel_size(int, tuple): Size of the convolving kernel.
        stride(int, tuple): Stride of the convolution. Default: 1.
        padding (int or tuple): Zero-padding added to both sides of the input.
            Default: 0.
        dilation (int or tuple): Spacing between kernel elements. Default: 1.
        groups (int): Number of blocked connections from input.
            channels to output channels. Default: 1.
        deform_groups (int): Number of deformable group partitions.
        bias (bool): If True, adds a learnable bias to the output.
            Default: False.
        im2col_step (int): Number of samples processed by im2col_cuda_kernel
            per call. It will work when ``batch_size`` > ``im2col_step``, but
            ``batch_size`` must be divisible by ``im2col_step``. Default: 32.
            `New in version 1.3.17.`
    """

    @deprecated_api_warning({'deformable_groups': 'deform_groups'},
                            cls_name='DeformConv2d')
    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: Union[int, Tuple[int, ...]],
                 stride: Union[int, Tuple[int, ...]] = 1,
                 padding: Union[int, Tuple[int, ...]] = 0,
                 dilation: Union[int, Tuple[int, ...]] = 1,
                 groups: int = 1,
                 deform_groups: int = 1,
                 bias: bool = False,
                 im2col_step: int = 32) -> None:
        super(DeformConv2d, self).__init__()

        assert not bias, \
            f'bias={bias} is not supported in DeformConv2d.'
        # Fixed error wording: the assert fires exactly when the channel
        # count is NOT divisible by ``groups``.
        assert in_channels % groups == 0, \
            f'in_channels {in_channels} is not divisible by groups {groups}'
        assert out_channels % groups == 0, \
            f'out_channels {out_channels} is not divisible by groups {groups}'

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = _pair(kernel_size)
        self.stride = _pair(stride)
        self.padding = _pair(padding)
        self.dilation = _pair(dilation)
        self.groups = groups
        self.deform_groups = deform_groups
        self.im2col_step = im2col_step
        # enable compatibility with nn.Conv2d
        self.transposed = False
        self.output_padding = _single(0)

        # only weight, no bias
        self.weight = nn.Parameter(
            torch.Tensor(out_channels, in_channels // self.groups,
                         *self.kernel_size))

        self.reset_parameters()

    def reset_parameters(self):
        # switch the initialization of `self.weight` to the standard kaiming
        # method described in `Delving deep into rectifiers: Surpassing
        # human-level performance on ImageNet classification` - He, K. et al.
        # (2015), using a uniform distribution
        nn.init.kaiming_uniform_(self.weight, nonlinearity='relu')

    def forward(self, x: Tensor, offset: Tensor) -> Tensor:
        """Deformable Convolutional forward function.

        Args:
            x (Tensor): Input feature, shape (B, C_in, H_in, W_in)
            offset (Tensor): Offset for deformable convolution, shape
                (B, deform_groups*kernel_size[0]*kernel_size[1]*2,
                H_out, W_out), H_out, W_out are equal to the output's.

                An offset is like `[y0, x0, y1, x1, y2, x2, ..., y8, x8]`.
                The spatial arrangement is like:

                .. code:: text

                    (x0, y0) (x1, y1) (x2, y2)
                    (x3, y3) (x4, y4) (x5, y5)
                    (x6, y6) (x7, y7) (x8, y8)

        Returns:
            Tensor: Output of the layer.
        """
        # To fix an assert error in deform_conv_cuda.cpp:128
        # input image is smaller than kernel
        input_pad = (x.size(2) < self.kernel_size[0]) or (x.size(3) <
                                                          self.kernel_size[1])
        if input_pad:
            # Zero-pad bottom/right up to the kernel size, then crop the
            # extra output rows/cols after the convolution.
            pad_h = max(self.kernel_size[0] - x.size(2), 0)
            pad_w = max(self.kernel_size[1] - x.size(3), 0)
            x = F.pad(x, (0, pad_w, 0, pad_h), 'constant', 0).contiguous()
            offset = F.pad(offset, (0, pad_w, 0, pad_h), 'constant', 0)
            offset = offset.contiguous()
        out = deform_conv2d(x, offset, self.weight, self.stride, self.padding,
                            self.dilation, self.groups, self.deform_groups,
                            False, self.im2col_step)
        if input_pad:
            out = out[:, :, :out.size(2) - pad_h, :out.size(3) -
                      pad_w].contiguous()
        return out

    def __repr__(self):
        s = self.__class__.__name__
        s += f'(in_channels={self.in_channels},\n'
        s += f'out_channels={self.out_channels},\n'
        s += f'kernel_size={self.kernel_size},\n'
        s += f'stride={self.stride},\n'
        s += f'padding={self.padding},\n'
        s += f'dilation={self.dilation},\n'
        s += f'groups={self.groups},\n'
        s += f'deform_groups={self.deform_groups},\n'
        # bias is not supported in DeformConv2d.
        s += 'bias=False)'
        return s
328
+
329
+
330
@CONV_LAYERS.register_module('DCN')
class DeformConv2dPack(DeformConv2d):
    """A Deformable Conv Encapsulation that acts as normal Conv layers.

    The offset tensor is like `[y0, x0, y1, x1, y2, x2, ..., y8, x8]`.
    The spatial arrangement is like:

    .. code:: text

        (x0, y0) (x1, y1) (x2, y2)
        (x3, y3) (x4, y4) (x5, y5)
        (x6, y6) (x7, y7) (x8, y8)

    Args:
        in_channels (int): Same as nn.Conv2d.
        out_channels (int): Same as nn.Conv2d.
        kernel_size (int or tuple[int]): Same as nn.Conv2d.
        stride (int or tuple[int]): Same as nn.Conv2d.
        padding (int or tuple[int]): Same as nn.Conv2d.
        dilation (int or tuple[int]): Same as nn.Conv2d.
        groups (int): Same as nn.Conv2d.
        bias (bool or str): If specified as `auto`, it will be decided by the
            norm_cfg. Bias will be set as True if norm_cfg is None, otherwise
            False.
    """

    # Bumped to 2 when the offset branch was renamed ``conv_offset``;
    # see _load_from_state_dict below.
    _version = 2

    def __init__(self, *args, **kwargs):
        super(DeformConv2dPack, self).__init__(*args, **kwargs)
        # Predicts 2 (y, x) offsets per kernel location per deform group.
        self.conv_offset = nn.Conv2d(
            self.in_channels,
            self.deform_groups * 2 * self.kernel_size[0] * self.kernel_size[1],
            kernel_size=self.kernel_size,
            stride=_pair(self.stride),
            padding=_pair(self.padding),
            dilation=_pair(self.dilation),
            bias=True)
        self.init_offset()

    def init_offset(self):
        # Zero init so training starts from a plain (undeformed) conv.
        self.conv_offset.weight.data.zero_()
        self.conv_offset.bias.data.zero_()

    def forward(self, x):
        offset = self.conv_offset(x)
        return deform_conv2d(x, offset, self.weight, self.stride, self.padding,
                             self.dilation, self.groups, self.deform_groups,
                             False, self.im2col_step)

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        version = local_metadata.get('version', None)

        if version is None or version < 2:
            # the key is different in early versions
            # In version < 2, DeformConvPack loads previous benchmark models.
            if (prefix + 'conv_offset.weight' not in state_dict
                    and prefix[:-1] + '_offset.weight' in state_dict):
                state_dict[prefix + 'conv_offset.weight'] = state_dict.pop(
                    prefix[:-1] + '_offset.weight')
            if (prefix + 'conv_offset.bias' not in state_dict
                    and prefix[:-1] + '_offset.bias' in state_dict):
                state_dict[prefix +
                           'conv_offset.bias'] = state_dict.pop(prefix[:-1] +
                                                                '_offset.bias')

        if version is not None and version > 1:
            print_log(
                f'DeformConv2dPack {prefix.rstrip(".")} is upgraded to '
                'version 2.',
                logger='root')

        super()._load_from_state_dict(state_dict, prefix, local_metadata,
                                      strict, missing_keys, unexpected_keys,
                                      error_msgs)
Text2Video-Zero-main/annotator/uniformer/mmcv/ops/gather_points.py ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from torch.autograd import Function
3
+
4
+ from ..utils import ext_loader
5
+
6
+ ext_module = ext_loader.load_ext(
7
+ '_ext', ['gather_points_forward', 'gather_points_backward'])
8
+
9
+
10
+ class GatherPoints(Function):
11
+ """Gather points with given index."""
12
+
13
+ @staticmethod
14
+ def forward(ctx, features: torch.Tensor,
15
+ indices: torch.Tensor) -> torch.Tensor:
16
+ """
17
+ Args:
18
+ features (Tensor): (B, C, N) features to gather.
19
+ indices (Tensor): (B, M) where M is the number of points.
20
+
21
+ Returns:
22
+ Tensor: (B, C, M) where M is the number of points.
23
+ """
24
+ assert features.is_contiguous()
25
+ assert indices.is_contiguous()
26
+
27
+ B, npoint = indices.size()
28
+ _, C, N = features.size()
29
+ output = torch.cuda.FloatTensor(B, C, npoint)
30
+
31
+ ext_module.gather_points_forward(
32
+ features, indices, output, b=B, c=C, n=N, npoints=npoint)
33
+
34
+ ctx.for_backwards = (indices, C, N)
35
+ if torch.__version__ != 'parrots':
36
+ ctx.mark_non_differentiable(indices)
37
+ return output
38
+
39
+ @staticmethod
40
+ def backward(ctx, grad_out):
41
+ idx, C, N = ctx.for_backwards
42
+ B, npoint = idx.size()
43
+
44
+ grad_features = torch.cuda.FloatTensor(B, C, N).zero_()
45
+ grad_out_data = grad_out.data.contiguous()
46
+ ext_module.gather_points_backward(
47
+ grad_out_data,
48
+ idx,
49
+ grad_features.data,
50
+ b=B,
51
+ c=C,
52
+ n=N,
53
+ npoints=npoint)
54
+ return grad_features, None
55
+
56
+
57
+ gather_points = GatherPoints.apply
Text2Video-Zero-main/annotator/uniformer/mmcv/ops/multi_scale_deform_attn.py ADDED
@@ -0,0 +1,358 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import math
3
+ import warnings
4
+
5
+ import torch
6
+ import torch.nn as nn
7
+ import torch.nn.functional as F
8
+ from torch.autograd.function import Function, once_differentiable
9
+
10
+ from annotator.uniformer.mmcv import deprecated_api_warning
11
+ from annotator.uniformer.mmcv.cnn import constant_init, xavier_init
12
+ from annotator.uniformer.mmcv.cnn.bricks.registry import ATTENTION
13
+ from annotator.uniformer.mmcv.runner import BaseModule
14
+ from ..utils import ext_loader
15
+
16
+ ext_module = ext_loader.load_ext(
17
+ '_ext', ['ms_deform_attn_backward', 'ms_deform_attn_forward'])
18
+
19
+
20
+ class MultiScaleDeformableAttnFunction(Function):
21
+
22
+ @staticmethod
23
+ def forward(ctx, value, value_spatial_shapes, value_level_start_index,
24
+ sampling_locations, attention_weights, im2col_step):
25
+ """GPU version of multi-scale deformable attention.
26
+
27
+ Args:
28
+ value (Tensor): The value has shape
29
+ (bs, num_keys, mum_heads, embed_dims//num_heads)
30
+ value_spatial_shapes (Tensor): Spatial shape of
31
+ each feature map, has shape (num_levels, 2),
32
+ last dimension 2 represent (h, w)
33
+ sampling_locations (Tensor): The location of sampling points,
34
+ has shape
35
+ (bs ,num_queries, num_heads, num_levels, num_points, 2),
36
+ the last dimension 2 represent (x, y).
37
+ attention_weights (Tensor): The weight of sampling points used
38
+ when calculate the attention, has shape
39
+ (bs ,num_queries, num_heads, num_levels, num_points),
40
+ im2col_step (Tensor): The step used in image to column.
41
+
42
+ Returns:
43
+ Tensor: has shape (bs, num_queries, embed_dims)
44
+ """
45
+
46
+ ctx.im2col_step = im2col_step
47
+ output = ext_module.ms_deform_attn_forward(
48
+ value,
49
+ value_spatial_shapes,
50
+ value_level_start_index,
51
+ sampling_locations,
52
+ attention_weights,
53
+ im2col_step=ctx.im2col_step)
54
+ ctx.save_for_backward(value, value_spatial_shapes,
55
+ value_level_start_index, sampling_locations,
56
+ attention_weights)
57
+ return output
58
+
59
+ @staticmethod
60
+ @once_differentiable
61
+ def backward(ctx, grad_output):
62
+ """GPU version of backward function.
63
+
64
+ Args:
65
+ grad_output (Tensor): Gradient
66
+ of output tensor of forward.
67
+
68
+ Returns:
69
+ Tuple[Tensor]: Gradient
70
+ of input tensors in forward.
71
+ """
72
+ value, value_spatial_shapes, value_level_start_index,\
73
+ sampling_locations, attention_weights = ctx.saved_tensors
74
+ grad_value = torch.zeros_like(value)
75
+ grad_sampling_loc = torch.zeros_like(sampling_locations)
76
+ grad_attn_weight = torch.zeros_like(attention_weights)
77
+
78
+ ext_module.ms_deform_attn_backward(
79
+ value,
80
+ value_spatial_shapes,
81
+ value_level_start_index,
82
+ sampling_locations,
83
+ attention_weights,
84
+ grad_output.contiguous(),
85
+ grad_value,
86
+ grad_sampling_loc,
87
+ grad_attn_weight,
88
+ im2col_step=ctx.im2col_step)
89
+
90
+ return grad_value, None, None, \
91
+ grad_sampling_loc, grad_attn_weight, None
92
+
93
+
94
+ def multi_scale_deformable_attn_pytorch(value, value_spatial_shapes,
95
+ sampling_locations, attention_weights):
96
+ """CPU version of multi-scale deformable attention.
97
+
98
+ Args:
99
+ value (Tensor): The value has shape
100
+ (bs, num_keys, mum_heads, embed_dims//num_heads)
101
+ value_spatial_shapes (Tensor): Spatial shape of
102
+ each feature map, has shape (num_levels, 2),
103
+ last dimension 2 represent (h, w)
104
+ sampling_locations (Tensor): The location of sampling points,
105
+ has shape
106
+ (bs ,num_queries, num_heads, num_levels, num_points, 2),
107
+ the last dimension 2 represent (x, y).
108
+ attention_weights (Tensor): The weight of sampling points used
109
+ when calculate the attention, has shape
110
+ (bs ,num_queries, num_heads, num_levels, num_points),
111
+
112
+ Returns:
113
+ Tensor: has shape (bs, num_queries, embed_dims)
114
+ """
115
+
116
+ bs, _, num_heads, embed_dims = value.shape
117
+ _, num_queries, num_heads, num_levels, num_points, _ =\
118
+ sampling_locations.shape
119
+ value_list = value.split([H_ * W_ for H_, W_ in value_spatial_shapes],
120
+ dim=1)
121
+ sampling_grids = 2 * sampling_locations - 1
122
+ sampling_value_list = []
123
+ for level, (H_, W_) in enumerate(value_spatial_shapes):
124
+ # bs, H_*W_, num_heads, embed_dims ->
125
+ # bs, H_*W_, num_heads*embed_dims ->
126
+ # bs, num_heads*embed_dims, H_*W_ ->
127
+ # bs*num_heads, embed_dims, H_, W_
128
+ value_l_ = value_list[level].flatten(2).transpose(1, 2).reshape(
129
+ bs * num_heads, embed_dims, H_, W_)
130
+ # bs, num_queries, num_heads, num_points, 2 ->
131
+ # bs, num_heads, num_queries, num_points, 2 ->
132
+ # bs*num_heads, num_queries, num_points, 2
133
+ sampling_grid_l_ = sampling_grids[:, :, :,
134
+ level].transpose(1, 2).flatten(0, 1)
135
+ # bs*num_heads, embed_dims, num_queries, num_points
136
+ sampling_value_l_ = F.grid_sample(
137
+ value_l_,
138
+ sampling_grid_l_,
139
+ mode='bilinear',
140
+ padding_mode='zeros',
141
+ align_corners=False)
142
+ sampling_value_list.append(sampling_value_l_)
143
+ # (bs, num_queries, num_heads, num_levels, num_points) ->
144
+ # (bs, num_heads, num_queries, num_levels, num_points) ->
145
+ # (bs, num_heads, 1, num_queries, num_levels*num_points)
146
+ attention_weights = attention_weights.transpose(1, 2).reshape(
147
+ bs * num_heads, 1, num_queries, num_levels * num_points)
148
+ output = (torch.stack(sampling_value_list, dim=-2).flatten(-2) *
149
+ attention_weights).sum(-1).view(bs, num_heads * embed_dims,
150
+ num_queries)
151
+ return output.transpose(1, 2).contiguous()
152
+
153
+
154
+ @ATTENTION.register_module()
155
+ class MultiScaleDeformableAttention(BaseModule):
156
+ """An attention module used in Deformable-Detr.
157
+
158
+ `Deformable DETR: Deformable Transformers for End-to-End Object Detection.
159
+ <https://arxiv.org/pdf/2010.04159.pdf>`_.
160
+
161
+ Args:
162
+ embed_dims (int): The embedding dimension of Attention.
163
+ Default: 256.
164
+ num_heads (int): Parallel attention heads. Default: 64.
165
+ num_levels (int): The number of feature map used in
166
+ Attention. Default: 4.
167
+ num_points (int): The number of sampling points for
168
+ each query in each head. Default: 4.
169
+ im2col_step (int): The step used in image_to_column.
170
+ Default: 64.
171
+ dropout (float): A Dropout layer on `inp_identity`.
172
+ Default: 0.1.
173
+ batch_first (bool): Key, Query and Value are shape of
174
+ (batch, n, embed_dim)
175
+ or (n, batch, embed_dim). Default to False.
176
+ norm_cfg (dict): Config dict for normalization layer.
177
+ Default: None.
178
+ init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
179
+ Default: None.
180
+ """
181
+
182
+ def __init__(self,
183
+ embed_dims=256,
184
+ num_heads=8,
185
+ num_levels=4,
186
+ num_points=4,
187
+ im2col_step=64,
188
+ dropout=0.1,
189
+ batch_first=False,
190
+ norm_cfg=None,
191
+ init_cfg=None):
192
+ super().__init__(init_cfg)
193
+ if embed_dims % num_heads != 0:
194
+ raise ValueError(f'embed_dims must be divisible by num_heads, '
195
+ f'but got {embed_dims} and {num_heads}')
196
+ dim_per_head = embed_dims // num_heads
197
+ self.norm_cfg = norm_cfg
198
+ self.dropout = nn.Dropout(dropout)
199
+ self.batch_first = batch_first
200
+
201
+ # you'd better set dim_per_head to a power of 2
202
+ # which is more efficient in the CUDA implementation
203
+ def _is_power_of_2(n):
204
+ if (not isinstance(n, int)) or (n < 0):
205
+ raise ValueError(
206
+ 'invalid input for _is_power_of_2: {} (type: {})'.format(
207
+ n, type(n)))
208
+ return (n & (n - 1) == 0) and n != 0
209
+
210
+ if not _is_power_of_2(dim_per_head):
211
+ warnings.warn(
212
+ "You'd better set embed_dims in "
213
+ 'MultiScaleDeformAttention to make '
214
+ 'the dimension of each attention head a power of 2 '
215
+ 'which is more efficient in our CUDA implementation.')
216
+
217
+ self.im2col_step = im2col_step
218
+ self.embed_dims = embed_dims
219
+ self.num_levels = num_levels
220
+ self.num_heads = num_heads
221
+ self.num_points = num_points
222
+ self.sampling_offsets = nn.Linear(
223
+ embed_dims, num_heads * num_levels * num_points * 2)
224
+ self.attention_weights = nn.Linear(embed_dims,
225
+ num_heads * num_levels * num_points)
226
+ self.value_proj = nn.Linear(embed_dims, embed_dims)
227
+ self.output_proj = nn.Linear(embed_dims, embed_dims)
228
+ self.init_weights()
229
+
230
+ def init_weights(self):
231
+ """Default initialization for Parameters of Module."""
232
+ constant_init(self.sampling_offsets, 0.)
233
+ thetas = torch.arange(
234
+ self.num_heads,
235
+ dtype=torch.float32) * (2.0 * math.pi / self.num_heads)
236
+ grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
237
+ grid_init = (grid_init /
238
+ grid_init.abs().max(-1, keepdim=True)[0]).view(
239
+ self.num_heads, 1, 1,
240
+ 2).repeat(1, self.num_levels, self.num_points, 1)
241
+ for i in range(self.num_points):
242
+ grid_init[:, :, i, :] *= i + 1
243
+
244
+ self.sampling_offsets.bias.data = grid_init.view(-1)
245
+ constant_init(self.attention_weights, val=0., bias=0.)
246
+ xavier_init(self.value_proj, distribution='uniform', bias=0.)
247
+ xavier_init(self.output_proj, distribution='uniform', bias=0.)
248
+ self._is_init = True
249
+
250
+ @deprecated_api_warning({'residual': 'identity'},
251
+ cls_name='MultiScaleDeformableAttention')
252
+ def forward(self,
253
+ query,
254
+ key=None,
255
+ value=None,
256
+ identity=None,
257
+ query_pos=None,
258
+ key_padding_mask=None,
259
+ reference_points=None,
260
+ spatial_shapes=None,
261
+ level_start_index=None,
262
+ **kwargs):
263
+ """Forward Function of MultiScaleDeformAttention.
264
+
265
+ Args:
266
+ query (Tensor): Query of Transformer with shape
267
+ (num_query, bs, embed_dims).
268
+ key (Tensor): The key tensor with shape
269
+ `(num_key, bs, embed_dims)`.
270
+ value (Tensor): The value tensor with shape
271
+ `(num_key, bs, embed_dims)`.
272
+ identity (Tensor): The tensor used for addition, with the
273
+ same shape as `query`. Default None. If None,
274
+ `query` will be used.
275
+ query_pos (Tensor): The positional encoding for `query`.
276
+ Default: None.
277
+ key_pos (Tensor): The positional encoding for `key`. Default
278
+ None.
279
+ reference_points (Tensor): The normalized reference
280
+ points with shape (bs, num_query, num_levels, 2),
281
+ all elements is range in [0, 1], top-left (0,0),
282
+ bottom-right (1, 1), including padding area.
283
+ or (N, Length_{query}, num_levels, 4), add
284
+ additional two dimensions is (w, h) to
285
+ form reference boxes.
286
+ key_padding_mask (Tensor): ByteTensor for `query`, with
287
+ shape [bs, num_key].
288
+ spatial_shapes (Tensor): Spatial shape of features in
289
+ different levels. With shape (num_levels, 2),
290
+ last dimension represents (h, w).
291
+ level_start_index (Tensor): The start index of each level.
292
+ A tensor has shape ``(num_levels, )`` and can be represented
293
+ as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...].
294
+
295
+ Returns:
296
+ Tensor: forwarded results with shape [num_query, bs, embed_dims].
297
+ """
298
+
299
+ if value is None:
300
+ value = query
301
+
302
+ if identity is None:
303
+ identity = query
304
+ if query_pos is not None:
305
+ query = query + query_pos
306
+ if not self.batch_first:
307
+ # change to (bs, num_query ,embed_dims)
308
+ query = query.permute(1, 0, 2)
309
+ value = value.permute(1, 0, 2)
310
+
311
+ bs, num_query, _ = query.shape
312
+ bs, num_value, _ = value.shape
313
+ assert (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() == num_value
314
+
315
+ value = self.value_proj(value)
316
+ if key_padding_mask is not None:
317
+ value = value.masked_fill(key_padding_mask[..., None], 0.0)
318
+ value = value.view(bs, num_value, self.num_heads, -1)
319
+ sampling_offsets = self.sampling_offsets(query).view(
320
+ bs, num_query, self.num_heads, self.num_levels, self.num_points, 2)
321
+ attention_weights = self.attention_weights(query).view(
322
+ bs, num_query, self.num_heads, self.num_levels * self.num_points)
323
+ attention_weights = attention_weights.softmax(-1)
324
+
325
+ attention_weights = attention_weights.view(bs, num_query,
326
+ self.num_heads,
327
+ self.num_levels,
328
+ self.num_points)
329
+ if reference_points.shape[-1] == 2:
330
+ offset_normalizer = torch.stack(
331
+ [spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)
332
+ sampling_locations = reference_points[:, :, None, :, None, :] \
333
+ + sampling_offsets \
334
+ / offset_normalizer[None, None, None, :, None, :]
335
+ elif reference_points.shape[-1] == 4:
336
+ sampling_locations = reference_points[:, :, None, :, None, :2] \
337
+ + sampling_offsets / self.num_points \
338
+ * reference_points[:, :, None, :, None, 2:] \
339
+ * 0.5
340
+ else:
341
+ raise ValueError(
342
+ f'Last dim of reference_points must be'
343
+ f' 2 or 4, but get {reference_points.shape[-1]} instead.')
344
+ if torch.cuda.is_available() and value.is_cuda:
345
+ output = MultiScaleDeformableAttnFunction.apply(
346
+ value, spatial_shapes, level_start_index, sampling_locations,
347
+ attention_weights, self.im2col_step)
348
+ else:
349
+ output = multi_scale_deformable_attn_pytorch(
350
+ value, spatial_shapes, sampling_locations, attention_weights)
351
+
352
+ output = self.output_proj(output)
353
+
354
+ if not self.batch_first:
355
+ # (num_query, bs ,embed_dims)
356
+ output = output.permute(1, 0, 2)
357
+
358
+ return self.dropout(output) + identity
Text2Video-Zero-main/annotator/uniformer/mmcv/ops/roi_pool.py ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import torch
3
+ import torch.nn as nn
4
+ from torch.autograd import Function
5
+ from torch.autograd.function import once_differentiable
6
+ from torch.nn.modules.utils import _pair
7
+
8
+ from ..utils import ext_loader
9
+
10
+ ext_module = ext_loader.load_ext('_ext',
11
+ ['roi_pool_forward', 'roi_pool_backward'])
12
+
13
+
14
+ class RoIPoolFunction(Function):
15
+
16
+ @staticmethod
17
+ def symbolic(g, input, rois, output_size, spatial_scale):
18
+ return g.op(
19
+ 'MaxRoiPool',
20
+ input,
21
+ rois,
22
+ pooled_shape_i=output_size,
23
+ spatial_scale_f=spatial_scale)
24
+
25
+ @staticmethod
26
+ def forward(ctx, input, rois, output_size, spatial_scale=1.0):
27
+ ctx.output_size = _pair(output_size)
28
+ ctx.spatial_scale = spatial_scale
29
+ ctx.input_shape = input.size()
30
+
31
+ assert rois.size(1) == 5, 'RoI must be (idx, x1, y1, x2, y2)!'
32
+
33
+ output_shape = (rois.size(0), input.size(1), ctx.output_size[0],
34
+ ctx.output_size[1])
35
+ output = input.new_zeros(output_shape)
36
+ argmax = input.new_zeros(output_shape, dtype=torch.int)
37
+
38
+ ext_module.roi_pool_forward(
39
+ input,
40
+ rois,
41
+ output,
42
+ argmax,
43
+ pooled_height=ctx.output_size[0],
44
+ pooled_width=ctx.output_size[1],
45
+ spatial_scale=ctx.spatial_scale)
46
+
47
+ ctx.save_for_backward(rois, argmax)
48
+ return output
49
+
50
+ @staticmethod
51
+ @once_differentiable
52
+ def backward(ctx, grad_output):
53
+ rois, argmax = ctx.saved_tensors
54
+ grad_input = grad_output.new_zeros(ctx.input_shape)
55
+
56
+ ext_module.roi_pool_backward(
57
+ grad_output,
58
+ rois,
59
+ argmax,
60
+ grad_input,
61
+ pooled_height=ctx.output_size[0],
62
+ pooled_width=ctx.output_size[1],
63
+ spatial_scale=ctx.spatial_scale)
64
+
65
+ return grad_input, None, None, None
66
+
67
+
68
+ roi_pool = RoIPoolFunction.apply
69
+
70
+
71
+ class RoIPool(nn.Module):
72
+
73
+ def __init__(self, output_size, spatial_scale=1.0):
74
+ super(RoIPool, self).__init__()
75
+
76
+ self.output_size = _pair(output_size)
77
+ self.spatial_scale = float(spatial_scale)
78
+
79
+ def forward(self, input, rois):
80
+ return roi_pool(input, rois, self.output_size, self.spatial_scale)
81
+
82
+ def __repr__(self):
83
+ s = self.__class__.__name__
84
+ s += f'(output_size={self.output_size}, '
85
+ s += f'spatial_scale={self.spatial_scale})'
86
+ return s
Text2Video-Zero-main/annotator/uniformer/mmcv/ops/saconv.py ADDED
@@ -0,0 +1,145 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+
6
+ from annotator.uniformer.mmcv.cnn import CONV_LAYERS, ConvAWS2d, constant_init
7
+ from annotator.uniformer.mmcv.ops.deform_conv import deform_conv2d
8
+ from annotator.uniformer.mmcv.utils import TORCH_VERSION, digit_version
9
+
10
+
11
+ @CONV_LAYERS.register_module(name='SAC')
12
+ class SAConv2d(ConvAWS2d):
13
+ """SAC (Switchable Atrous Convolution)
14
+
15
+ This is an implementation of SAC in DetectoRS
16
+ (https://arxiv.org/pdf/2006.02334.pdf).
17
+
18
+ Args:
19
+ in_channels (int): Number of channels in the input image
20
+ out_channels (int): Number of channels produced by the convolution
21
+ kernel_size (int or tuple): Size of the convolving kernel
22
+ stride (int or tuple, optional): Stride of the convolution. Default: 1
23
+ padding (int or tuple, optional): Zero-padding added to both sides of
24
+ the input. Default: 0
25
+ padding_mode (string, optional): ``'zeros'``, ``'reflect'``,
26
+ ``'replicate'`` or ``'circular'``. Default: ``'zeros'``
27
+ dilation (int or tuple, optional): Spacing between kernel elements.
28
+ Default: 1
29
+ groups (int, optional): Number of blocked connections from input
30
+ channels to output channels. Default: 1
31
+ bias (bool, optional): If ``True``, adds a learnable bias to the
32
+ output. Default: ``True``
33
+ use_deform: If ``True``, replace convolution with deformable
34
+ convolution. Default: ``False``.
35
+ """
36
+
37
+ def __init__(self,
38
+ in_channels,
39
+ out_channels,
40
+ kernel_size,
41
+ stride=1,
42
+ padding=0,
43
+ dilation=1,
44
+ groups=1,
45
+ bias=True,
46
+ use_deform=False):
47
+ super().__init__(
48
+ in_channels,
49
+ out_channels,
50
+ kernel_size,
51
+ stride=stride,
52
+ padding=padding,
53
+ dilation=dilation,
54
+ groups=groups,
55
+ bias=bias)
56
+ self.use_deform = use_deform
57
+ self.switch = nn.Conv2d(
58
+ self.in_channels, 1, kernel_size=1, stride=stride, bias=True)
59
+ self.weight_diff = nn.Parameter(torch.Tensor(self.weight.size()))
60
+ self.pre_context = nn.Conv2d(
61
+ self.in_channels, self.in_channels, kernel_size=1, bias=True)
62
+ self.post_context = nn.Conv2d(
63
+ self.out_channels, self.out_channels, kernel_size=1, bias=True)
64
+ if self.use_deform:
65
+ self.offset_s = nn.Conv2d(
66
+ self.in_channels,
67
+ 18,
68
+ kernel_size=3,
69
+ padding=1,
70
+ stride=stride,
71
+ bias=True)
72
+ self.offset_l = nn.Conv2d(
73
+ self.in_channels,
74
+ 18,
75
+ kernel_size=3,
76
+ padding=1,
77
+ stride=stride,
78
+ bias=True)
79
+ self.init_weights()
80
+
81
+ def init_weights(self):
82
+ constant_init(self.switch, 0, bias=1)
83
+ self.weight_diff.data.zero_()
84
+ constant_init(self.pre_context, 0)
85
+ constant_init(self.post_context, 0)
86
+ if self.use_deform:
87
+ constant_init(self.offset_s, 0)
88
+ constant_init(self.offset_l, 0)
89
+
90
+ def forward(self, x):
91
+ # pre-context
92
+ avg_x = F.adaptive_avg_pool2d(x, output_size=1)
93
+ avg_x = self.pre_context(avg_x)
94
+ avg_x = avg_x.expand_as(x)
95
+ x = x + avg_x
96
+ # switch
97
+ avg_x = F.pad(x, pad=(2, 2, 2, 2), mode='reflect')
98
+ avg_x = F.avg_pool2d(avg_x, kernel_size=5, stride=1, padding=0)
99
+ switch = self.switch(avg_x)
100
+ # sac
101
+ weight = self._get_weight(self.weight)
102
+ zero_bias = torch.zeros(
103
+ self.out_channels, device=weight.device, dtype=weight.dtype)
104
+
105
+ if self.use_deform:
106
+ offset = self.offset_s(avg_x)
107
+ out_s = deform_conv2d(x, offset, weight, self.stride, self.padding,
108
+ self.dilation, self.groups, 1)
109
+ else:
110
+ if (TORCH_VERSION == 'parrots'
111
+ or digit_version(TORCH_VERSION) < digit_version('1.5.0')):
112
+ out_s = super().conv2d_forward(x, weight)
113
+ elif digit_version(TORCH_VERSION) >= digit_version('1.8.0'):
114
+ # bias is a required argument of _conv_forward in torch 1.8.0
115
+ out_s = super()._conv_forward(x, weight, zero_bias)
116
+ else:
117
+ out_s = super()._conv_forward(x, weight)
118
+ ori_p = self.padding
119
+ ori_d = self.dilation
120
+ self.padding = tuple(3 * p for p in self.padding)
121
+ self.dilation = tuple(3 * d for d in self.dilation)
122
+ weight = weight + self.weight_diff
123
+ if self.use_deform:
124
+ offset = self.offset_l(avg_x)
125
+ out_l = deform_conv2d(x, offset, weight, self.stride, self.padding,
126
+ self.dilation, self.groups, 1)
127
+ else:
128
+ if (TORCH_VERSION == 'parrots'
129
+ or digit_version(TORCH_VERSION) < digit_version('1.5.0')):
130
+ out_l = super().conv2d_forward(x, weight)
131
+ elif digit_version(TORCH_VERSION) >= digit_version('1.8.0'):
132
+ # bias is a required argument of _conv_forward in torch 1.8.0
133
+ out_l = super()._conv_forward(x, weight, zero_bias)
134
+ else:
135
+ out_l = super()._conv_forward(x, weight)
136
+
137
+ out = switch * out_s + (1 - switch) * out_l
138
+ self.padding = ori_p
139
+ self.dilation = ori_d
140
+ # post-context
141
+ avg_x = F.adaptive_avg_pool2d(out, output_size=1)
142
+ avg_x = self.post_context(avg_x)
143
+ avg_x = avg_x.expand_as(out)
144
+ out = out + avg_x
145
+ return out
Text2Video-Zero-main/annotator/uniformer/mmcv/runner/__init__.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ from .base_module import BaseModule, ModuleList, Sequential
3
+ from .base_runner import BaseRunner
4
+ from .builder import RUNNERS, build_runner
5
+ from .checkpoint import (CheckpointLoader, _load_checkpoint,
6
+ _load_checkpoint_with_prefix, load_checkpoint,
7
+ load_state_dict, save_checkpoint, weights_to_cpu)
8
+ from .default_constructor import DefaultRunnerConstructor
9
+ from .dist_utils import (allreduce_grads, allreduce_params, get_dist_info,
10
+ init_dist, master_only)
11
+ from .epoch_based_runner import EpochBasedRunner, Runner
12
+ from .fp16_utils import LossScaler, auto_fp16, force_fp32, wrap_fp16_model
13
+ from .hooks import (HOOKS, CheckpointHook, ClosureHook, DistEvalHook,
14
+ DistSamplerSeedHook, DvcliveLoggerHook, EMAHook, EvalHook,
15
+ Fp16OptimizerHook, GradientCumulativeFp16OptimizerHook,
16
+ GradientCumulativeOptimizerHook, Hook, IterTimerHook,
17
+ LoggerHook, LrUpdaterHook, MlflowLoggerHook,
18
+ NeptuneLoggerHook, OptimizerHook, PaviLoggerHook,
19
+ SyncBuffersHook, TensorboardLoggerHook, TextLoggerHook,
20
+ WandbLoggerHook)
21
+ from .iter_based_runner import IterBasedRunner, IterLoader
22
+ from .log_buffer import LogBuffer
23
+ from .optimizer import (OPTIMIZER_BUILDERS, OPTIMIZERS,
24
+ DefaultOptimizerConstructor, build_optimizer,
25
+ build_optimizer_constructor)
26
+ from .priority import Priority, get_priority
27
+ from .utils import get_host_info, get_time_str, obj_from_dict, set_random_seed
28
+
29
+ __all__ = [
30
+ 'BaseRunner', 'Runner', 'EpochBasedRunner', 'IterBasedRunner', 'LogBuffer',
31
+ 'HOOKS', 'Hook', 'CheckpointHook', 'ClosureHook', 'LrUpdaterHook',
32
+ 'OptimizerHook', 'IterTimerHook', 'DistSamplerSeedHook', 'LoggerHook',
33
+ 'PaviLoggerHook', 'TextLoggerHook', 'TensorboardLoggerHook',
34
+ 'NeptuneLoggerHook', 'WandbLoggerHook', 'MlflowLoggerHook',
35
+ 'DvcliveLoggerHook', '_load_checkpoint', 'load_state_dict',
36
+ 'load_checkpoint', 'weights_to_cpu', 'save_checkpoint', 'Priority',
37
+ 'get_priority', 'get_host_info', 'get_time_str', 'obj_from_dict',
38
+ 'init_dist', 'get_dist_info', 'master_only', 'OPTIMIZER_BUILDERS',
39
+ 'OPTIMIZERS', 'DefaultOptimizerConstructor', 'build_optimizer',
40
+ 'build_optimizer_constructor', 'IterLoader', 'set_random_seed',
41
+ 'auto_fp16', 'force_fp32', 'wrap_fp16_model', 'Fp16OptimizerHook',
42
+ 'SyncBuffersHook', 'EMAHook', 'build_runner', 'RUNNERS', 'allreduce_grads',
43
+ 'allreduce_params', 'LossScaler', 'CheckpointLoader', 'BaseModule',
44
+ '_load_checkpoint_with_prefix', 'EvalHook', 'DistEvalHook', 'Sequential',
45
+ 'ModuleList', 'GradientCumulativeOptimizerHook',
46
+ 'GradientCumulativeFp16OptimizerHook', 'DefaultRunnerConstructor'
47
+ ]
Text2Video-Zero-main/annotator/uniformer/mmcv/runner/base_module.py ADDED
@@ -0,0 +1,195 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import copy
3
+ import warnings
4
+ from abc import ABCMeta
5
+ from collections import defaultdict
6
+ from logging import FileHandler
7
+
8
+ import torch.nn as nn
9
+
10
+ from annotator.uniformer.mmcv.runner.dist_utils import master_only
11
+ from annotator.uniformer.mmcv.utils.logging import get_logger, logger_initialized, print_log
12
+
13
+
14
+ class BaseModule(nn.Module, metaclass=ABCMeta):
15
+ """Base module for all modules in openmmlab.
16
+
17
+ ``BaseModule`` is a wrapper of ``torch.nn.Module`` with additional
18
+ functionality of parameter initialization. Compared with
19
+ ``torch.nn.Module``, ``BaseModule`` mainly adds three attributes.
20
+
21
+ - ``init_cfg``: the config to control the initialization.
22
+ - ``init_weights``: The function of parameter
23
+ initialization and recording initialization
24
+ information.
25
+ - ``_params_init_info``: Used to track the parameter
26
+ initialization information. This attribute only
27
+ exists during executing the ``init_weights``.
28
+
29
+ Args:
30
+ init_cfg (dict, optional): Initialization config dict.
31
+ """
32
+
33
+ def __init__(self, init_cfg=None):
34
+ """Initialize BaseModule, inherited from `torch.nn.Module`"""
35
+
36
+ # NOTE init_cfg can be defined in different levels, but init_cfg
37
+ # in low levels has a higher priority.
38
+
39
+ super(BaseModule, self).__init__()
40
+ # define default value of init_cfg instead of hard code
41
+ # in init_weights() function
42
+ self._is_init = False
43
+
44
+ self.init_cfg = copy.deepcopy(init_cfg)
45
+
46
+ # Backward compatibility in derived classes
47
+ # if pretrained is not None:
48
+ # warnings.warn('DeprecationWarning: pretrained is a deprecated \
49
+ # key, please consider using init_cfg')
50
+ # self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
51
+
52
+ @property
53
+ def is_init(self):
54
+ return self._is_init
55
+
56
+ def init_weights(self):
57
+ """Initialize the weights."""
58
+
59
+ is_top_level_module = False
60
+ # check if it is top-level module
61
+ if not hasattr(self, '_params_init_info'):
62
+ # The `_params_init_info` is used to record the initialization
63
+ # information of the parameters
64
+ # the key should be the obj:`nn.Parameter` of model and the value
65
+ # should be a dict containing
66
+ # - init_info (str): The string that describes the initialization.
67
+ # - tmp_mean_value (FloatTensor): The mean of the parameter,
68
+ # which indicates whether the parameter has been modified.
69
+ # this attribute would be deleted after all parameters
70
+ # is initialized.
71
+ self._params_init_info = defaultdict(dict)
72
+ is_top_level_module = True
73
+
74
+ # Initialize the `_params_init_info`,
75
+ # When detecting the `tmp_mean_value` of
76
+ # the corresponding parameter is changed, update related
77
+ # initialization information
78
+ for name, param in self.named_parameters():
79
+ self._params_init_info[param][
80
+ 'init_info'] = f'The value is the same before and ' \
81
+ f'after calling `init_weights` ' \
82
+ f'of {self.__class__.__name__} '
83
+ self._params_init_info[param][
84
+ 'tmp_mean_value'] = param.data.mean()
85
+
86
+ # pass `params_init_info` to all submodules
87
+ # All submodules share the same `params_init_info`,
88
+ # so it will be updated when parameters are
89
+ # modified at any level of the model.
90
+ for sub_module in self.modules():
91
+ sub_module._params_init_info = self._params_init_info
92
+
93
+ # Get the initialized logger, if not exist,
94
+ # create a logger named `mmcv`
95
+ logger_names = list(logger_initialized.keys())
96
+ logger_name = logger_names[0] if logger_names else 'mmcv'
97
+
98
+ from ..cnn import initialize
99
+ from ..cnn.utils.weight_init import update_init_info
100
+ module_name = self.__class__.__name__
101
+ if not self._is_init:
102
+ if self.init_cfg:
103
+ print_log(
104
+ f'initialize {module_name} with init_cfg {self.init_cfg}',
105
+ logger=logger_name)
106
+ initialize(self, self.init_cfg)
107
+ if isinstance(self.init_cfg, dict):
108
+ # prevent the parameters of
109
+ # the pre-trained model
110
+ # from being overwritten by
111
+ # the `init_weights`
112
+ if self.init_cfg['type'] == 'Pretrained':
113
+ return
114
+
115
+ for m in self.children():
116
+ if hasattr(m, 'init_weights'):
117
+ m.init_weights()
118
+ # users may overload the `init_weights`
119
+ update_init_info(
120
+ m,
121
+ init_info=f'Initialized by '
122
+ f'user-defined `init_weights`'
123
+ f' in {m.__class__.__name__} ')
124
+
125
+ self._is_init = True
126
+ else:
127
+ warnings.warn(f'init_weights of {self.__class__.__name__} has '
128
+ f'been called more than once.')
129
+
130
+ if is_top_level_module:
131
+ self._dump_init_info(logger_name)
132
+
133
+ for sub_module in self.modules():
134
+ del sub_module._params_init_info
135
+
136
+ @master_only
137
+ def _dump_init_info(self, logger_name):
138
+ """Dump the initialization information to a file named
139
+ `initialization.log.json` in workdir.
140
+
141
+ Args:
142
+ logger_name (str): The name of logger.
143
+ """
144
+
145
+ logger = get_logger(logger_name)
146
+
147
+ with_file_handler = False
148
+ # dump the information to the logger file if there is a `FileHandler`
149
+ for handler in logger.handlers:
150
+ if isinstance(handler, FileHandler):
151
+ handler.stream.write(
152
+ 'Name of parameter - Initialization information\n')
153
+ for name, param in self.named_parameters():
154
+ handler.stream.write(
155
+ f'\n{name} - {param.shape}: '
156
+ f"\n{self._params_init_info[param]['init_info']} \n")
157
+ handler.stream.flush()
158
+ with_file_handler = True
159
+ if not with_file_handler:
160
+ for name, param in self.named_parameters():
161
+ print_log(
162
+ f'\n{name} - {param.shape}: '
163
+ f"\n{self._params_init_info[param]['init_info']} \n ",
164
+ logger=logger_name)
165
+
166
+ def __repr__(self):
167
+ s = super().__repr__()
168
+ if self.init_cfg:
169
+ s += f'\ninit_cfg={self.init_cfg}'
170
+ return s
171
+
172
+
173
+ class Sequential(BaseModule, nn.Sequential):
174
+ """Sequential module in openmmlab.
175
+
176
+ Args:
177
+ init_cfg (dict, optional): Initialization config dict.
178
+ """
179
+
180
+ def __init__(self, *args, init_cfg=None):
181
+ BaseModule.__init__(self, init_cfg)
182
+ nn.Sequential.__init__(self, *args)
183
+
184
+
185
+ class ModuleList(BaseModule, nn.ModuleList):
186
+ """ModuleList in openmmlab.
187
+
188
+ Args:
189
+ modules (iterable, optional): an iterable of modules to add.
190
+ init_cfg (dict, optional): Initialization config dict.
191
+ """
192
+
193
+ def __init__(self, modules=None, init_cfg=None):
194
+ BaseModule.__init__(self, init_cfg)
195
+ nn.ModuleList.__init__(self, modules)
Text2Video-Zero-main/annotator/uniformer/mmcv/runner/base_runner.py ADDED
@@ -0,0 +1,542 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import copy
3
+ import logging
4
+ import os.path as osp
5
+ import warnings
6
+ from abc import ABCMeta, abstractmethod
7
+
8
+ import torch
9
+ from torch.optim import Optimizer
10
+
11
+ import annotator.uniformer.mmcv as mmcv
12
+ from ..parallel import is_module_wrapper
13
+ from .checkpoint import load_checkpoint
14
+ from .dist_utils import get_dist_info
15
+ from .hooks import HOOKS, Hook
16
+ from .log_buffer import LogBuffer
17
+ from .priority import Priority, get_priority
18
+ from .utils import get_time_str
19
+
20
+
21
class BaseRunner(metaclass=ABCMeta):
    """The base class of Runner, a training helper for PyTorch.

    All subclasses should implement the following APIs:

    - ``run()``
    - ``train()``
    - ``val()``
    - ``save_checkpoint()``

    Args:
        model (:obj:`torch.nn.Module`): The model to be run.
        batch_processor (callable): A callable method that process a data
            batch. The interface of this method should be
            `batch_processor(model, data, train_mode) -> dict`
        optimizer (dict or :obj:`torch.optim.Optimizer`): It can be either an
            optimizer (in most cases) or a dict of optimizers (in models that
            requires more than one optimizer, e.g., GAN).
        work_dir (str, optional): The working directory to save checkpoints
            and logs. Defaults to None.
        logger (:obj:`logging.Logger`): Logger used during training.
            Defaults to None. (The default value is just for backward
            compatibility)
        meta (dict | None): A dict records some import information such as
            environment info and seed, which will be logged in logger hook.
            Defaults to None.
        max_epochs (int, optional): Total training epochs.
        max_iters (int, optional): Total training iterations.
    """

    def __init__(self,
                 model,
                 batch_processor=None,
                 optimizer=None,
                 work_dir=None,
                 logger=None,
                 meta=None,
                 max_iters=None,
                 max_epochs=None):
        if batch_processor is not None:
            if not callable(batch_processor):
                raise TypeError('batch_processor must be callable, '
                                f'but got {type(batch_processor)}')
            warnings.warn('batch_processor is deprecated, please implement '
                          'train_step() and val_step() in the model instead.')
            # raise an error if `batch_processor` is not None and
            # `model.train_step()` exists
            if is_module_wrapper(model):
                _model = model.module
            else:
                _model = model
            if hasattr(_model, 'train_step') or hasattr(_model, 'val_step'):
                raise RuntimeError(
                    'batch_processor and model.train_step()/model.val_step() '
                    'cannot be both available.')
        else:
            assert hasattr(model, 'train_step')

        # check the type of `optimizer`
        if isinstance(optimizer, dict):
            for name, optim in optimizer.items():
                if not isinstance(optim, Optimizer):
                    raise TypeError(
                        f'optimizer must be a dict of torch.optim.Optimizers, '
                        f'but optimizer["{name}"] is a {type(optim)}')
        elif not isinstance(optimizer, Optimizer) and optimizer is not None:
            raise TypeError(
                f'optimizer must be a torch.optim.Optimizer object '
                f'or dict or None, but got {type(optimizer)}')

        # check the type of `logger`
        if not isinstance(logger, logging.Logger):
            raise TypeError(f'logger must be a logging.Logger object, '
                            f'but got {type(logger)}')

        # check the type of `meta`
        if meta is not None and not isinstance(meta, dict):
            raise TypeError(
                f'meta must be a dict or None, but got {type(meta)}')

        self.model = model
        self.batch_processor = batch_processor
        self.optimizer = optimizer
        self.logger = logger
        self.meta = meta
        # create work_dir
        if mmcv.is_str(work_dir):
            self.work_dir = osp.abspath(work_dir)
            mmcv.mkdir_or_exist(self.work_dir)
        elif work_dir is None:
            self.work_dir = None
        else:
            raise TypeError('"work_dir" must be a str or None')

        # get model name from the model class
        if hasattr(self.model, 'module'):
            self._model_name = self.model.module.__class__.__name__
        else:
            self._model_name = self.model.__class__.__name__

        self._rank, self._world_size = get_dist_info()
        self.timestamp = get_time_str()
        self.mode = None
        self._hooks = []
        self._epoch = 0
        self._iter = 0
        self._inner_iter = 0

        if max_epochs is not None and max_iters is not None:
            raise ValueError(
                'Only one of `max_epochs` or `max_iters` can be set.')

        self._max_epochs = max_epochs
        self._max_iters = max_iters
        # TODO: Redesign LogBuffer, it is not flexible and elegant enough
        self.log_buffer = LogBuffer()

    @property
    def model_name(self):
        """str: Name of the model, usually the module class name."""
        return self._model_name

    @property
    def rank(self):
        """int: Rank of current process. (distributed training)"""
        return self._rank

    @property
    def world_size(self):
        """int: Number of processes participating in the job.
        (distributed training)"""
        return self._world_size

    @property
    def hooks(self):
        """list[:obj:`Hook`]: A list of registered hooks."""
        return self._hooks

    @property
    def epoch(self):
        """int: Current epoch."""
        return self._epoch

    @property
    def iter(self):
        """int: Current iteration."""
        return self._iter

    @property
    def inner_iter(self):
        """int: Iteration in an epoch."""
        return self._inner_iter

    @property
    def max_epochs(self):
        """int: Maximum training epochs."""
        return self._max_epochs

    @property
    def max_iters(self):
        """int: Maximum training iterations."""
        return self._max_iters

    @abstractmethod
    def train(self):
        """Run one training stage. Implemented by subclasses."""

    @abstractmethod
    def val(self):
        """Run one validation stage. Implemented by subclasses."""

    @abstractmethod
    def run(self, data_loaders, workflow, **kwargs):
        """Launch the training/validation workflow. Implemented by
        subclasses."""

    @abstractmethod
    def save_checkpoint(self,
                        out_dir,
                        filename_tmpl,
                        save_optimizer=True,
                        meta=None,
                        create_symlink=True):
        """Save a checkpoint to ``out_dir``. Implemented by subclasses."""

    def current_lr(self):
        """Get current learning rates.

        Returns:
            list[float] | dict[str, list[float]]: Current learning rates of all
                param groups. If the runner has a dict of optimizers, this
                method will return a dict.

        Raises:
            RuntimeError: If no optimizer is attached to the runner.
        """
        if isinstance(self.optimizer, torch.optim.Optimizer):
            lr = [group['lr'] for group in self.optimizer.param_groups]
        elif isinstance(self.optimizer, dict):
            lr = dict()
            for name, optim in self.optimizer.items():
                lr[name] = [group['lr'] for group in optim.param_groups]
        else:
            raise RuntimeError(
                'lr is not applicable because optimizer does not exist.')
        return lr

    def current_momentum(self):
        """Get current momentums.

        Returns:
            list[float] | dict[str, list[float]]: Current momentums of all
                param groups. If the runner has a dict of optimizers, this
                method will return a dict.

        Raises:
            RuntimeError: If no optimizer is attached to the runner.
        """

        def _get_momentum(optimizer):
            # SGD-family groups expose 'momentum'; Adam-family groups
            # expose 'betas' whose first element plays the same role.
            momentums = []
            for group in optimizer.param_groups:
                if 'momentum' in group.keys():
                    momentums.append(group['momentum'])
                elif 'betas' in group.keys():
                    momentums.append(group['betas'][0])
                else:
                    momentums.append(0)
            return momentums

        if self.optimizer is None:
            raise RuntimeError(
                'momentum is not applicable because optimizer does not exist.')
        elif isinstance(self.optimizer, torch.optim.Optimizer):
            momentums = _get_momentum(self.optimizer)
        elif isinstance(self.optimizer, dict):
            momentums = dict()
            for name, optim in self.optimizer.items():
                momentums[name] = _get_momentum(optim)
        return momentums

    def register_hook(self, hook, priority='NORMAL'):
        """Register a hook into the hook list.

        The hook will be inserted into a priority queue, with the specified
        priority (See :class:`Priority` for details of priorities).
        For hooks with the same priority, they will be triggered in the same
        order as they are registered.

        Args:
            hook (:obj:`Hook`): The hook to be registered.
            priority (int or str or :obj:`Priority`): Hook priority.
                Lower value means higher priority.
        """
        assert isinstance(hook, Hook)
        if hasattr(hook, 'priority'):
            raise ValueError('"priority" is a reserved attribute for hooks')
        priority = get_priority(priority)
        hook.priority = priority
        # insert the hook to a sorted list; scanning from the back keeps
        # registration order stable among equal priorities
        inserted = False
        for i in range(len(self._hooks) - 1, -1, -1):
            if priority >= self._hooks[i].priority:
                self._hooks.insert(i + 1, hook)
                inserted = True
                break
        if not inserted:
            self._hooks.insert(0, hook)

    def register_hook_from_cfg(self, hook_cfg):
        """Register a hook from its cfg.

        Args:
            hook_cfg (dict): Hook config. It should have at least keys 'type'
                and 'priority' indicating its type and priority.

        Notes:
            The specific hook class to register should not use 'type' and
            'priority' arguments during initialization.
        """
        hook_cfg = hook_cfg.copy()
        priority = hook_cfg.pop('priority', 'NORMAL')
        hook = mmcv.build_from_cfg(hook_cfg, HOOKS)
        self.register_hook(hook, priority=priority)

    def call_hook(self, fn_name):
        """Call all hooks.

        Args:
            fn_name (str): The function name in each hook to be called, such as
                "before_train_epoch".
        """
        for hook in self._hooks:
            getattr(hook, fn_name)(self)

    def get_hook_info(self):
        """Return a formatted summary of registered hooks grouped by stage."""
        stage_hook_map = {stage: [] for stage in Hook.stages}
        for hook in self.hooks:
            try:
                priority = Priority(hook.priority).name
            except ValueError:
                # custom integer priority with no named Priority member
                priority = hook.priority
            classname = hook.__class__.__name__
            hook_info = f'({priority:<12}) {classname:<35}'
            for trigger_stage in hook.get_triggered_stages():
                stage_hook_map[trigger_stage].append(hook_info)

        stage_hook_infos = []
        for stage in Hook.stages:
            hook_infos = stage_hook_map[stage]
            if len(hook_infos) > 0:
                info = f'{stage}:\n'
                info += '\n'.join(hook_infos)
                info += '\n -------------------- '
                stage_hook_infos.append(info)
        return '\n'.join(stage_hook_infos)

    def load_checkpoint(self,
                        filename,
                        map_location='cpu',
                        strict=False,
                        revise_keys=((r'^module.', ''), )):
        """Load a checkpoint into ``self.model``.

        Args:
            filename (str): Checkpoint file path or URL.
            map_location (str): Same as :func:`torch.load`. Default: 'cpu'.
            strict (bool): Whether to require that the state-dict keys match
                the model exactly. Default: False.
            revise_keys (iterable of (pattern, replacement)): Regex rewrites
                applied to state-dict keys before loading; the default strips
                a leading ``module.`` wrapper prefix. An immutable tuple is
                used as the default to avoid the shared mutable-default-
                argument pitfall (the previous default was a list).

        Returns:
            dict or OrderedDict: The loaded checkpoint.
        """
        return load_checkpoint(
            self.model,
            filename,
            map_location,
            strict,
            self.logger,
            revise_keys=revise_keys)

    def resume(self,
               checkpoint,
               resume_optimizer=True,
               map_location='default'):
        """Resume model, optimizer and progress counters from a checkpoint.

        Args:
            checkpoint (str): Checkpoint path to resume from.
            resume_optimizer (bool): Whether to restore the optimizer state.
                Default: True.
            map_location (str): 'default' maps to the current CUDA device
                when available; otherwise same as :func:`torch.load`.
        """
        if map_location == 'default':
            if torch.cuda.is_available():
                device_id = torch.cuda.current_device()
                checkpoint = self.load_checkpoint(
                    checkpoint,
                    map_location=lambda storage, loc: storage.cuda(device_id))
            else:
                checkpoint = self.load_checkpoint(checkpoint)
        else:
            checkpoint = self.load_checkpoint(
                checkpoint, map_location=map_location)

        self._epoch = checkpoint['meta']['epoch']
        self._iter = checkpoint['meta']['iter']
        if self.meta is None:
            self.meta = {}
        self.meta.setdefault('hook_msgs', {})
        # load `last_ckpt`, `best_score`, `best_ckpt`, etc. for hook messages
        self.meta['hook_msgs'].update(checkpoint['meta'].get('hook_msgs', {}))

        # Re-calculate the number of iterations when resuming
        # models with different number of GPUs
        if 'config' in checkpoint['meta']:
            config = mmcv.Config.fromstring(
                checkpoint['meta']['config'], file_format='.py')
            previous_gpu_ids = config.get('gpu_ids', None)
            if previous_gpu_ids and len(previous_gpu_ids) > 0 and len(
                    previous_gpu_ids) != self.world_size:
                self._iter = int(self._iter * len(previous_gpu_ids) /
                                 self.world_size)
                self.logger.info('the iteration number is changed due to '
                                 'change of GPU number')

        # resume meta information meta
        # NOTE(review): this replaces self.meta wholesale, discarding the
        # hook_msgs merge performed just above whenever the checkpoint meta
        # lacks those keys — confirm this is the intended precedence.
        self.meta = checkpoint['meta']

        if 'optimizer' in checkpoint and resume_optimizer:
            if isinstance(self.optimizer, Optimizer):
                self.optimizer.load_state_dict(checkpoint['optimizer'])
            elif isinstance(self.optimizer, dict):
                for k in self.optimizer.keys():
                    self.optimizer[k].load_state_dict(
                        checkpoint['optimizer'][k])
            else:
                raise TypeError(
                    'Optimizer should be dict or torch.optim.Optimizer '
                    f'but got {type(self.optimizer)}')

        self.logger.info('resumed epoch %d, iter %d', self.epoch, self.iter)

    def register_lr_hook(self, lr_config):
        """Register the LR-updater hook (priority VERY_HIGH) from config."""
        if lr_config is None:
            return
        elif isinstance(lr_config, dict):
            assert 'policy' in lr_config
            policy_type = lr_config.pop('policy')
            # If the type of policy is all in lower case, e.g., 'cyclic',
            # then its first letter will be capitalized, e.g., to be 'Cyclic'.
            # This is for the convenient usage of Lr updater.
            # Since this is not applicable for `
            # CosineAnnealingLrUpdater`,
            # the string will not be changed if it contains capital letters.
            if policy_type == policy_type.lower():
                policy_type = policy_type.title()
            hook_type = policy_type + 'LrUpdaterHook'
            lr_config['type'] = hook_type
            hook = mmcv.build_from_cfg(lr_config, HOOKS)
        else:
            hook = lr_config
        self.register_hook(hook, priority='VERY_HIGH')

    def register_momentum_hook(self, momentum_config):
        """Register the momentum-updater hook (priority HIGH) from config."""
        if momentum_config is None:
            return
        if isinstance(momentum_config, dict):
            assert 'policy' in momentum_config
            policy_type = momentum_config.pop('policy')
            # If the type of policy is all in lower case, e.g., 'cyclic',
            # then its first letter will be capitalized, e.g., to be 'Cyclic'.
            # This is for the convenient usage of momentum updater.
            # Since this is not applicable for
            # `CosineAnnealingMomentumUpdater`,
            # the string will not be changed if it contains capital letters.
            if policy_type == policy_type.lower():
                policy_type = policy_type.title()
            hook_type = policy_type + 'MomentumUpdaterHook'
            momentum_config['type'] = hook_type
            hook = mmcv.build_from_cfg(momentum_config, HOOKS)
        else:
            hook = momentum_config
        self.register_hook(hook, priority='HIGH')

    def register_optimizer_hook(self, optimizer_config):
        """Register the optimizer hook (priority ABOVE_NORMAL) from config."""
        if optimizer_config is None:
            return
        if isinstance(optimizer_config, dict):
            optimizer_config.setdefault('type', 'OptimizerHook')
            hook = mmcv.build_from_cfg(optimizer_config, HOOKS)
        else:
            hook = optimizer_config
        self.register_hook(hook, priority='ABOVE_NORMAL')

    def register_checkpoint_hook(self, checkpoint_config):
        """Register the checkpoint hook (priority NORMAL) from config."""
        if checkpoint_config is None:
            return
        if isinstance(checkpoint_config, dict):
            checkpoint_config.setdefault('type', 'CheckpointHook')
            hook = mmcv.build_from_cfg(checkpoint_config, HOOKS)
        else:
            hook = checkpoint_config
        self.register_hook(hook, priority='NORMAL')

    def register_logger_hooks(self, log_config):
        """Register all logger hooks (priority VERY_LOW) from config."""
        if log_config is None:
            return
        log_interval = log_config['interval']
        for info in log_config['hooks']:
            logger_hook = mmcv.build_from_cfg(
                info, HOOKS, default_args=dict(interval=log_interval))
            self.register_hook(logger_hook, priority='VERY_LOW')

    def register_timer_hook(self, timer_config):
        """Register the iteration-timer hook (priority LOW) from config."""
        if timer_config is None:
            return
        if isinstance(timer_config, dict):
            timer_config_ = copy.deepcopy(timer_config)
            hook = mmcv.build_from_cfg(timer_config_, HOOKS)
        else:
            hook = timer_config
        self.register_hook(hook, priority='LOW')

    def register_custom_hooks(self, custom_config):
        """Register user-defined hooks; dicts are built, hooks used as-is."""
        if custom_config is None:
            return

        if not isinstance(custom_config, list):
            custom_config = [custom_config]

        for item in custom_config:
            if isinstance(item, dict):
                self.register_hook_from_cfg(item)
            else:
                self.register_hook(item, priority='NORMAL')

    def register_profiler_hook(self, profiler_config):
        """Register the profiler hook from config (default priority)."""
        if profiler_config is None:
            return
        if isinstance(profiler_config, dict):
            profiler_config.setdefault('type', 'ProfilerHook')
            hook = mmcv.build_from_cfg(profiler_config, HOOKS)
        else:
            hook = profiler_config
        self.register_hook(hook)

    def register_training_hooks(self,
                                lr_config,
                                optimizer_config=None,
                                checkpoint_config=None,
                                log_config=None,
                                momentum_config=None,
                                timer_config=dict(type='IterTimerHook'),
                                custom_hooks_config=None):
        """Register default and custom hooks for training.

        Default and custom hooks include:

        +----------------------+-------------------------+
        | Hooks                | Priority                |
        +======================+=========================+
        | LrUpdaterHook        | VERY_HIGH (10)          |
        +----------------------+-------------------------+
        | MomentumUpdaterHook  | HIGH (30)               |
        +----------------------+-------------------------+
        | OptimizerStepperHook | ABOVE_NORMAL (40)       |
        +----------------------+-------------------------+
        | CheckpointSaverHook  | NORMAL (50)             |
        +----------------------+-------------------------+
        | IterTimerHook        | LOW (70)                |
        +----------------------+-------------------------+
        | LoggerHook(s)        | VERY_LOW (90)           |
        +----------------------+-------------------------+
        | CustomHook(s)        | defaults to NORMAL (50) |
        +----------------------+-------------------------+

        If custom hooks have same priority with default hooks, custom hooks
        will be triggered after default hooks.
        """
        self.register_lr_hook(lr_config)
        self.register_momentum_hook(momentum_config)
        self.register_optimizer_hook(optimizer_config)
        self.register_checkpoint_hook(checkpoint_config)
        self.register_timer_hook(timer_config)
        self.register_logger_hooks(log_config)
        self.register_custom_hooks(custom_hooks_config)
Text2Video-Zero-main/annotator/uniformer/mmcv/runner/builder.py ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import copy
3
+
4
+ from ..utils import Registry
5
+
6
# Registries holding runner classes and runner-constructor classes.
RUNNERS = Registry('runner')
RUNNER_BUILDERS = Registry('runner builder')


def build_runner_constructor(cfg):
    """Build a runner constructor from its config dict."""
    return RUNNER_BUILDERS.build(cfg)


def build_runner(cfg, default_args=None):
    """Build a runner through its (possibly custom) constructor.

    Args:
        cfg (dict): Runner config; may carry a ``constructor`` key naming a
            registered runner-constructor class (defaults to
            ``DefaultRunnerConstructor``).
        default_args (dict, optional): Default keyword arguments forwarded to
            the runner. Defaults to None.

    Returns:
        The runner instance produced by the constructor.
    """
    # Deep-copy so popping 'constructor' never mutates the caller's config.
    runner_cfg = copy.deepcopy(cfg)
    constructor_type = runner_cfg.pop('constructor',
                                      'DefaultRunnerConstructor')
    runner_constructor = build_runner_constructor(
        dict(
            type=constructor_type,
            runner_cfg=runner_cfg,
            default_args=default_args))
    return runner_constructor()
Text2Video-Zero-main/annotator/uniformer/mmcv/runner/checkpoint.py ADDED
@@ -0,0 +1,707 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import io
3
+ import os
4
+ import os.path as osp
5
+ import pkgutil
6
+ import re
7
+ import time
8
+ import warnings
9
+ from collections import OrderedDict
10
+ from importlib import import_module
11
+ from tempfile import TemporaryDirectory
12
+
13
+ import torch
14
+ import torchvision
15
+ from torch.optim import Optimizer
16
+ from torch.utils import model_zoo
17
+
18
+ import annotator.uniformer.mmcv as mmcv
19
+ from ..fileio import FileClient
20
+ from ..fileio import load as load_file
21
+ from ..parallel import is_module_wrapper
22
+ from ..utils import mkdir_or_exist
23
+ from .dist_utils import get_dist_info
24
+
25
# Environment variables (and fallback) controlling where mmcv caches files.
ENV_MMCV_HOME = 'MMCV_HOME'
ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
DEFAULT_CACHE_DIR = '~/.cache'


def _get_mmcv_home():
    """Return the mmcv cache directory, creating it if necessary.

    Resolution order: ``$MMCV_HOME``, then ``$XDG_CACHE_HOME/mmcv``,
    then ``~/.cache/mmcv``.
    """
    cache_root = os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR)
    mmcv_home = os.path.expanduser(
        os.getenv(ENV_MMCV_HOME, os.path.join(cache_root, 'mmcv')))
    mkdir_or_exist(mmcv_home)
    return mmcv_home
39
+
40
+
41
def load_state_dict(module, state_dict, strict=False, logger=None):
    """Load state_dict to a module.

    This method is modified from :meth:`torch.nn.Module.load_state_dict`.
    Default value for ``strict`` is set to ``False`` and the message for
    param mismatch will be shown even if strict is False.

    Args:
        module (Module): Module that receives the state_dict.
        state_dict (OrderedDict): Weights.
        strict (bool): whether to strictly enforce that the keys
            in :attr:`state_dict` match the keys returned by this module's
            :meth:`~torch.nn.Module.state_dict` function. Default: ``False``.
        logger (:obj:`logging.Logger`, optional): Logger to log the error
            message. If not specified, print function will be used.
    """
    # These lists are mutated in place by _load_from_state_dict during the
    # recursive walk below.
    unexpected_keys = []
    all_missing_keys = []
    err_msg = []

    # Preserve checkpoint version metadata on the shallow copy so that
    # _load_from_state_dict can apply per-submodule conversions.
    metadata = getattr(state_dict, '_metadata', None)
    state_dict = state_dict.copy()
    if metadata is not None:
        state_dict._metadata = metadata

    # use _load_from_state_dict to enable checkpoint version control
    def load(module, prefix=''):
        # recursively check parallel module in case that the model has a
        # complicated structure, e.g., nn.Module(nn.Module(DDP))
        if is_module_wrapper(module):
            module = module.module
        # prefix[:-1] strips the trailing '.' to look up this submodule's
        # metadata entry.
        local_metadata = {} if metadata is None else metadata.get(
            prefix[:-1], {})
        module._load_from_state_dict(state_dict, prefix, local_metadata, True,
                                     all_missing_keys, unexpected_keys,
                                     err_msg)
        for name, child in module._modules.items():
            if child is not None:
                load(child, prefix + name + '.')

    load(module)
    load = None  # break load->load reference cycle

    # ignore "num_batches_tracked" of BN layers
    missing_keys = [
        key for key in all_missing_keys if 'num_batches_tracked' not in key
    ]

    if unexpected_keys:
        err_msg.append('unexpected key in source '
                       f'state_dict: {", ".join(unexpected_keys)}\n')
    if missing_keys:
        err_msg.append(
            f'missing keys in source state_dict: {", ".join(missing_keys)}\n')

    # Only rank 0 reports, to avoid duplicated messages in distributed runs.
    rank, _ = get_dist_info()
    if len(err_msg) > 0 and rank == 0:
        err_msg.insert(
            0, 'The model and loaded state dict do not match exactly\n')
        err_msg = '\n'.join(err_msg)
        if strict:
            raise RuntimeError(err_msg)
        elif logger is not None:
            logger.warning(err_msg)
        else:
            print(err_msg)
107
+
108
+
109
def get_torchvision_models():
    """Collect the download URLs published by the torchvision model zoo.

    Walks every (non-package) submodule of ``torchvision.models`` and merges
    each module's ``model_urls`` table, when present, into one dict.
    """
    model_urls = dict()
    for _, name, ispkg in pkgutil.walk_packages(torchvision.models.__path__):
        if ispkg:
            continue
        zoo_module = import_module(f'torchvision.models.{name}')
        # Not every submodule publishes a model_urls table.
        model_urls.update(getattr(zoo_module, 'model_urls', {}))
    return model_urls
119
+
120
+
121
def get_external_models():
    """Return the model-zoo URL table, merged with any user overrides.

    The default table ships with mmcv (``model_zoo/open_mmlab.json``);
    entries from ``<mmcv_home>/open_mmlab.json`` take precedence when that
    file exists.
    """
    default_json_path = osp.join(mmcv.__path__[0], 'model_zoo/open_mmlab.json')
    urls = load_file(default_json_path)
    assert isinstance(urls, dict)
    external_json_path = osp.join(_get_mmcv_home(), 'open_mmlab.json')
    if osp.exists(external_json_path):
        external_urls = load_file(external_json_path)
        assert isinstance(external_urls, dict)
        # user-provided entries override the bundled defaults
        urls.update(external_urls)
    return urls
133
+
134
+
135
def get_mmcls_models():
    """Return the mmcls model-zoo URL table bundled with mmcv."""
    mmcls_json_path = osp.join(mmcv.__path__[0], 'model_zoo/mmcls.json')
    return load_file(mmcls_json_path)
140
+
141
+
142
def get_deprecated_model_names():
    """Return the deprecated-name mapping bundled with mmcv
    (``model_zoo/deprecated.json``)."""
    deprecate_json_path = osp.join(mmcv.__path__[0],
                                   'model_zoo/deprecated.json')
    deprecate_urls = load_file(deprecate_json_path)
    assert isinstance(deprecate_urls, dict)
    return deprecate_urls
149
+
150
+
151
+ def _process_mmcls_checkpoint(checkpoint):
152
+ state_dict = checkpoint['state_dict']
153
+ new_state_dict = OrderedDict()
154
+ for k, v in state_dict.items():
155
+ if k.startswith('backbone.'):
156
+ new_state_dict[k[9:]] = v
157
+ new_checkpoint = dict(state_dict=new_state_dict)
158
+
159
+ return new_checkpoint
160
+
161
+
162
class CheckpointLoader:
    """A general checkpoint loader to manage all schemes."""

    # Maps URI prefix -> loader callable; kept sorted in reverse lexical
    # order so longer (more specific) prefixes are matched first.
    _schemes = {}

    @classmethod
    def _register_scheme(cls, prefixes, loader, force=False):
        """Insert ``loader`` under each prefix, then re-sort the table.

        Raises:
            KeyError: If a prefix is already registered and ``force`` is
                False.
        """
        if isinstance(prefixes, str):
            prefixes = [prefixes]
        else:
            assert isinstance(prefixes, (list, tuple))
        for prefix in prefixes:
            if (prefix not in cls._schemes) or force:
                cls._schemes[prefix] = loader
            else:
                raise KeyError(
                    f'{prefix} is already registered as a loader backend, '
                    'add "force=True" if you want to override it')
        # sort, longer prefixes take priority
        cls._schemes = OrderedDict(
            sorted(cls._schemes.items(), key=lambda t: t[0], reverse=True))

    @classmethod
    def register_scheme(cls, prefixes, loader=None, force=False):
        """Register a loader to CheckpointLoader.

        This method can be used as a normal class method or a decorator.

        Args:
            prefixes (str or list[str] or tuple[str]):
                The prefix of the registered loader.
            loader (function, optional): The loader function to be registered.
                When this method is used as a decorator, loader is None.
                Defaults to None.
            force (bool, optional): Whether to override the loader
                if the prefix has already been registered. Defaults to False.
        """

        if loader is not None:
            cls._register_scheme(prefixes, loader, force=force)
            return

        def _register(loader_cls):
            cls._register_scheme(prefixes, loader_cls, force=force)
            return loader_cls

        return _register

    @classmethod
    def _get_checkpoint_loader(cls, path):
        """Finds a loader that supports the given path. Falls back to the
        local loader if no other loader is found (the local loader is
        registered under the empty prefix, which matches every path).

        Args:
            path (str): checkpoint path

        Returns:
            loader (function): checkpoint loader, or None if nothing is
                registered that matches ``path``.
        """

        for p in cls._schemes:
            if path.startswith(p):
                return cls._schemes[p]

    @classmethod
    def load_checkpoint(cls, filename, map_location=None, logger=None):
        """load checkpoint through URL scheme path.

        Args:
            filename (str): checkpoint file name with given prefix
            map_location (str, optional): Same as :func:`torch.load`.
                Default: None
            logger (:mod:`logging.Logger`, optional): The logger for message.
                Default: None

        Returns:
            dict or OrderedDict: The loaded checkpoint.
        """

        checkpoint_loader = cls._get_checkpoint_loader(filename)
        class_name = checkpoint_loader.__name__
        # class_name[10:] strips the 'load_from_' prefix of loader names;
        # FIX: the message previously omitted the checkpoint path entirely.
        mmcv.print_log(
            f'load checkpoint from {class_name[10:]} path: {filename}', logger)
        return checkpoint_loader(filename, map_location)
246
+
247
+
248
@CheckpointLoader.register_scheme(prefixes='')
def load_from_local(filename, map_location):
    """load checkpoint by local file path.

    Registered under the empty prefix, so it is the fallback loader for any
    path no other scheme claims.

    Args:
        filename (str): local checkpoint file path
        map_location (str, optional): Same as :func:`torch.load`.

    Returns:
        dict or OrderedDict: The loaded checkpoint.

    Raises:
        IOError: If ``filename`` does not point to an existing file.
    """

    if not osp.isfile(filename):
        # FIX: the f-string previously carried no placeholder, so the error
        # never told the user which path was missing.
        raise IOError(f'{filename} is not a checkpoint file')
    checkpoint = torch.load(filename, map_location=map_location)
    return checkpoint
264
+
265
+
266
@CheckpointLoader.register_scheme(prefixes=('http://', 'https://'))
def load_from_http(filename, map_location=None, model_dir=None):
    """load checkpoint through HTTP or HTTPS scheme path. In distributed
    setting, this function only download checkpoint at local rank 0.

    Args:
        filename (str): checkpoint file path with modelzoo or
            torchvision prefix
        map_location (str, optional): Same as :func:`torch.load`.
        model_dir (string, optional): directory in which to save the object,
            Default: None

    Returns:
        dict or OrderedDict: The loaded checkpoint.
    """

    def _fetch():
        return model_zoo.load_url(
            filename, model_dir=model_dir, map_location=map_location)

    rank, world_size = get_dist_info()
    # The environment variable is the authoritative local rank when set.
    rank = int(os.environ.get('LOCAL_RANK', rank))
    # Local rank 0 downloads first; after the barrier the remaining ranks
    # read the same file from the shared cache instead of re-downloading.
    if rank == 0:
        checkpoint = _fetch()
    if world_size > 1:
        torch.distributed.barrier()
    if rank > 0:
        checkpoint = _fetch()
    return checkpoint
292
+
293
+
294
@CheckpointLoader.register_scheme(prefixes='pavi://')
def load_from_pavi(filename, map_location=None):
    """load checkpoint through the file path prefixed with pavi. In distributed
    setting, this function download ckpt at all ranks to different temporary
    directories.

    Args:
        filename (str): checkpoint file path with pavi prefix
        map_location (str, optional): Same as :func:`torch.load`.
            Default: None

    Returns:
        dict or OrderedDict: The loaded checkpoint.

    Raises:
        AssertionError: If ``filename`` does not start with ``pavi://``.
        ImportError: If the optional ``pavi`` package is not installed.
    """
    # FIX: the assertion message previously carried no placeholder, hiding
    # the offending filename from the user.
    assert filename.startswith('pavi://'), \
        f'Expected filename startswith `pavi://`, but get {filename}'
    # strip the 'pavi://' prefix (7 characters)
    model_path = filename[7:]

    try:
        from pavi import modelcloud
    except ImportError:
        raise ImportError(
            'Please install pavi to load checkpoint from modelcloud.')

    model = modelcloud.get(model_path)
    # Download into a per-call temporary directory so concurrent ranks do
    # not clobber each other's files.
    with TemporaryDirectory() as tmp_dir:
        downloaded_file = osp.join(tmp_dir, model.name)
        model.download(downloaded_file)
        checkpoint = torch.load(downloaded_file, map_location=map_location)
    return checkpoint
324
+
325
+
326
@CheckpointLoader.register_scheme(prefixes='s3://')
def load_from_ceph(filename, map_location=None, backend='petrel'):
    """Load a checkpoint from a path prefixed with ``s3://``.

    In a distributed setting this function downloads the checkpoint on all
    ranks, each into its own temporary location.

    Args:
        filename (str): Checkpoint file path with s3 prefix.
        map_location (str, optional): Same as :func:`torch.load`.
        backend (str, optional): The storage backend type. Options are 'ceph',
            'petrel'. Default: 'petrel'.

    .. warning::
        :class:`mmcv.fileio.file_client.CephBackend` will be deprecated,
        please use :class:`mmcv.fileio.file_client.PetrelBackend` instead.

    Returns:
        dict or OrderedDict: The loaded checkpoint.

    Raises:
        ValueError: If ``backend`` is neither 'ceph' nor 'petrel'.
    """
    allowed_backends = ['ceph', 'petrel']
    if backend not in allowed_backends:
        raise ValueError(f'Load from Backend {backend} is not supported.')

    if backend == 'ceph':
        warnings.warn(
            'CephBackend will be deprecated, please use PetrelBackend instead')

    # Both backends claim the 's3://' prefix. Prefer the requested one;
    # if its client library is not installed, fall back to the other.
    try:
        file_client = FileClient(backend=backend)
    except ImportError:
        fallback = 'petrel' if backend == 'ceph' else 'ceph'
        file_client = FileClient(backend=fallback)

    with io.BytesIO(file_client.get(filename)) as buf:
        return torch.load(buf, map_location=map_location)
@CheckpointLoader.register_scheme(prefixes=('modelzoo://', 'torchvision://'))
def load_from_torchvision(filename, map_location=None):
    """Load a checkpoint from a ``modelzoo://`` or ``torchvision://`` path.

    Args:
        filename (str): Checkpoint file path with modelzoo or
            torchvision prefix.
        map_location (str, optional): Same as :func:`torch.load`.

    Returns:
        dict or OrderedDict: The loaded checkpoint.
    """
    model_urls = get_torchvision_models()
    if filename.startswith('modelzoo://'):
        warnings.warn('The URL scheme of "modelzoo://" is deprecated, please '
                      'use "torchvision://" instead')
        # Strip the scheme prefix to obtain the model name.
        model_name = filename[len('modelzoo://'):]
    else:
        model_name = filename[len('torchvision://'):]
    return load_from_http(model_urls[model_name], map_location=map_location)
@CheckpointLoader.register_scheme(prefixes=('open-mmlab://', 'openmmlab://'))
def load_from_openmmlab(filename, map_location=None):
    """Load a checkpoint from an ``open-mmlab://`` or ``openmmlab://`` path.

    Args:
        filename (str): Checkpoint file path with open-mmlab or
            openmmlab prefix.
        map_location (str, optional): Same as :func:`torch.load`.
            Default: None.

    Returns:
        dict or OrderedDict: The loaded checkpoint.

    Raises:
        IOError: If the resolved local file does not exist.
    """

    model_urls = get_external_models()
    prefix_str = 'open-mmlab://'
    if filename.startswith(prefix_str):
        model_name = filename[13:]
    else:
        model_name = filename[12:]
        prefix_str = 'openmmlab://'

    # Some model names are deprecated; map them to their replacement and
    # warn the user which name to use going forward.
    deprecated_urls = get_deprecated_model_names()
    if model_name in deprecated_urls:
        warnings.warn(f'{prefix_str}{model_name} is deprecated in favor '
                      f'of {prefix_str}{deprecated_urls[model_name]}')
        model_name = deprecated_urls[model_name]
    model_url = model_urls[model_name]
    # check if is url
    if model_url.startswith(('http://', 'https://')):
        checkpoint = load_from_http(model_url, map_location=map_location)
    else:
        # Entry is a path relative to the mmcv home directory.
        filename = osp.join(_get_mmcv_home(), model_url)
        if not osp.isfile(filename):
            # Include the resolved path so the error is actionable
            # (the f-string previously had no interpolation).
            raise IOError(f'{filename} is not a checkpoint file')
        checkpoint = torch.load(filename, map_location=map_location)
    return checkpoint
@CheckpointLoader.register_scheme(prefixes='mmcls://')
def load_from_mmcls(filename, map_location=None):
    """Load a checkpoint from a path prefixed with ``mmcls://``.

    Args:
        filename (str): Checkpoint file path with mmcls prefix.
        map_location (str, optional): Same as :func:`torch.load`.

    Returns:
        dict or OrderedDict: The loaded checkpoint, post-processed into
        the expected key layout.
    """

    model_name = filename[8:]  # drop the 'mmcls://' prefix
    raw_checkpoint = load_from_http(
        get_mmcls_models()[model_name], map_location=map_location)
    return _process_mmcls_checkpoint(raw_checkpoint)
def _load_checkpoint(filename, map_location=None, logger=None):
    """Dispatch checkpoint loading to the registered scheme loaders.

    Thin wrapper around :meth:`CheckpointLoader.load_checkpoint`, which
    selects the loader matching the prefix of ``filename`` (local path,
    URL, ``torchvision://xxx``, ``open-mmlab://xxx``, ...). Please refer
    to ``docs/model_zoo.md`` for details.

    Args:
        filename (str): Checkpoint path or URI.
        map_location (str, optional): Same as :func:`torch.load`.
            Default: None.
        logger (:mod:`logging.Logger`, optional): The logger for error
            message. Default: None.

    Returns:
        dict or OrderedDict: The loaded checkpoint. It can be either an
        OrderedDict storing model weights or a dict containing other
        information, which depends on the checkpoint.
    """
    return CheckpointLoader.load_checkpoint(filename, map_location, logger)
def _load_checkpoint_with_prefix(prefix, filename, map_location=None):
    """Load the sub-module weights selected by ``prefix`` from a checkpoint.

    Args:
        prefix (str): The prefix of the sub-module; a trailing dot is
            appended automatically when missing.
        filename (str): Accept local filepath, URL, ``torchvision://xxx``,
            ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
            details.
        map_location (str | None): Same as :func:`torch.load`. Default: None.

    Returns:
        dict or OrderedDict: The loaded state dict with the prefix stripped
        from every key.
    """

    checkpoint = _load_checkpoint(filename, map_location=map_location)

    # Checkpoints may wrap the weights under 'state_dict' or be a bare dict.
    state_dict = checkpoint.get('state_dict', checkpoint)

    if not prefix.endswith('.'):
        prefix = prefix + '.'
    cut = len(prefix)

    # Keep only the keys under the prefix, with the prefix removed.
    selected = {
        key[cut:]: weight
        for key, weight in state_dict.items() if key.startswith(prefix)
    }

    assert selected, f'{prefix} is not in the pretrained model'
    return selected
def load_checkpoint(model,
                    filename,
                    map_location=None,
                    strict=False,
                    logger=None,
                    revise_keys=[(r'^module\.', '')]):
    """Load checkpoint from a file or URI.

    Args:
        model (Module): Module to load checkpoint.
        filename (str): Accept local filepath, URL, ``torchvision://xxx``,
            ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
            details.
        map_location (str): Same as :func:`torch.load`.
        strict (bool): Whether to allow different params for the model and
            checkpoint.
        logger (:mod:`logging.Logger` or None): The logger for error message.
        revise_keys (list): A list of customized keywords to modify the
            state_dict in checkpoint. Each item is a (pattern, replacement)
            pair of the regular expression operations. Default: strip
            the prefix 'module.' by [(r'^module\\.', '')].

    Returns:
        dict or OrderedDict: The loaded checkpoint.

    Raises:
        RuntimeError: If the loaded checkpoint is not a dict.
    """
    checkpoint = _load_checkpoint(filename, map_location, logger)
    # OrderedDict is a subclass of dict
    if not isinstance(checkpoint, dict):
        # Include the offending path in the message so the failure is
        # actionable (the f-string previously had no interpolation).
        raise RuntimeError(
            f'No state_dict found in checkpoint file {filename}')
    # get state_dict from checkpoint
    if 'state_dict' in checkpoint:
        state_dict = checkpoint['state_dict']
    else:
        state_dict = checkpoint

    # strip prefix of state_dict
    metadata = getattr(state_dict, '_metadata', OrderedDict())
    for p, r in revise_keys:
        state_dict = OrderedDict(
            {re.sub(p, r, k): v
             for k, v in state_dict.items()})
    # Keep metadata in state_dict
    state_dict._metadata = metadata

    # load state_dict
    load_state_dict(model, state_dict, strict, logger)
    return checkpoint
def weights_to_cpu(state_dict):
    """Copy a model state_dict to cpu.

    Args:
        state_dict (OrderedDict): Model weights on GPU.

    Returns:
        OrderedDict: Model weights on CPU.
    """
    state_dict_cpu = OrderedDict()
    for key, val in state_dict.items():
        state_dict_cpu[key] = val.cpu()
    # Keep metadata in state_dict
    state_dict_cpu._metadata = getattr(state_dict, '_metadata', OrderedDict())
    return state_dict_cpu
def _save_to_state_dict(module, destination, prefix, keep_vars):
    """Save the parameters and buffers of ``module`` into ``destination``.

    This method is modified from :meth:`torch.nn.Module._save_to_state_dict`:
    the check of ``_non_persistent_buffers_set`` is removed so that buffers
    of modules such as ``nn.BatchNorm2d`` are always exported.

    Args:
        module (nn.Module): The module to generate state_dict.
        destination (dict): A dict where state will be stored under
            ``prefix + name`` keys.
        prefix (str): The prefix for parameters and buffers used in this
            module.
        keep_vars (bool): If False, each stored tensor is detached from the
            autograd graph before being written to ``destination``.
    """
    # Parameters first, then buffers — same ordering as the original code.
    for group in (module._parameters, module._buffers):
        for name, tensor in group.items():
            if tensor is not None:
                value = tensor if keep_vars else tensor.detach()
                destination[prefix + name] = value
def get_state_dict(module, destination=None, prefix='', keep_vars=False):
    """Returns a dictionary containing a whole state of the module.

    Both parameters and persistent buffers (e.g. running averages) are
    included. Keys are corresponding parameter and buffer names.

    This method is modified from :meth:`torch.nn.Module.state_dict` to
    recursively check parallel module in case that the model has a complicated
    structure, e.g., nn.Module(nn.Module(DDP)).

    Args:
        module (nn.Module): The module to generate state_dict.
        destination (OrderedDict, optional): Returned dict for the state of
            the module. A fresh ``OrderedDict`` is created when None.
            Default: None.
        prefix (str): Prefix of the key. Default: ''.
        keep_vars (bool): Whether to keep the variable property of the
            parameters. Default: False.

    Returns:
        dict: A dictionary containing a whole state of the module.
    """
    # recursively check parallel module in case that the model has a
    # complicated structure, e.g., nn.Module(nn.Module(DDP))
    if is_module_wrapper(module):
        module = module.module

    # below is the same as torch.nn.Module.state_dict()
    if destination is None:
        destination = OrderedDict()
        destination._metadata = OrderedDict()
    # Record this (sub)module's version in the metadata, keyed by its
    # dotted path (prefix without the trailing dot).
    destination._metadata[prefix[:-1]] = local_metadata = dict(
        version=module._version)
    _save_to_state_dict(module, destination, prefix, keep_vars)
    # Recurse into children, extending the prefix with each child's name.
    for name, child in module._modules.items():
        if child is not None:
            get_state_dict(
                child, destination, prefix + name + '.', keep_vars=keep_vars)
    # Let any registered state-dict hooks post-process the result; a hook
    # may return a replacement dict, which then becomes the destination.
    for hook in module._state_dict_hooks.values():
        hook_result = hook(module, destination, prefix, local_metadata)
        if hook_result is not None:
            destination = hook_result
    return destination
def save_checkpoint(model,
                    filename,
                    optimizer=None,
                    meta=None,
                    file_client_args=None):
    """Save checkpoint to file.

    The checkpoint will have 3 fields: ``meta``, ``state_dict`` and
    ``optimizer``. By default ``meta`` will contain version and time info.

    Args:
        model (Module): Module whose params are to be saved.
        filename (str): Checkpoint filename. A ``pavi://`` path uploads the
            checkpoint to pavi modelcloud instead of the local filesystem.
        optimizer (:obj:`Optimizer` or dict, optional): Optimizer (or a dict
            of named optimizers) to be saved.
        meta (dict, optional): Metadata to be saved in checkpoint.
        file_client_args (dict, optional): Arguments to instantiate a
            FileClient. See :class:`mmcv.fileio.FileClient` for details.
            Default: None.
            `New in version 1.3.16.`

    Raises:
        TypeError: If ``meta`` is neither None nor a dict.
        ValueError: If ``file_client_args`` is given together with a
            ``pavi://`` filename.
    """
    if meta is None:
        meta = {}
    elif not isinstance(meta, dict):
        raise TypeError(f'meta must be a dict or None, but got {type(meta)}')
    # Always stamp the mmcv version and save time into the metadata.
    meta.update(mmcv_version=mmcv.__version__, time=time.asctime())

    # Unwrap wrapped models (as detected by is_module_wrapper) so the
    # underlying module's weights are saved.
    if is_module_wrapper(model):
        model = model.module

    if hasattr(model, 'CLASSES') and model.CLASSES is not None:
        # save class name to the meta
        meta.update(CLASSES=model.CLASSES)

    checkpoint = {
        'meta': meta,
        'state_dict': weights_to_cpu(get_state_dict(model))
    }
    # save optimizer state dict in the checkpoint
    if isinstance(optimizer, Optimizer):
        checkpoint['optimizer'] = optimizer.state_dict()
    elif isinstance(optimizer, dict):
        # Multiple optimizers: store each one's state under its name.
        checkpoint['optimizer'] = {}
        for name, optim in optimizer.items():
            checkpoint['optimizer'][name] = optim.state_dict()

    if filename.startswith('pavi://'):
        # pavi manages storage itself, so a custom FileClient is invalid.
        if file_client_args is not None:
            raise ValueError(
                'file_client_args should be "None" if filename starts with'
                f'"pavi://", but got {file_client_args}')
        try:
            from pavi import modelcloud
            from pavi import exception
        except ImportError:
            raise ImportError(
                'Please install pavi to load checkpoint from modelcloud.')
        model_path = filename[7:]  # strip the 'pavi://' prefix
        root = modelcloud.Folder()
        model_dir, model_name = osp.split(model_path)
        try:
            model = modelcloud.get(model_dir)
        except exception.NodeNotFoundError:
            # Target folder does not exist on modelcloud yet: create it.
            model = root.create_training_model(model_dir)
        with TemporaryDirectory() as tmp_dir:
            # Serialize to a local temp file first, then upload it to pavi.
            checkpoint_file = osp.join(tmp_dir, model_name)
            with open(checkpoint_file, 'wb') as f:
                torch.save(checkpoint, f)
                f.flush()
            model.create_file(checkpoint_file, name=model_name)
    else:
        # Serialize into memory and hand the bytes to the file client,
        # which supports local paths as well as remote backends.
        file_client = FileClient.infer_client(file_client_args, filename)
        with io.BytesIO() as f:
            torch.save(checkpoint, f)
            file_client.put(f.getvalue(), filename)