Moyao001 commited on
Commit
96793d9
·
verified ·
1 Parent(s): 101f177

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/__init__.py +41 -0
  2. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/bricks/__init__.py +35 -0
  3. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/bricks/activation.py +92 -0
  4. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/bricks/context_block.py +125 -0
  5. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/bricks/conv.py +44 -0
  6. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/bricks/conv2d_adaptive_padding.py +62 -0
  7. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/bricks/conv_module.py +206 -0
  8. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/bricks/conv_ws.py +148 -0
  9. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/bricks/depthwise_separable_conv_module.py +96 -0
  10. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/bricks/drop.py +65 -0
  11. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/bricks/generalized_attention.py +412 -0
  12. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/bricks/hsigmoid.py +34 -0
  13. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/bricks/hswish.py +29 -0
  14. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/bricks/non_local.py +306 -0
  15. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/bricks/norm.py +144 -0
  16. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/bricks/padding.py +36 -0
  17. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/bricks/plugin.py +88 -0
  18. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/bricks/registry.py +16 -0
  19. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/bricks/scale.py +21 -0
  20. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/bricks/swish.py +25 -0
  21. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/bricks/transformer.py +595 -0
  22. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/bricks/upsample.py +84 -0
  23. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/bricks/wrappers.py +180 -0
  24. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/utils/__init__.py +19 -0
  25. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/utils/flops_counter.py +599 -0
  26. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/utils/fuse_conv_bn.py +59 -0
  27. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/utils/sync_bn.py +59 -0
  28. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/utils/weight_init.py +684 -0
  29. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/runner/__init__.py +47 -0
  30. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/runner/base_module.py +195 -0
  31. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/runner/base_runner.py +542 -0
  32. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/runner/builder.py +24 -0
  33. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/runner/checkpoint.py +707 -0
  34. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/runner/default_constructor.py +44 -0
  35. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/runner/dist_utils.py +164 -0
  36. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/runner/epoch_based_runner.py +187 -0
  37. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/runner/fp16_utils.py +410 -0
  38. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/runner/hooks/__init__.py +29 -0
  39. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/runner/hooks/checkpoint.py +167 -0
  40. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/runner/hooks/closure.py +11 -0
  41. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/runner/hooks/ema.py +89 -0
  42. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/runner/hooks/evaluation.py +509 -0
  43. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/runner/hooks/hook.py +92 -0
  44. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/runner/hooks/iter_timer.py +18 -0
  45. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/runner/hooks/logger/__init__.py +15 -0
  46. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/runner/hooks/logger/base.py +166 -0
  47. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/runner/hooks/logger/dvclive.py +58 -0
  48. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/runner/hooks/logger/mlflow.py +78 -0
  49. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/runner/hooks/logger/neptune.py +82 -0
  50. CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/runner/hooks/logger/pavi.py +117 -0
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/__init__.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ from .alexnet import AlexNet
3
+ # yapf: disable
4
+ from .bricks import (ACTIVATION_LAYERS, CONV_LAYERS, NORM_LAYERS,
5
+ PADDING_LAYERS, PLUGIN_LAYERS, UPSAMPLE_LAYERS,
6
+ ContextBlock, Conv2d, Conv3d, ConvAWS2d, ConvModule,
7
+ ConvTranspose2d, ConvTranspose3d, ConvWS2d,
8
+ DepthwiseSeparableConvModule, GeneralizedAttention,
9
+ HSigmoid, HSwish, Linear, MaxPool2d, MaxPool3d,
10
+ NonLocal1d, NonLocal2d, NonLocal3d, Scale, Swish,
11
+ build_activation_layer, build_conv_layer,
12
+ build_norm_layer, build_padding_layer, build_plugin_layer,
13
+ build_upsample_layer, conv_ws_2d, is_norm)
14
+ from .builder import MODELS, build_model_from_cfg
15
+ # yapf: enable
16
+ from .resnet import ResNet, make_res_layer
17
+ from .utils import (INITIALIZERS, Caffe2XavierInit, ConstantInit, KaimingInit,
18
+ NormalInit, PretrainedInit, TruncNormalInit, UniformInit,
19
+ XavierInit, bias_init_with_prob, caffe2_xavier_init,
20
+ constant_init, fuse_conv_bn, get_model_complexity_info,
21
+ initialize, kaiming_init, normal_init, trunc_normal_init,
22
+ uniform_init, xavier_init)
23
+ from .vgg import VGG, make_vgg_layer
24
+
25
+ __all__ = [
26
+ 'AlexNet', 'VGG', 'make_vgg_layer', 'ResNet', 'make_res_layer',
27
+ 'constant_init', 'xavier_init', 'normal_init', 'trunc_normal_init',
28
+ 'uniform_init', 'kaiming_init', 'caffe2_xavier_init',
29
+ 'bias_init_with_prob', 'ConvModule', 'build_activation_layer',
30
+ 'build_conv_layer', 'build_norm_layer', 'build_padding_layer',
31
+ 'build_upsample_layer', 'build_plugin_layer', 'is_norm', 'NonLocal1d',
32
+ 'NonLocal2d', 'NonLocal3d', 'ContextBlock', 'HSigmoid', 'Swish', 'HSwish',
33
+ 'GeneralizedAttention', 'ACTIVATION_LAYERS', 'CONV_LAYERS', 'NORM_LAYERS',
34
+ 'PADDING_LAYERS', 'UPSAMPLE_LAYERS', 'PLUGIN_LAYERS', 'Scale',
35
+ 'get_model_complexity_info', 'conv_ws_2d', 'ConvAWS2d', 'ConvWS2d',
36
+ 'fuse_conv_bn', 'DepthwiseSeparableConvModule', 'Linear', 'Conv2d',
37
+ 'ConvTranspose2d', 'MaxPool2d', 'ConvTranspose3d', 'MaxPool3d', 'Conv3d',
38
+ 'initialize', 'INITIALIZERS', 'ConstantInit', 'XavierInit', 'NormalInit',
39
+ 'TruncNormalInit', 'UniformInit', 'KaimingInit', 'PretrainedInit',
40
+ 'Caffe2XavierInit', 'MODELS', 'build_model_from_cfg'
41
+ ]
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/bricks/__init__.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ from .activation import build_activation_layer
3
+ from .context_block import ContextBlock
4
+ from .conv import build_conv_layer
5
+ from .conv2d_adaptive_padding import Conv2dAdaptivePadding
6
+ from .conv_module import ConvModule
7
+ from .conv_ws import ConvAWS2d, ConvWS2d, conv_ws_2d
8
+ from .depthwise_separable_conv_module import DepthwiseSeparableConvModule
9
+ from .drop import Dropout, DropPath
10
+ from .generalized_attention import GeneralizedAttention
11
+ from .hsigmoid import HSigmoid
12
+ from .hswish import HSwish
13
+ from .non_local import NonLocal1d, NonLocal2d, NonLocal3d
14
+ from .norm import build_norm_layer, is_norm
15
+ from .padding import build_padding_layer
16
+ from .plugin import build_plugin_layer
17
+ from .registry import (ACTIVATION_LAYERS, CONV_LAYERS, NORM_LAYERS,
18
+ PADDING_LAYERS, PLUGIN_LAYERS, UPSAMPLE_LAYERS)
19
+ from .scale import Scale
20
+ from .swish import Swish
21
+ from .upsample import build_upsample_layer
22
+ from .wrappers import (Conv2d, Conv3d, ConvTranspose2d, ConvTranspose3d,
23
+ Linear, MaxPool2d, MaxPool3d)
24
+
25
+ __all__ = [
26
+ 'ConvModule', 'build_activation_layer', 'build_conv_layer',
27
+ 'build_norm_layer', 'build_padding_layer', 'build_upsample_layer',
28
+ 'build_plugin_layer', 'is_norm', 'HSigmoid', 'HSwish', 'NonLocal1d',
29
+ 'NonLocal2d', 'NonLocal3d', 'ContextBlock', 'GeneralizedAttention',
30
+ 'ACTIVATION_LAYERS', 'CONV_LAYERS', 'NORM_LAYERS', 'PADDING_LAYERS',
31
+ 'UPSAMPLE_LAYERS', 'PLUGIN_LAYERS', 'Scale', 'ConvAWS2d', 'ConvWS2d',
32
+ 'conv_ws_2d', 'DepthwiseSeparableConvModule', 'Swish', 'Linear',
33
+ 'Conv2dAdaptivePadding', 'Conv2d', 'ConvTranspose2d', 'MaxPool2d',
34
+ 'ConvTranspose3d', 'MaxPool3d', 'Conv3d', 'Dropout', 'DropPath'
35
+ ]
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/bricks/activation.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+
6
+ from annotator.uniformer.mmcv.utils import TORCH_VERSION, build_from_cfg, digit_version
7
+ from .registry import ACTIVATION_LAYERS
8
+
9
+ for module in [
10
+ nn.ReLU, nn.LeakyReLU, nn.PReLU, nn.RReLU, nn.ReLU6, nn.ELU,
11
+ nn.Sigmoid, nn.Tanh
12
+ ]:
13
+ ACTIVATION_LAYERS.register_module(module=module)
14
+
15
+
16
+ @ACTIVATION_LAYERS.register_module(name='Clip')
17
+ @ACTIVATION_LAYERS.register_module()
18
+ class Clamp(nn.Module):
19
+ """Clamp activation layer.
20
+
21
+ This activation function is to clamp the feature map value within
22
+ :math:`[min, max]`. More details can be found in ``torch.clamp()``.
23
+
24
+ Args:
25
+ min (Number | optional): Lower-bound of the range to be clamped to.
26
+ Default to -1.
27
+ max (Number | optional): Upper-bound of the range to be clamped to.
28
+ Default to 1.
29
+ """
30
+
31
+ def __init__(self, min=-1., max=1.):
32
+ super(Clamp, self).__init__()
33
+ self.min = min
34
+ self.max = max
35
+
36
+ def forward(self, x):
37
+ """Forward function.
38
+
39
+ Args:
40
+ x (torch.Tensor): The input tensor.
41
+
42
+ Returns:
43
+ torch.Tensor: Clamped tensor.
44
+ """
45
+ return torch.clamp(x, min=self.min, max=self.max)
46
+
47
+
48
+ class GELU(nn.Module):
49
+ r"""Applies the Gaussian Error Linear Units function:
50
+
51
+ .. math::
52
+ \text{GELU}(x) = x * \Phi(x)
53
+ where :math:`\Phi(x)` is the Cumulative Distribution Function for
54
+ Gaussian Distribution.
55
+
56
+ Shape:
57
+ - Input: :math:`(N, *)` where `*` means, any number of additional
58
+ dimensions
59
+ - Output: :math:`(N, *)`, same shape as the input
60
+
61
+ .. image:: scripts/activation_images/GELU.png
62
+
63
+ Examples::
64
+
65
+ >>> m = nn.GELU()
66
+ >>> input = torch.randn(2)
67
+ >>> output = m(input)
68
+ """
69
+
70
+ def forward(self, input):
71
+ return F.gelu(input)
72
+
73
+
74
+ if (TORCH_VERSION == 'parrots'
75
+ or digit_version(TORCH_VERSION) < digit_version('1.4')):
76
+ ACTIVATION_LAYERS.register_module(module=GELU)
77
+ else:
78
+ ACTIVATION_LAYERS.register_module(module=nn.GELU)
79
+
80
+
81
+ def build_activation_layer(cfg):
82
+ """Build activation layer.
83
+
84
+ Args:
85
+ cfg (dict): The activation layer config, which should contain:
86
+ - type (str): Layer type.
87
+ - layer args: Args needed to instantiate an activation layer.
88
+
89
+ Returns:
90
+ nn.Module: Created activation layer.
91
+ """
92
+ return build_from_cfg(cfg, ACTIVATION_LAYERS)
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/bricks/context_block.py ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import torch
3
+ from torch import nn
4
+
5
+ from ..utils import constant_init, kaiming_init
6
+ from .registry import PLUGIN_LAYERS
7
+
8
+
9
+ def last_zero_init(m):
10
+ if isinstance(m, nn.Sequential):
11
+ constant_init(m[-1], val=0)
12
+ else:
13
+ constant_init(m, val=0)
14
+
15
+
16
+ @PLUGIN_LAYERS.register_module()
17
+ class ContextBlock(nn.Module):
18
+ """ContextBlock module in GCNet.
19
+
20
+ See 'GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond'
21
+ (https://arxiv.org/abs/1904.11492) for details.
22
+
23
+ Args:
24
+ in_channels (int): Channels of the input feature map.
25
+ ratio (float): Ratio of channels of transform bottleneck
26
+ pooling_type (str): Pooling method for context modeling.
27
+ Options are 'att' and 'avg', stand for attention pooling and
28
+ average pooling respectively. Default: 'att'.
29
+ fusion_types (Sequence[str]): Fusion method for feature fusion,
30
+ Options are 'channels_add', 'channel_mul', stand for channelwise
31
+ addition and multiplication respectively. Default: ('channel_add',)
32
+ """
33
+
34
+ _abbr_ = 'context_block'
35
+
36
+ def __init__(self,
37
+ in_channels,
38
+ ratio,
39
+ pooling_type='att',
40
+ fusion_types=('channel_add', )):
41
+ super(ContextBlock, self).__init__()
42
+ assert pooling_type in ['avg', 'att']
43
+ assert isinstance(fusion_types, (list, tuple))
44
+ valid_fusion_types = ['channel_add', 'channel_mul']
45
+ assert all([f in valid_fusion_types for f in fusion_types])
46
+ assert len(fusion_types) > 0, 'at least one fusion should be used'
47
+ self.in_channels = in_channels
48
+ self.ratio = ratio
49
+ self.planes = int(in_channels * ratio)
50
+ self.pooling_type = pooling_type
51
+ self.fusion_types = fusion_types
52
+ if pooling_type == 'att':
53
+ self.conv_mask = nn.Conv2d(in_channels, 1, kernel_size=1)
54
+ self.softmax = nn.Softmax(dim=2)
55
+ else:
56
+ self.avg_pool = nn.AdaptiveAvgPool2d(1)
57
+ if 'channel_add' in fusion_types:
58
+ self.channel_add_conv = nn.Sequential(
59
+ nn.Conv2d(self.in_channels, self.planes, kernel_size=1),
60
+ nn.LayerNorm([self.planes, 1, 1]),
61
+ nn.ReLU(inplace=True), # yapf: disable
62
+ nn.Conv2d(self.planes, self.in_channels, kernel_size=1))
63
+ else:
64
+ self.channel_add_conv = None
65
+ if 'channel_mul' in fusion_types:
66
+ self.channel_mul_conv = nn.Sequential(
67
+ nn.Conv2d(self.in_channels, self.planes, kernel_size=1),
68
+ nn.LayerNorm([self.planes, 1, 1]),
69
+ nn.ReLU(inplace=True), # yapf: disable
70
+ nn.Conv2d(self.planes, self.in_channels, kernel_size=1))
71
+ else:
72
+ self.channel_mul_conv = None
73
+ self.reset_parameters()
74
+
75
+ def reset_parameters(self):
76
+ if self.pooling_type == 'att':
77
+ kaiming_init(self.conv_mask, mode='fan_in')
78
+ self.conv_mask.inited = True
79
+
80
+ if self.channel_add_conv is not None:
81
+ last_zero_init(self.channel_add_conv)
82
+ if self.channel_mul_conv is not None:
83
+ last_zero_init(self.channel_mul_conv)
84
+
85
+ def spatial_pool(self, x):
86
+ batch, channel, height, width = x.size()
87
+ if self.pooling_type == 'att':
88
+ input_x = x
89
+ # [N, C, H * W]
90
+ input_x = input_x.view(batch, channel, height * width)
91
+ # [N, 1, C, H * W]
92
+ input_x = input_x.unsqueeze(1)
93
+ # [N, 1, H, W]
94
+ context_mask = self.conv_mask(x)
95
+ # [N, 1, H * W]
96
+ context_mask = context_mask.view(batch, 1, height * width)
97
+ # [N, 1, H * W]
98
+ context_mask = self.softmax(context_mask)
99
+ # [N, 1, H * W, 1]
100
+ context_mask = context_mask.unsqueeze(-1)
101
+ # [N, 1, C, 1]
102
+ context = torch.matmul(input_x, context_mask)
103
+ # [N, C, 1, 1]
104
+ context = context.view(batch, channel, 1, 1)
105
+ else:
106
+ # [N, C, 1, 1]
107
+ context = self.avg_pool(x)
108
+
109
+ return context
110
+
111
+ def forward(self, x):
112
+ # [N, C, 1, 1]
113
+ context = self.spatial_pool(x)
114
+
115
+ out = x
116
+ if self.channel_mul_conv is not None:
117
+ # [N, C, 1, 1]
118
+ channel_mul_term = torch.sigmoid(self.channel_mul_conv(context))
119
+ out = out * channel_mul_term
120
+ if self.channel_add_conv is not None:
121
+ # [N, C, 1, 1]
122
+ channel_add_term = self.channel_add_conv(context)
123
+ out = out + channel_add_term
124
+
125
+ return out
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/bricks/conv.py ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ from torch import nn
3
+
4
+ from .registry import CONV_LAYERS
5
+
6
+ CONV_LAYERS.register_module('Conv1d', module=nn.Conv1d)
7
+ CONV_LAYERS.register_module('Conv2d', module=nn.Conv2d)
8
+ CONV_LAYERS.register_module('Conv3d', module=nn.Conv3d)
9
+ CONV_LAYERS.register_module('Conv', module=nn.Conv2d)
10
+
11
+
12
+ def build_conv_layer(cfg, *args, **kwargs):
13
+ """Build convolution layer.
14
+
15
+ Args:
16
+ cfg (None or dict): The conv layer config, which should contain:
17
+ - type (str): Layer type.
18
+ - layer args: Args needed to instantiate an conv layer.
19
+ args (argument list): Arguments passed to the `__init__`
20
+ method of the corresponding conv layer.
21
+ kwargs (keyword arguments): Keyword arguments passed to the `__init__`
22
+ method of the corresponding conv layer.
23
+
24
+ Returns:
25
+ nn.Module: Created conv layer.
26
+ """
27
+ if cfg is None:
28
+ cfg_ = dict(type='Conv2d')
29
+ else:
30
+ if not isinstance(cfg, dict):
31
+ raise TypeError('cfg must be a dict')
32
+ if 'type' not in cfg:
33
+ raise KeyError('the cfg dict must contain the key "type"')
34
+ cfg_ = cfg.copy()
35
+
36
+ layer_type = cfg_.pop('type')
37
+ if layer_type not in CONV_LAYERS:
38
+ raise KeyError(f'Unrecognized norm type {layer_type}')
39
+ else:
40
+ conv_layer = CONV_LAYERS.get(layer_type)
41
+
42
+ layer = conv_layer(*args, **kwargs, **cfg_)
43
+
44
+ return layer
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/bricks/conv2d_adaptive_padding.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import math
3
+
4
+ from torch import nn
5
+ from torch.nn import functional as F
6
+
7
+ from .registry import CONV_LAYERS
8
+
9
+
10
+ @CONV_LAYERS.register_module()
11
+ class Conv2dAdaptivePadding(nn.Conv2d):
12
+ """Implementation of 2D convolution in tensorflow with `padding` as "same",
13
+ which applies padding to input (if needed) so that input image gets fully
14
+ covered by filter and stride you specified. For stride 1, this will ensure
15
+ that output image size is same as input. For stride of 2, output dimensions
16
+ will be half, for example.
17
+
18
+ Args:
19
+ in_channels (int): Number of channels in the input image
20
+ out_channels (int): Number of channels produced by the convolution
21
+ kernel_size (int or tuple): Size of the convolving kernel
22
+ stride (int or tuple, optional): Stride of the convolution. Default: 1
23
+ padding (int or tuple, optional): Zero-padding added to both sides of
24
+ the input. Default: 0
25
+ dilation (int or tuple, optional): Spacing between kernel elements.
26
+ Default: 1
27
+ groups (int, optional): Number of blocked connections from input
28
+ channels to output channels. Default: 1
29
+ bias (bool, optional): If ``True``, adds a learnable bias to the
30
+ output. Default: ``True``
31
+ """
32
+
33
+ def __init__(self,
34
+ in_channels,
35
+ out_channels,
36
+ kernel_size,
37
+ stride=1,
38
+ padding=0,
39
+ dilation=1,
40
+ groups=1,
41
+ bias=True):
42
+ super().__init__(in_channels, out_channels, kernel_size, stride, 0,
43
+ dilation, groups, bias)
44
+
45
+ def forward(self, x):
46
+ img_h, img_w = x.size()[-2:]
47
+ kernel_h, kernel_w = self.weight.size()[-2:]
48
+ stride_h, stride_w = self.stride
49
+ output_h = math.ceil(img_h / stride_h)
50
+ output_w = math.ceil(img_w / stride_w)
51
+ pad_h = (
52
+ max((output_h - 1) * self.stride[0] +
53
+ (kernel_h - 1) * self.dilation[0] + 1 - img_h, 0))
54
+ pad_w = (
55
+ max((output_w - 1) * self.stride[1] +
56
+ (kernel_w - 1) * self.dilation[1] + 1 - img_w, 0))
57
+ if pad_h > 0 or pad_w > 0:
58
+ x = F.pad(x, [
59
+ pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2
60
+ ])
61
+ return F.conv2d(x, self.weight, self.bias, self.stride, self.padding,
62
+ self.dilation, self.groups)
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/bricks/conv_module.py ADDED
@@ -0,0 +1,206 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import warnings
3
+
4
+ import torch.nn as nn
5
+
6
+ from annotator.uniformer.mmcv.utils import _BatchNorm, _InstanceNorm
7
+ from ..utils import constant_init, kaiming_init
8
+ from .activation import build_activation_layer
9
+ from .conv import build_conv_layer
10
+ from .norm import build_norm_layer
11
+ from .padding import build_padding_layer
12
+ from .registry import PLUGIN_LAYERS
13
+
14
+
15
+ @PLUGIN_LAYERS.register_module()
16
+ class ConvModule(nn.Module):
17
+ """A conv block that bundles conv/norm/activation layers.
18
+
19
+ This block simplifies the usage of convolution layers, which are commonly
20
+ used with a norm layer (e.g., BatchNorm) and activation layer (e.g., ReLU).
21
+ It is based upon three build methods: `build_conv_layer()`,
22
+ `build_norm_layer()` and `build_activation_layer()`.
23
+
24
+ Besides, we add some additional features in this module.
25
+ 1. Automatically set `bias` of the conv layer.
26
+ 2. Spectral norm is supported.
27
+ 3. More padding modes are supported. Before PyTorch 1.5, nn.Conv2d only
28
+ supports zero and circular padding, and we add "reflect" padding mode.
29
+
30
+ Args:
31
+ in_channels (int): Number of channels in the input feature map.
32
+ Same as that in ``nn._ConvNd``.
33
+ out_channels (int): Number of channels produced by the convolution.
34
+ Same as that in ``nn._ConvNd``.
35
+ kernel_size (int | tuple[int]): Size of the convolving kernel.
36
+ Same as that in ``nn._ConvNd``.
37
+ stride (int | tuple[int]): Stride of the convolution.
38
+ Same as that in ``nn._ConvNd``.
39
+ padding (int | tuple[int]): Zero-padding added to both sides of
40
+ the input. Same as that in ``nn._ConvNd``.
41
+ dilation (int | tuple[int]): Spacing between kernel elements.
42
+ Same as that in ``nn._ConvNd``.
43
+ groups (int): Number of blocked connections from input channels to
44
+ output channels. Same as that in ``nn._ConvNd``.
45
+ bias (bool | str): If specified as `auto`, it will be decided by the
46
+ norm_cfg. Bias will be set as True if `norm_cfg` is None, otherwise
47
+ False. Default: "auto".
48
+ conv_cfg (dict): Config dict for convolution layer. Default: None,
49
+ which means using conv2d.
50
+ norm_cfg (dict): Config dict for normalization layer. Default: None.
51
+ act_cfg (dict): Config dict for activation layer.
52
+ Default: dict(type='ReLU').
53
+ inplace (bool): Whether to use inplace mode for activation.
54
+ Default: True.
55
+ with_spectral_norm (bool): Whether use spectral norm in conv module.
56
+ Default: False.
57
+ padding_mode (str): If the `padding_mode` has not been supported by
58
+ current `Conv2d` in PyTorch, we will use our own padding layer
59
+ instead. Currently, we support ['zeros', 'circular'] with official
60
+ implementation and ['reflect'] with our own implementation.
61
+ Default: 'zeros'.
62
+ order (tuple[str]): The order of conv/norm/activation layers. It is a
63
+ sequence of "conv", "norm" and "act". Common examples are
64
+ ("conv", "norm", "act") and ("act", "conv", "norm").
65
+ Default: ('conv', 'norm', 'act').
66
+ """
67
+
68
+ _abbr_ = 'conv_block'
69
+
70
+ def __init__(self,
71
+ in_channels,
72
+ out_channels,
73
+ kernel_size,
74
+ stride=1,
75
+ padding=0,
76
+ dilation=1,
77
+ groups=1,
78
+ bias='auto',
79
+ conv_cfg=None,
80
+ norm_cfg=None,
81
+ act_cfg=dict(type='ReLU'),
82
+ inplace=True,
83
+ with_spectral_norm=False,
84
+ padding_mode='zeros',
85
+ order=('conv', 'norm', 'act')):
86
+ super(ConvModule, self).__init__()
87
+ assert conv_cfg is None or isinstance(conv_cfg, dict)
88
+ assert norm_cfg is None or isinstance(norm_cfg, dict)
89
+ assert act_cfg is None or isinstance(act_cfg, dict)
90
+ official_padding_mode = ['zeros', 'circular']
91
+ self.conv_cfg = conv_cfg
92
+ self.norm_cfg = norm_cfg
93
+ self.act_cfg = act_cfg
94
+ self.inplace = inplace
95
+ self.with_spectral_norm = with_spectral_norm
96
+ self.with_explicit_padding = padding_mode not in official_padding_mode
97
+ self.order = order
98
+ assert isinstance(self.order, tuple) and len(self.order) == 3
99
+ assert set(order) == set(['conv', 'norm', 'act'])
100
+
101
+ self.with_norm = norm_cfg is not None
102
+ self.with_activation = act_cfg is not None
103
+ # if the conv layer is before a norm layer, bias is unnecessary.
104
+ if bias == 'auto':
105
+ bias = not self.with_norm
106
+ self.with_bias = bias
107
+
108
+ if self.with_explicit_padding:
109
+ pad_cfg = dict(type=padding_mode)
110
+ self.padding_layer = build_padding_layer(pad_cfg, padding)
111
+
112
+ # reset padding to 0 for conv module
113
+ conv_padding = 0 if self.with_explicit_padding else padding
114
+ # build convolution layer
115
+ self.conv = build_conv_layer(
116
+ conv_cfg,
117
+ in_channels,
118
+ out_channels,
119
+ kernel_size,
120
+ stride=stride,
121
+ padding=conv_padding,
122
+ dilation=dilation,
123
+ groups=groups,
124
+ bias=bias)
125
+ # export the attributes of self.conv to a higher level for convenience
126
+ self.in_channels = self.conv.in_channels
127
+ self.out_channels = self.conv.out_channels
128
+ self.kernel_size = self.conv.kernel_size
129
+ self.stride = self.conv.stride
130
+ self.padding = padding
131
+ self.dilation = self.conv.dilation
132
+ self.transposed = self.conv.transposed
133
+ self.output_padding = self.conv.output_padding
134
+ self.groups = self.conv.groups
135
+
136
+ if self.with_spectral_norm:
137
+ self.conv = nn.utils.spectral_norm(self.conv)
138
+
139
+ # build normalization layers
140
+ if self.with_norm:
141
+ # norm layer is after conv layer
142
+ if order.index('norm') > order.index('conv'):
143
+ norm_channels = out_channels
144
+ else:
145
+ norm_channels = in_channels
146
+ self.norm_name, norm = build_norm_layer(norm_cfg, norm_channels)
147
+ self.add_module(self.norm_name, norm)
148
+ if self.with_bias:
149
+ if isinstance(norm, (_BatchNorm, _InstanceNorm)):
150
+ warnings.warn(
151
+ 'Unnecessary conv bias before batch/instance norm')
152
+ else:
153
+ self.norm_name = None
154
+
155
+ # build activation layer
156
+ if self.with_activation:
157
+ act_cfg_ = act_cfg.copy()
158
+ # nn.Tanh has no 'inplace' argument
159
+ if act_cfg_['type'] not in [
160
+ 'Tanh', 'PReLU', 'Sigmoid', 'HSigmoid', 'Swish'
161
+ ]:
162
+ act_cfg_.setdefault('inplace', inplace)
163
+ self.activate = build_activation_layer(act_cfg_)
164
+
165
+ # Use msra init by default
166
+ self.init_weights()
167
+
168
+ @property
169
+ def norm(self):
170
+ if self.norm_name:
171
+ return getattr(self, self.norm_name)
172
+ else:
173
+ return None
174
+
175
+ def init_weights(self):
176
+ # 1. It is mainly for customized conv layers with their own
177
+ # initialization manners by calling their own ``init_weights()``,
178
+ # and we do not want ConvModule to override the initialization.
179
+ # 2. For customized conv layers without their own initialization
180
+ # manners (that is, they don't have their own ``init_weights()``)
181
+ # and PyTorch's conv layers, they will be initialized by
182
+ # this method with default ``kaiming_init``.
183
+ # Note: For PyTorch's conv layers, they will be overwritten by our
184
+ # initialization implementation using default ``kaiming_init``.
185
+ if not hasattr(self.conv, 'init_weights'):
186
+ if self.with_activation and self.act_cfg['type'] == 'LeakyReLU':
187
+ nonlinearity = 'leaky_relu'
188
+ a = self.act_cfg.get('negative_slope', 0.01)
189
+ else:
190
+ nonlinearity = 'relu'
191
+ a = 0
192
+ kaiming_init(self.conv, a=a, nonlinearity=nonlinearity)
193
+ if self.with_norm:
194
+ constant_init(self.norm, 1, bias=0)
195
+
196
+ def forward(self, x, activate=True, norm=True):
197
+ for layer in self.order:
198
+ if layer == 'conv':
199
+ if self.with_explicit_padding:
200
+ x = self.padding_layer(x)
201
+ x = self.conv(x)
202
+ elif layer == 'norm' and norm and self.with_norm:
203
+ x = self.norm(x)
204
+ elif layer == 'act' and activate and self.with_activation:
205
+ x = self.activate(x)
206
+ return x
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/bricks/conv_ws.py ADDED
@@ -0,0 +1,148 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+
6
+ from .registry import CONV_LAYERS
7
+
8
+
9
+ def conv_ws_2d(input,
10
+ weight,
11
+ bias=None,
12
+ stride=1,
13
+ padding=0,
14
+ dilation=1,
15
+ groups=1,
16
+ eps=1e-5):
17
+ c_in = weight.size(0)
18
+ weight_flat = weight.view(c_in, -1)
19
+ mean = weight_flat.mean(dim=1, keepdim=True).view(c_in, 1, 1, 1)
20
+ std = weight_flat.std(dim=1, keepdim=True).view(c_in, 1, 1, 1)
21
+ weight = (weight - mean) / (std + eps)
22
+ return F.conv2d(input, weight, bias, stride, padding, dilation, groups)
23
+
24
+
25
+ @CONV_LAYERS.register_module('ConvWS')
26
+ class ConvWS2d(nn.Conv2d):
27
+
28
+ def __init__(self,
29
+ in_channels,
30
+ out_channels,
31
+ kernel_size,
32
+ stride=1,
33
+ padding=0,
34
+ dilation=1,
35
+ groups=1,
36
+ bias=True,
37
+ eps=1e-5):
38
+ super(ConvWS2d, self).__init__(
39
+ in_channels,
40
+ out_channels,
41
+ kernel_size,
42
+ stride=stride,
43
+ padding=padding,
44
+ dilation=dilation,
45
+ groups=groups,
46
+ bias=bias)
47
+ self.eps = eps
48
+
49
+ def forward(self, x):
50
+ return conv_ws_2d(x, self.weight, self.bias, self.stride, self.padding,
51
+ self.dilation, self.groups, self.eps)
52
+
53
+
54
+ @CONV_LAYERS.register_module(name='ConvAWS')
55
+ class ConvAWS2d(nn.Conv2d):
56
+ """AWS (Adaptive Weight Standardization)
57
+
58
+ This is a variant of Weight Standardization
59
+ (https://arxiv.org/pdf/1903.10520.pdf)
60
+ It is used in DetectoRS to avoid NaN
61
+ (https://arxiv.org/pdf/2006.02334.pdf)
62
+
63
+ Args:
64
+ in_channels (int): Number of channels in the input image
65
+ out_channels (int): Number of channels produced by the convolution
66
+ kernel_size (int or tuple): Size of the conv kernel
67
+ stride (int or tuple, optional): Stride of the convolution. Default: 1
68
+ padding (int or tuple, optional): Zero-padding added to both sides of
69
+ the input. Default: 0
70
+ dilation (int or tuple, optional): Spacing between kernel elements.
71
+ Default: 1
72
+ groups (int, optional): Number of blocked connections from input
73
+ channels to output channels. Default: 1
74
+ bias (bool, optional): If set True, adds a learnable bias to the
75
+ output. Default: True
76
+ """
77
+
78
+ def __init__(self,
79
+ in_channels,
80
+ out_channels,
81
+ kernel_size,
82
+ stride=1,
83
+ padding=0,
84
+ dilation=1,
85
+ groups=1,
86
+ bias=True):
87
+ super().__init__(
88
+ in_channels,
89
+ out_channels,
90
+ kernel_size,
91
+ stride=stride,
92
+ padding=padding,
93
+ dilation=dilation,
94
+ groups=groups,
95
+ bias=bias)
96
+ self.register_buffer('weight_gamma',
97
+ torch.ones(self.out_channels, 1, 1, 1))
98
+ self.register_buffer('weight_beta',
99
+ torch.zeros(self.out_channels, 1, 1, 1))
100
+
101
+ def _get_weight(self, weight):
102
+ weight_flat = weight.view(weight.size(0), -1)
103
+ mean = weight_flat.mean(dim=1).view(-1, 1, 1, 1)
104
+ std = torch.sqrt(weight_flat.var(dim=1) + 1e-5).view(-1, 1, 1, 1)
105
+ weight = (weight - mean) / std
106
+ weight = self.weight_gamma * weight + self.weight_beta
107
+ return weight
108
+
109
+ def forward(self, x):
110
+ weight = self._get_weight(self.weight)
111
+ return F.conv2d(x, weight, self.bias, self.stride, self.padding,
112
+ self.dilation, self.groups)
113
+
114
+ def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
115
+ missing_keys, unexpected_keys, error_msgs):
116
+ """Override default load function.
117
+
118
+ AWS overrides the function _load_from_state_dict to recover
119
+ weight_gamma and weight_beta if they are missing. If weight_gamma and
120
+ weight_beta are found in the checkpoint, this function will return
121
+ after super()._load_from_state_dict. Otherwise, it will compute the
122
+ mean and std of the pretrained weights and store them in weight_beta
123
+ and weight_gamma.
124
+ """
125
+
126
+ self.weight_gamma.data.fill_(-1)
127
+ local_missing_keys = []
128
+ super()._load_from_state_dict(state_dict, prefix, local_metadata,
129
+ strict, local_missing_keys,
130
+ unexpected_keys, error_msgs)
131
+ if self.weight_gamma.data.mean() > 0:
132
+ for k in local_missing_keys:
133
+ missing_keys.append(k)
134
+ return
135
+ weight = self.weight.data
136
+ weight_flat = weight.view(weight.size(0), -1)
137
+ mean = weight_flat.mean(dim=1).view(-1, 1, 1, 1)
138
+ std = torch.sqrt(weight_flat.var(dim=1) + 1e-5).view(-1, 1, 1, 1)
139
+ self.weight_beta.data.copy_(mean)
140
+ self.weight_gamma.data.copy_(std)
141
+ missing_gamma_beta = [
142
+ k for k in local_missing_keys
143
+ if k.endswith('weight_gamma') or k.endswith('weight_beta')
144
+ ]
145
+ for k in missing_gamma_beta:
146
+ local_missing_keys.remove(k)
147
+ for k in local_missing_keys:
148
+ missing_keys.append(k)
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/bricks/depthwise_separable_conv_module.py ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import torch.nn as nn
3
+
4
+ from .conv_module import ConvModule
5
+
6
+
7
class DepthwiseSeparableConvModule(nn.Module):
    """Depthwise separable convolution module.

    See https://arxiv.org/pdf/1704.04861.pdf for details.

    Replaces a single ConvModule by two stacked conv blocks: a depthwise
    block (depthwise-conv/norm/activation, ``groups=in_channels``) followed
    by a pointwise block (1x1-conv/norm/activation). Norm/activation layers
    are inserted into the depthwise block whenever ``norm_cfg`` / ``act_cfg``
    are given.

    Args:
        in_channels (int): Number of channels in the input feature map.
            Same as that in ``nn._ConvNd``.
        out_channels (int): Number of channels produced by the convolution.
            Same as that in ``nn._ConvNd``.
        kernel_size (int | tuple[int]): Size of the convolving kernel.
            Same as that in ``nn._ConvNd``.
        stride (int | tuple[int]): Stride of the convolution.
            Same as that in ``nn._ConvNd``. Default: 1.
        padding (int | tuple[int]): Zero-padding added to both sides of
            the input. Same as that in ``nn._ConvNd``. Default: 0.
        dilation (int | tuple[int]): Spacing between kernel elements.
            Same as that in ``nn._ConvNd``. Default: 1.
        norm_cfg (dict): Default norm config for both depthwise ConvModule
            and pointwise ConvModule. Default: None.
        act_cfg (dict): Default activation config for both depthwise
            ConvModule and pointwise ConvModule. Default: dict(type='ReLU').
        dw_norm_cfg (dict): Norm config of depthwise ConvModule. If it is
            'default', it will be the same as `norm_cfg`. Default: 'default'.
        dw_act_cfg (dict): Activation config of depthwise ConvModule. If it
            is 'default', it will be the same as `act_cfg`. Default:
            'default'.
        pw_norm_cfg (dict): Norm config of pointwise ConvModule. If it is
            'default', it will be the same as `norm_cfg`. Default: 'default'.
        pw_act_cfg (dict): Activation config of pointwise ConvModule. If it
            is 'default', it will be the same as `act_cfg`. Default:
            'default'.
        kwargs (optional): Other shared arguments for depthwise and
            pointwise ConvModule. See ConvModule for ref.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 norm_cfg=None,
                 act_cfg=dict(type='ReLU'),
                 dw_norm_cfg='default',
                 dw_act_cfg='default',
                 pw_norm_cfg='default',
                 pw_act_cfg='default',
                 **kwargs):
        super(DepthwiseSeparableConvModule, self).__init__()
        # `groups` is fixed internally (depthwise uses groups=in_channels).
        assert 'groups' not in kwargs, 'groups should not be specified'

        # The sentinel string 'default' means: inherit the shared config.
        if dw_norm_cfg == 'default':
            dw_norm_cfg = norm_cfg
        if dw_act_cfg == 'default':
            dw_act_cfg = act_cfg
        if pw_norm_cfg == 'default':
            pw_norm_cfg = norm_cfg
        if pw_act_cfg == 'default':
            pw_act_cfg = act_cfg

        # Depthwise stage: one filter per input channel.
        self.depthwise_conv = ConvModule(
            in_channels,
            in_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=in_channels,
            norm_cfg=dw_norm_cfg,
            act_cfg=dw_act_cfg,
            **kwargs)

        # Pointwise stage: 1x1 conv that mixes channels.
        self.pointwise_conv = ConvModule(
            in_channels,
            out_channels,
            1,
            norm_cfg=pw_norm_cfg,
            act_cfg=pw_act_cfg,
            **kwargs)

    def forward(self, x):
        """Apply the depthwise then the pointwise conv block."""
        out = self.depthwise_conv(x)
        out = self.pointwise_conv(out)
        return out
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/bricks/drop.py ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import torch
3
+ import torch.nn as nn
4
+
5
+ from annotator.uniformer.mmcv import build_from_cfg
6
+ from .registry import DROPOUT_LAYERS
7
+
8
+
9
def drop_path(x, drop_prob=0., training=False):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of
    residual blocks).

    We follow the implementation
    https://github.com/rwightman/pytorch-image-models/blob/a2727c1bf78ba0d7b5727f5f95e37fb7f8866b1f/timm/models/layers/drop.py  # noqa: E501
    """
    # Identity during evaluation or when dropping is disabled.
    if not training or drop_prob == 0.:
        return x
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample, broadcast over all remaining dims.
    mask_shape = [x.shape[0]] + [1] * (x.ndim - 1)
    mask = torch.rand(mask_shape, dtype=x.dtype, device=x.device)
    mask = (mask + keep_prob).floor()  # binarize: 1 with prob keep_prob
    # Scale surviving paths by 1/keep_prob so the expectation is unchanged.
    return x.div(keep_prob) * mask
+ return output
25
+
26
+
27
@DROPOUT_LAYERS.register_module()
class DropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of
    residual blocks).

    We follow the implementation
    https://github.com/rwightman/pytorch-image-models/blob/a2727c1bf78ba0d7b5727f5f95e37fb7f8866b1f/timm/models/layers/drop.py  # noqa: E501

    Args:
        drop_prob (float): Probability of the path to be zeroed. Default: 0.1
    """

    def __init__(self, drop_prob=0.1):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        # `self.training` is toggled by nn.Module.train()/eval().
        return drop_path(x, self.drop_prob, self.training)
45
+
46
+
47
@DROPOUT_LAYERS.register_module()
class Dropout(nn.Dropout):
    """A thin wrapper around ``torch.nn.Dropout``.

    The only change is cosmetic: the ``p`` argument of ``torch.nn.Dropout``
    is renamed to ``drop_prob`` so the config interface matches ``DropPath``.

    Args:
        drop_prob (float): Probability of the elements to be
            zeroed. Default: 0.5.
        inplace (bool): Do the operation inplace or not. Default: False.
    """

    def __init__(self, drop_prob=0.5, inplace=False):
        super().__init__(p=drop_prob, inplace=inplace)
+ super().__init__(p=drop_prob, inplace=inplace)
61
+
62
+
63
def build_dropout(cfg, default_args=None):
    """Builder for drop out layers.

    Args:
        cfg (dict): Config with a ``type`` key naming a layer registered in
            ``DROPOUT_LAYERS`` (e.g. ``DropPath``, ``Dropout``) plus its
            constructor arguments.
        default_args (dict, optional): Fallback arguments merged into
            ``cfg`` by ``build_from_cfg``. Default: None.

    Returns:
        nn.Module: The instantiated dropout layer.
    """
    return build_from_cfg(cfg, DROPOUT_LAYERS, default_args)
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/bricks/generalized_attention.py ADDED
@@ -0,0 +1,412 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import math
3
+
4
+ import numpy as np
5
+ import torch
6
+ import torch.nn as nn
7
+ import torch.nn.functional as F
8
+
9
+ from ..utils import kaiming_init
10
+ from .registry import PLUGIN_LAYERS
11
+
12
+
13
@PLUGIN_LAYERS.register_module()
class GeneralizedAttention(nn.Module):
    """GeneralizedAttention module.

    See 'An Empirical Study of Spatial Attention Mechanisms in Deep Networks'
    (https://arxiv.org/abs/1711.07971) for details.

    Args:
        in_channels (int): Channels of the input feature map.
        spatial_range (int): The spatial range. -1 indicates no spatial range
            constraint. Default: -1.
        num_heads (int): The head number of empirical_attention module.
            Default: 9.
        position_embedding_dim (int): The position embedding dimension.
            Default: -1.
        position_magnitude (int): A multiplier acting on coord difference.
            Default: 1.
        kv_stride (int): The feature stride acting on key/value feature map.
            Default: 2.
        q_stride (int): The feature stride acting on query feature map.
            Default: 1.
        attention_type (str): A binary indicator string for indicating which
            items in generalized empirical_attention module are used.
            Default: '1111'.

            - '1000' indicates 'query and key content' (appr - appr) item,
            - '0100' indicates 'query content and relative position'
              (appr - position) item,
            - '0010' indicates 'key content only' (bias - appr) item,
            - '0001' indicates 'relative position only' (bias - position) item.
    """

    _abbr_ = 'gen_attention_block'

    def __init__(self,
                 in_channels,
                 spatial_range=-1,
                 num_heads=9,
                 position_embedding_dim=-1,
                 position_magnitude=1,
                 kv_stride=2,
                 q_stride=1,
                 attention_type='1111'):

        super(GeneralizedAttention, self).__init__()

        # hard range means local range for non-local operation
        self.position_embedding_dim = (
            position_embedding_dim
            if position_embedding_dim > 0 else in_channels)

        self.position_magnitude = position_magnitude
        self.num_heads = num_heads
        self.in_channels = in_channels
        self.spatial_range = spatial_range
        self.kv_stride = kv_stride
        self.q_stride = q_stride
        # e.g. '1011' -> [True, False, True, True]
        self.attention_type = [bool(int(_)) for _ in attention_type]
        self.qk_embed_dim = in_channels // num_heads
        out_c = self.qk_embed_dim * num_heads

        if self.attention_type[0] or self.attention_type[1]:
            self.query_conv = nn.Conv2d(
                in_channels=in_channels,
                out_channels=out_c,
                kernel_size=1,
                bias=False)
            self.query_conv.kaiming_init = True

        if self.attention_type[0] or self.attention_type[2]:
            self.key_conv = nn.Conv2d(
                in_channels=in_channels,
                out_channels=out_c,
                kernel_size=1,
                bias=False)
            self.key_conv.kaiming_init = True

        self.v_dim = in_channels // num_heads
        self.value_conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=self.v_dim * num_heads,
            kernel_size=1,
            bias=False)
        self.value_conv.kaiming_init = True

        if self.attention_type[1] or self.attention_type[3]:
            self.appr_geom_fc_x = nn.Linear(
                self.position_embedding_dim // 2, out_c, bias=False)
            self.appr_geom_fc_x.kaiming_init = True

            self.appr_geom_fc_y = nn.Linear(
                self.position_embedding_dim // 2, out_c, bias=False)
            self.appr_geom_fc_y.kaiming_init = True

        if self.attention_type[2]:
            # uniform init in [-stdv, stdv)
            stdv = 1.0 / math.sqrt(self.qk_embed_dim * 2)
            appr_bias_value = -2 * stdv * torch.rand(out_c) + stdv
            self.appr_bias = nn.Parameter(appr_bias_value)

        if self.attention_type[3]:
            stdv = 1.0 / math.sqrt(self.qk_embed_dim * 2)
            geom_bias_value = -2 * stdv * torch.rand(out_c) + stdv
            self.geom_bias = nn.Parameter(geom_bias_value)

        self.proj_conv = nn.Conv2d(
            in_channels=self.v_dim * num_heads,
            out_channels=in_channels,
            kernel_size=1,
            bias=True)
        self.proj_conv.kaiming_init = True
        # residual scale, starts at 0 so the block begins as an identity
        self.gamma = nn.Parameter(torch.zeros(1))

        if self.spatial_range >= 0:
            # only works when non local is after 3*3 conv
            # NOTE(review): `max_len` is only defined for 256/512 channels;
            # other channel counts with spatial_range >= 0 raise NameError.
            if in_channels == 256:
                max_len = 84
            elif in_channels == 512:
                max_len = 42

            max_len_kv = int((max_len - 1.0) / self.kv_stride + 1)
            # BUGFIX: `np.int` was deprecated in NumPy 1.20 and removed in
            # 1.24; the builtin `int` is the exact equivalent (C long) and
            # works on every NumPy version.
            local_constraint_map = np.ones(
                (max_len, max_len, max_len_kv, max_len_kv), dtype=int)
            for iy in range(max_len):
                for ix in range(max_len):
                    local_constraint_map[
                        iy, ix,
                        max((iy - self.spatial_range) //
                            self.kv_stride, 0):min((iy + self.spatial_range +
                                                    1) // self.kv_stride +
                                                   1, max_len),
                        max((ix - self.spatial_range) //
                            self.kv_stride, 0):min((ix + self.spatial_range +
                                                    1) // self.kv_stride +
                                                   1, max_len)] = 0

            # byte mask, 1 marks positions outside the local window
            self.local_constraint_map = nn.Parameter(
                torch.from_numpy(local_constraint_map).byte(),
                requires_grad=False)

        if self.q_stride > 1:
            self.q_downsample = nn.AvgPool2d(
                kernel_size=1, stride=self.q_stride)
        else:
            self.q_downsample = None

        if self.kv_stride > 1:
            self.kv_downsample = nn.AvgPool2d(
                kernel_size=1, stride=self.kv_stride)
        else:
            self.kv_downsample = None

        self.init_weights()

    def get_position_embedding(self,
                               h,
                               w,
                               h_kv,
                               w_kv,
                               q_stride,
                               kv_stride,
                               device,
                               dtype,
                               feat_dim,
                               wave_length=1000):
        """Build sinusoidal embeddings of the (x, y) coord differences."""
        # the default type of Tensor is float32, leading to type mismatch
        # in fp16 mode. Cast it to support fp16 mode.
        h_idxs = torch.linspace(0, h - 1, h).to(device=device, dtype=dtype)
        h_idxs = h_idxs.view((h, 1)) * q_stride

        w_idxs = torch.linspace(0, w - 1, w).to(device=device, dtype=dtype)
        w_idxs = w_idxs.view((w, 1)) * q_stride

        h_kv_idxs = torch.linspace(0, h_kv - 1, h_kv).to(
            device=device, dtype=dtype)
        h_kv_idxs = h_kv_idxs.view((h_kv, 1)) * kv_stride

        w_kv_idxs = torch.linspace(0, w_kv - 1, w_kv).to(
            device=device, dtype=dtype)
        w_kv_idxs = w_kv_idxs.view((w_kv, 1)) * kv_stride

        # (h, h_kv, 1)
        h_diff = h_idxs.unsqueeze(1) - h_kv_idxs.unsqueeze(0)
        h_diff *= self.position_magnitude

        # (w, w_kv, 1)
        w_diff = w_idxs.unsqueeze(1) - w_kv_idxs.unsqueeze(0)
        w_diff *= self.position_magnitude

        feat_range = torch.arange(0, feat_dim / 4).to(
            device=device, dtype=dtype)

        dim_mat = torch.Tensor([wave_length]).to(device=device, dtype=dtype)
        dim_mat = dim_mat**((4. / feat_dim) * feat_range)
        dim_mat = dim_mat.view((1, 1, -1))

        embedding_x = torch.cat(
            ((w_diff / dim_mat).sin(), (w_diff / dim_mat).cos()), dim=2)

        embedding_y = torch.cat(
            ((h_diff / dim_mat).sin(), (h_diff / dim_mat).cos()), dim=2)

        return embedding_x, embedding_y

    def forward(self, x_input):
        num_heads = self.num_heads

        # use empirical_attention
        if self.q_downsample is not None:
            x_q = self.q_downsample(x_input)
        else:
            x_q = x_input
        n, _, h, w = x_q.shape

        if self.kv_downsample is not None:
            x_kv = self.kv_downsample(x_input)
        else:
            x_kv = x_input
        _, _, h_kv, w_kv = x_kv.shape

        if self.attention_type[0] or self.attention_type[1]:
            proj_query = self.query_conv(x_q).view(
                (n, num_heads, self.qk_embed_dim, h * w))
            proj_query = proj_query.permute(0, 1, 3, 2)

        if self.attention_type[0] or self.attention_type[2]:
            proj_key = self.key_conv(x_kv).view(
                (n, num_heads, self.qk_embed_dim, h_kv * w_kv))

        if self.attention_type[1] or self.attention_type[3]:
            position_embed_x, position_embed_y = self.get_position_embedding(
                h, w, h_kv, w_kv, self.q_stride, self.kv_stride,
                x_input.device, x_input.dtype, self.position_embedding_dim)
            # (n, num_heads, w, w_kv, dim)
            position_feat_x = self.appr_geom_fc_x(position_embed_x).\
                view(1, w, w_kv, num_heads, self.qk_embed_dim).\
                permute(0, 3, 1, 2, 4).\
                repeat(n, 1, 1, 1, 1)

            # (n, num_heads, h, h_kv, dim)
            position_feat_y = self.appr_geom_fc_y(position_embed_y).\
                view(1, h, h_kv, num_heads, self.qk_embed_dim).\
                permute(0, 3, 1, 2, 4).\
                repeat(n, 1, 1, 1, 1)

            position_feat_x /= math.sqrt(2)
            position_feat_y /= math.sqrt(2)

        # accelerate for saliency only
        if (np.sum(self.attention_type) == 1) and self.attention_type[2]:
            appr_bias = self.appr_bias.\
                view(1, num_heads, 1, self.qk_embed_dim).\
                repeat(n, 1, 1, 1)

            energy = torch.matmul(appr_bias, proj_key).\
                view(n, num_heads, 1, h_kv * w_kv)

            # the saliency map is spatially constant over queries
            h = 1
            w = 1
        else:
            # (n, num_heads, h*w, h_kv*w_kv), query before key, 540mb for
            if not self.attention_type[0]:
                energy = torch.zeros(
                    n,
                    num_heads,
                    h,
                    w,
                    h_kv,
                    w_kv,
                    dtype=x_input.dtype,
                    device=x_input.device)

            # attention_type[0]: appr - appr
            # attention_type[1]: appr - position
            # attention_type[2]: bias - appr
            # attention_type[3]: bias - position
            if self.attention_type[0] or self.attention_type[2]:
                if self.attention_type[0] and self.attention_type[2]:
                    appr_bias = self.appr_bias.\
                        view(1, num_heads, 1, self.qk_embed_dim)
                    energy = torch.matmul(proj_query + appr_bias, proj_key).\
                        view(n, num_heads, h, w, h_kv, w_kv)

                elif self.attention_type[0]:
                    energy = torch.matmul(proj_query, proj_key).\
                        view(n, num_heads, h, w, h_kv, w_kv)

                elif self.attention_type[2]:
                    appr_bias = self.appr_bias.\
                        view(1, num_heads, 1, self.qk_embed_dim).\
                        repeat(n, 1, 1, 1)

                    energy += torch.matmul(appr_bias, proj_key).\
                        view(n, num_heads, 1, 1, h_kv, w_kv)

            if self.attention_type[1] or self.attention_type[3]:
                if self.attention_type[1] and self.attention_type[3]:
                    geom_bias = self.geom_bias.\
                        view(1, num_heads, 1, self.qk_embed_dim)

                    proj_query_reshape = (proj_query + geom_bias).\
                        view(n, num_heads, h, w, self.qk_embed_dim)

                    energy_x = torch.matmul(
                        proj_query_reshape.permute(0, 1, 3, 2, 4),
                        position_feat_x.permute(0, 1, 2, 4, 3))
                    energy_x = energy_x.\
                        permute(0, 1, 3, 2, 4).unsqueeze(4)

                    energy_y = torch.matmul(
                        proj_query_reshape,
                        position_feat_y.permute(0, 1, 2, 4, 3))
                    energy_y = energy_y.unsqueeze(5)

                    energy += energy_x + energy_y

                elif self.attention_type[1]:
                    proj_query_reshape = proj_query.\
                        view(n, num_heads, h, w, self.qk_embed_dim)
                    proj_query_reshape = proj_query_reshape.\
                        permute(0, 1, 3, 2, 4)
                    position_feat_x_reshape = position_feat_x.\
                        permute(0, 1, 2, 4, 3)
                    position_feat_y_reshape = position_feat_y.\
                        permute(0, 1, 2, 4, 3)

                    energy_x = torch.matmul(proj_query_reshape,
                                            position_feat_x_reshape)
                    energy_x = energy_x.permute(0, 1, 3, 2, 4).unsqueeze(4)

                    energy_y = torch.matmul(proj_query_reshape,
                                            position_feat_y_reshape)
                    energy_y = energy_y.unsqueeze(5)

                    energy += energy_x + energy_y

                elif self.attention_type[3]:
                    geom_bias = self.geom_bias.\
                        view(1, num_heads, self.qk_embed_dim, 1).\
                        repeat(n, 1, 1, 1)

                    position_feat_x_reshape = position_feat_x.\
                        view(n, num_heads, w*w_kv, self.qk_embed_dim)

                    position_feat_y_reshape = position_feat_y.\
                        view(n, num_heads, h * h_kv, self.qk_embed_dim)

                    energy_x = torch.matmul(position_feat_x_reshape, geom_bias)
                    energy_x = energy_x.view(n, num_heads, 1, w, 1, w_kv)

                    energy_y = torch.matmul(position_feat_y_reshape, geom_bias)
                    energy_y = energy_y.view(n, num_heads, h, 1, h_kv, 1)

                    energy += energy_x + energy_y

            energy = energy.view(n, num_heads, h * w, h_kv * w_kv)

        if self.spatial_range >= 0:
            cur_local_constraint_map = \
                self.local_constraint_map[:h, :w, :h_kv, :w_kv].\
                contiguous().\
                view(1, 1, h*w, h_kv*w_kv)

            # NOTE(review): byte masks for masked_fill_ are deprecated in
            # recent PyTorch; `.bool()` on the map would silence the warning
            # without changing results — confirm against the minimum
            # supported torch version before changing the stored dtype.
            energy = energy.masked_fill_(cur_local_constraint_map,
                                         float('-inf'))

        attention = F.softmax(energy, 3)

        proj_value = self.value_conv(x_kv)
        proj_value_reshape = proj_value.\
            view((n, num_heads, self.v_dim, h_kv * w_kv)).\
            permute(0, 1, 3, 2)

        out = torch.matmul(attention, proj_value_reshape).\
            permute(0, 1, 3, 2).\
            contiguous().\
            view(n, self.v_dim * self.num_heads, h, w)

        out = self.proj_conv(out)

        # output is downsampled, upsample back to input size
        if self.q_downsample is not None:
            out = F.interpolate(
                out,
                size=x_input.shape[2:],
                mode='bilinear',
                align_corners=False)

        out = self.gamma * out + x_input
        return out

    def init_weights(self):
        """Kaiming-init every submodule flagged with ``kaiming_init``."""
        for m in self.modules():
            if hasattr(m, 'kaiming_init') and m.kaiming_init:
                kaiming_init(
                    m,
                    mode='fan_in',
                    nonlinearity='leaky_relu',
                    bias=0,
                    distribution='uniform',
                    a=1)
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/bricks/hsigmoid.py ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import torch.nn as nn
3
+
4
+ from .registry import ACTIVATION_LAYERS
5
+
6
+
7
@ACTIVATION_LAYERS.register_module()
class HSigmoid(nn.Module):
    """Hard Sigmoid Module. Apply the hard sigmoid function:
    Hsigmoid(x) = min(max((x + bias) / divisor, min_value), max_value)
    Default: Hsigmoid(x) = min(max((x + 1) / 2, 0), 1)

    Args:
        bias (float): Bias of the input feature map. Default: 1.0.
        divisor (float): Divisor of the input feature map. Default: 2.0.
        min_value (float): Lower bound value. Default: 0.0.
        max_value (float): Upper bound value. Default: 1.0.

    Returns:
        Tensor: The output tensor.
    """

    def __init__(self, bias=1.0, divisor=2.0, min_value=0.0, max_value=1.0):
        super(HSigmoid, self).__init__()
        # a zero divisor would make the forward pass undefined
        assert divisor != 0
        self.bias = bias
        self.divisor = divisor
        self.min_value = min_value
        self.max_value = max_value

    def forward(self, x):
        out = (x + self.bias) / self.divisor
        return out.clamp_(self.min_value, self.max_value)
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/bricks/hswish.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import torch.nn as nn
3
+
4
+ from .registry import ACTIVATION_LAYERS
5
+
6
+
7
@ACTIVATION_LAYERS.register_module()
class HSwish(nn.Module):
    """Hard Swish Module.

    This module applies the hard swish function:

    .. math::
        Hswish(x) = x * ReLU6(x + 3) / 6

    Args:
        inplace (bool): can optionally do the operation in-place.
            Default: False.

    Returns:
        Tensor: The output tensor.
    """

    def __init__(self, inplace=False):
        super(HSwish, self).__init__()
        # ReLU6 supplies the min(max(., 0), 6) gate of hard swish
        self.act = nn.ReLU6(inplace)

    def forward(self, x):
        return self.act(x + 3) * x / 6
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/bricks/non_local.py ADDED
@@ -0,0 +1,306 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ from abc import ABCMeta
3
+
4
+ import torch
5
+ import torch.nn as nn
6
+
7
+ from ..utils import constant_init, normal_init
8
+ from .conv_module import ConvModule
9
+ from .registry import PLUGIN_LAYERS
10
+
11
+
12
class _NonLocalNd(nn.Module, metaclass=ABCMeta):
    """Basic Non-local module.

    This module is proposed in
    "Non-local Neural Networks"
    Paper reference: https://arxiv.org/abs/1711.07971
    Code reference: https://github.com/AlexHex7/Non-local_pytorch

    Args:
        in_channels (int): Channels of the input feature map.
        reduction (int): Channel reduction ratio. Default: 2.
        use_scale (bool): Whether to scale pairwise_weight by
            `1/sqrt(inter_channels)` when the mode is `embedded_gaussian`.
            Default: True.
        conv_cfg (None | dict): The config dict for convolution layers.
            If not specified, it will use `nn.Conv2d` for convolution layers.
            Default: None.
        norm_cfg (None | dict): The config dict for normalization layers.
            Default: None. (This parameter is only applicable to conv_out.)
        mode (str): Options are `gaussian`, `concatenation`,
            `embedded_gaussian` and `dot_product`. Default: embedded_gaussian.
    """

    def __init__(self,
                 in_channels,
                 reduction=2,
                 use_scale=True,
                 conv_cfg=None,
                 norm_cfg=None,
                 mode='embedded_gaussian',
                 **kwargs):
        super(_NonLocalNd, self).__init__()
        self.in_channels = in_channels
        self.reduction = reduction
        self.use_scale = use_scale
        # keep at least one channel after the reduction
        self.inter_channels = max(in_channels // reduction, 1)
        # `mode` doubles as the name of the pairwise function method,
        # dispatched via getattr in forward().
        self.mode = mode

        if mode not in [
                'gaussian', 'embedded_gaussian', 'dot_product', 'concatenation'
        ]:
            raise ValueError("Mode should be in 'gaussian', 'concatenation', "
                             f"'embedded_gaussian' or 'dot_product', but got "
                             f'{mode} instead.')

        # g, theta, phi are defaulted as `nn.ConvNd`.
        # Here we use ConvModule for potential usage.
        self.g = ConvModule(
            self.in_channels,
            self.inter_channels,
            kernel_size=1,
            conv_cfg=conv_cfg,
            act_cfg=None)
        self.conv_out = ConvModule(
            self.inter_channels,
            self.in_channels,
            kernel_size=1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=None)

        # gaussian mode compares raw inputs, so theta/phi convs are not
        # needed there
        if self.mode != 'gaussian':
            self.theta = ConvModule(
                self.in_channels,
                self.inter_channels,
                kernel_size=1,
                conv_cfg=conv_cfg,
                act_cfg=None)
            self.phi = ConvModule(
                self.in_channels,
                self.inter_channels,
                kernel_size=1,
                conv_cfg=conv_cfg,
                act_cfg=None)

        if self.mode == 'concatenation':
            self.concat_project = ConvModule(
                self.inter_channels * 2,
                1,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=False,
                act_cfg=dict(type='ReLU'))

        self.init_weights(**kwargs)

    def init_weights(self, std=0.01, zeros_init=True):
        # `zeros_init` makes the residual branch start at zero, so the
        # whole block is initially an identity mapping.
        if self.mode != 'gaussian':
            for m in [self.g, self.theta, self.phi]:
                normal_init(m.conv, std=std)
        else:
            normal_init(self.g.conv, std=std)
        if zeros_init:
            # zero the last layer that exists: the norm if configured,
            # otherwise the conv itself
            if self.conv_out.norm_cfg is None:
                constant_init(self.conv_out.conv, 0)
            else:
                constant_init(self.conv_out.norm, 0)
        else:
            if self.conv_out.norm_cfg is None:
                normal_init(self.conv_out.conv, std=std)
            else:
                normal_init(self.conv_out.norm, std=std)

    def gaussian(self, theta_x, phi_x):
        """Softmax over raw dot products of the (unprojected) inputs."""
        # NonLocal1d pairwise_weight: [N, H, H]
        # NonLocal2d pairwise_weight: [N, HxW, HxW]
        # NonLocal3d pairwise_weight: [N, TxHxW, TxHxW]
        pairwise_weight = torch.matmul(theta_x, phi_x)
        pairwise_weight = pairwise_weight.softmax(dim=-1)
        return pairwise_weight

    def embedded_gaussian(self, theta_x, phi_x):
        """Softmax over (optionally scaled) projected dot products."""
        # NonLocal1d pairwise_weight: [N, H, H]
        # NonLocal2d pairwise_weight: [N, HxW, HxW]
        # NonLocal3d pairwise_weight: [N, TxHxW, TxHxW]
        pairwise_weight = torch.matmul(theta_x, phi_x)
        if self.use_scale:
            # theta_x.shape[-1] is `self.inter_channels`
            pairwise_weight /= theta_x.shape[-1]**0.5
        pairwise_weight = pairwise_weight.softmax(dim=-1)
        return pairwise_weight

    def dot_product(self, theta_x, phi_x):
        """Dot products normalized by the number of positions (no softmax)."""
        # NonLocal1d pairwise_weight: [N, H, H]
        # NonLocal2d pairwise_weight: [N, HxW, HxW]
        # NonLocal3d pairwise_weight: [N, TxHxW, TxHxW]
        pairwise_weight = torch.matmul(theta_x, phi_x)
        pairwise_weight /= pairwise_weight.shape[-1]
        return pairwise_weight

    def concatenation(self, theta_x, phi_x):
        """Pairwise weight from a 1x1 conv over concatenated features."""
        # NonLocal1d pairwise_weight: [N, H, H]
        # NonLocal2d pairwise_weight: [N, HxW, HxW]
        # NonLocal3d pairwise_weight: [N, TxHxW, TxHxW]
        h = theta_x.size(2)
        w = phi_x.size(3)
        # broadcast theta/phi to all (query, key) position pairs
        theta_x = theta_x.repeat(1, 1, 1, w)
        phi_x = phi_x.repeat(1, 1, h, 1)

        concat_feature = torch.cat([theta_x, phi_x], dim=1)
        pairwise_weight = self.concat_project(concat_feature)
        n, _, h, w = pairwise_weight.size()
        pairwise_weight = pairwise_weight.view(n, h, w)
        pairwise_weight /= pairwise_weight.shape[-1]

        return pairwise_weight

    def forward(self, x):
        # Assume `reduction = 1`, then `inter_channels = C`
        # or `inter_channels = C` when `mode="gaussian"`

        # NonLocal1d x: [N, C, H]
        # NonLocal2d x: [N, C, H, W]
        # NonLocal3d x: [N, C, T, H, W]
        n = x.size(0)

        # NonLocal1d g_x: [N, H, C]
        # NonLocal2d g_x: [N, HxW, C]
        # NonLocal3d g_x: [N, TxHxW, C]
        g_x = self.g(x).view(n, self.inter_channels, -1)
        g_x = g_x.permute(0, 2, 1)

        # NonLocal1d theta_x: [N, H, C], phi_x: [N, C, H]
        # NonLocal2d theta_x: [N, HxW, C], phi_x: [N, C, HxW]
        # NonLocal3d theta_x: [N, TxHxW, C], phi_x: [N, C, TxHxW]
        if self.mode == 'gaussian':
            theta_x = x.view(n, self.in_channels, -1)
            theta_x = theta_x.permute(0, 2, 1)
            # NOTE: `self.sub_sample` is assigned by the NonLocal1d/2d/3d
            # subclasses after super().__init__, so forward() must only be
            # called on those subclasses.
            if self.sub_sample:
                phi_x = self.phi(x).view(n, self.in_channels, -1)
            else:
                phi_x = x.view(n, self.in_channels, -1)
        elif self.mode == 'concatenation':
            theta_x = self.theta(x).view(n, self.inter_channels, -1, 1)
            phi_x = self.phi(x).view(n, self.inter_channels, 1, -1)
        else:
            theta_x = self.theta(x).view(n, self.inter_channels, -1)
            theta_x = theta_x.permute(0, 2, 1)
            phi_x = self.phi(x).view(n, self.inter_channels, -1)

        # dispatch to gaussian/embedded_gaussian/dot_product/concatenation
        pairwise_func = getattr(self, self.mode)
        # NonLocal1d pairwise_weight: [N, H, H]
        # NonLocal2d pairwise_weight: [N, HxW, HxW]
        # NonLocal3d pairwise_weight: [N, TxHxW, TxHxW]
        pairwise_weight = pairwise_func(theta_x, phi_x)

        # NonLocal1d y: [N, H, C]
        # NonLocal2d y: [N, HxW, C]
        # NonLocal3d y: [N, TxHxW, C]
        y = torch.matmul(pairwise_weight, g_x)
        # NonLocal1d y: [N, C, H]
        # NonLocal2d y: [N, C, H, W]
        # NonLocal3d y: [N, C, T, H, W]
        y = y.permute(0, 2, 1).contiguous().reshape(n, self.inter_channels,
                                                    *x.size()[2:])

        # residual connection around the non-local branch
        output = x + self.conv_out(y)

        return output
212
+
213
+
214
class NonLocal1d(_NonLocalNd):
    """1D Non-local module.

    Args:
        in_channels (int): Same as `NonLocalND`.
        sub_sample (bool): Whether to apply max pooling after pairwise
            function (Note that the `sub_sample` is applied on spatial only).
            Default: False.
        conv_cfg (None | dict): Same as `NonLocalND`.
            Default: dict(type='Conv1d').
    """

    def __init__(self,
                 in_channels,
                 sub_sample=False,
                 conv_cfg=dict(type='Conv1d'),
                 **kwargs):
        super(NonLocal1d, self).__init__(
            in_channels, conv_cfg=conv_cfg, **kwargs)

        self.sub_sample = sub_sample

        if sub_sample:
            # halve the spatial length of the key/value branches
            pool = nn.MaxPool1d(kernel_size=2)
            self.g = nn.Sequential(self.g, pool)
            if self.mode == 'gaussian':
                # gaussian mode has no phi conv; pool the raw input instead
                self.phi = pool
            else:
                self.phi = nn.Sequential(self.phi, pool)
243
+
244
+
245
@PLUGIN_LAYERS.register_module()
class NonLocal2d(_NonLocalNd):
    """2D Non-local module.

    Args:
        in_channels (int): Same as `NonLocalND`.
        sub_sample (bool): Whether to apply max pooling after pairwise
            function (Note that the `sub_sample` is applied on spatial only).
            Default: False.
        conv_cfg (None | dict): Same as `NonLocalND`.
            Default: dict(type='Conv2d').
    """

    _abbr_ = 'nonlocal_block'

    def __init__(self,
                 in_channels,
                 sub_sample=False,
                 conv_cfg=dict(type='Conv2d'),
                 **kwargs):
        super(NonLocal2d, self).__init__(
            in_channels, conv_cfg=conv_cfg, **kwargs)

        self.sub_sample = sub_sample

        if sub_sample:
            # halve both spatial dims of the key/value branches
            pool = nn.MaxPool2d(kernel_size=(2, 2))
            self.g = nn.Sequential(self.g, pool)
            if self.mode == 'gaussian':
                # gaussian mode has no phi conv; pool the raw input instead
                self.phi = pool
            else:
                self.phi = nn.Sequential(self.phi, pool)
277
+
278
+
279
class NonLocal3d(_NonLocalNd):
    """3D Non-local module.

    Args:
        in_channels (int): Same as `NonLocalND`.
        sub_sample (bool): Whether to apply max pooling after pairwise
            function (Note that the `sub_sample` is applied on spatial only).
            Default: False.
        conv_cfg (None | dict): Same as `NonLocalND`.
            Default: dict(type='Conv3d').
    """

    def __init__(self,
                 in_channels,
                 sub_sample=False,
                 conv_cfg=dict(type='Conv3d'),
                 **kwargs):
        super(NonLocal3d, self).__init__(
            in_channels, conv_cfg=conv_cfg, **kwargs)

        self.sub_sample = sub_sample

        if sub_sample:
            # pool spatial dims only; the temporal dim is kept intact
            pool = nn.MaxPool3d(kernel_size=(1, 2, 2))
            self.g = nn.Sequential(self.g, pool)
            if self.mode == 'gaussian':
                # gaussian mode has no phi conv; pool the raw input instead
                self.phi = pool
            else:
                self.phi = nn.Sequential(self.phi, pool)
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/bricks/norm.py ADDED
@@ -0,0 +1,144 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import inspect
3
+
4
+ import torch.nn as nn
5
+
6
+ from annotator.uniformer.mmcv.utils import is_tuple_of
7
+ from annotator.uniformer.mmcv.utils.parrots_wrapper import SyncBatchNorm, _BatchNorm, _InstanceNorm
8
+ from .registry import NORM_LAYERS
9
+
10
# Register the torch normalization layers under the type names used in
# configs, so ``build_norm_layer`` can resolve ``dict(type=...)`` entries.
NORM_LAYERS.register_module('BN', module=nn.BatchNorm2d)
NORM_LAYERS.register_module('BN1d', module=nn.BatchNorm1d)
NORM_LAYERS.register_module('BN2d', module=nn.BatchNorm2d)
NORM_LAYERS.register_module('BN3d', module=nn.BatchNorm3d)
NORM_LAYERS.register_module('SyncBN', module=SyncBatchNorm)
NORM_LAYERS.register_module('GN', module=nn.GroupNorm)
NORM_LAYERS.register_module('LN', module=nn.LayerNorm)
NORM_LAYERS.register_module('IN', module=nn.InstanceNorm2d)
NORM_LAYERS.register_module('IN1d', module=nn.InstanceNorm1d)
NORM_LAYERS.register_module('IN2d', module=nn.InstanceNorm2d)
NORM_LAYERS.register_module('IN3d', module=nn.InstanceNorm3d)
21
+
22
+
23
def infer_abbr(class_type):
    """Infer abbreviation from the class name.

    When we build a norm layer with `build_norm_layer()`, we want to preserve
    the norm type in variable names, e.g, self.bn1, self.gn. This method will
    infer the abbreviation to map class types to abbreviations.

    Rule 1: If the class has the property "_abbr_", return the property.
    Rule 2: If the parent class is _BatchNorm, GroupNorm, LayerNorm or
    InstanceNorm, the abbreviation of this layer will be "bn", "gn", "ln" and
    "in" respectively.
    Rule 3: If the class name contains "batch", "group", "layer" or "instance",
    the abbreviation of this layer will be "bn", "gn", "ln" and "in"
    respectively.
    Rule 4: Otherwise, the abbreviation falls back to "norm_layer".

    Args:
        class_type (type): The norm layer type.

    Returns:
        str: The inferred abbreviation.
    """
    if not inspect.isclass(class_type):
        raise TypeError(
            f'class_type must be a type, but got {type(class_type)}')
    if hasattr(class_type, '_abbr_'):
        return class_type._abbr_
    # _InstanceNorm must be checked before _BatchNorm, since IN is a
    # subclass of BN in the parrots wrapper.
    if issubclass(class_type, _InstanceNorm):  # IN is a subclass of BN
        return 'in'
    elif issubclass(class_type, _BatchNorm):
        return 'bn'
    elif issubclass(class_type, nn.GroupNorm):
        return 'gn'
    elif issubclass(class_type, nn.LayerNorm):
        return 'ln'
    else:
        # Fall back to substring matching on the lowercased class name.
        class_name = class_type.__name__.lower()
        if 'batch' in class_name:
            return 'bn'
        elif 'group' in class_name:
            return 'gn'
        elif 'layer' in class_name:
            return 'ln'
        elif 'instance' in class_name:
            return 'in'
        else:
            return 'norm_layer'
70
+
71
+
72
def build_norm_layer(cfg, num_features, postfix=''):
    """Build normalization layer.

    Args:
        cfg (dict): The norm layer config, which should contain:

            - type (str): Layer type.
            - layer args: Args needed to instantiate a norm layer.
            - requires_grad (bool, optional): Whether stop gradient updates.
        num_features (int): Number of input channels.
        postfix (int | str): The postfix to be appended into norm abbreviation
            to create named layer.

    Returns:
        (str, nn.Module): The first element is the layer name consisting of
            abbreviation and postfix, e.g., bn1, gn. The second element is the
            created norm layer.
    """
    if not isinstance(cfg, dict):
        raise TypeError('cfg must be a dict')
    if 'type' not in cfg:
        raise KeyError('the cfg dict must contain the key "type"')
    # Work on a copy so the caller's config dict is not mutated by the pops.
    cfg_ = cfg.copy()

    layer_type = cfg_.pop('type')
    if layer_type not in NORM_LAYERS:
        raise KeyError(f'Unrecognized norm type {layer_type}')

    norm_layer = NORM_LAYERS.get(layer_type)
    abbr = infer_abbr(norm_layer)

    assert isinstance(postfix, (int, str))
    name = abbr + str(postfix)

    requires_grad = cfg_.pop('requires_grad', True)
    cfg_.setdefault('eps', 1e-5)
    if layer_type != 'GN':
        layer = norm_layer(num_features, **cfg_)
        # Some SyncBN implementations expose ``_specify_ddp_gpu_num``; newer
        # ones do not, hence the hasattr guard before calling it.
        if layer_type == 'SyncBN' and hasattr(layer, '_specify_ddp_gpu_num'):
            layer._specify_ddp_gpu_num(1)
    else:
        # GroupNorm takes ``num_channels`` (plus mandatory ``num_groups``)
        # rather than a positional feature count.
        assert 'num_groups' in cfg_
        layer = norm_layer(num_channels=num_features, **cfg_)

    # Optionally freeze the layer's parameters.
    for param in layer.parameters():
        param.requires_grad = requires_grad

    return name, layer
120
+
121
+
122
def is_norm(layer, exclude=None):
    """Check if a layer is a normalization layer.

    Args:
        layer (nn.Module): The layer to be checked.
        exclude (type | tuple[type]): Types to be excluded.

    Returns:
        bool: Whether the layer is a norm layer.
    """
    if exclude is not None:
        exclude = exclude if isinstance(exclude, tuple) else (exclude, )
        if not is_tuple_of(exclude, type):
            raise TypeError(
                f'"exclude" must be either None or type or a tuple of types, '
                f'but got {type(exclude)}: {exclude}')
        # Excluded types are never reported as norm layers.
        if exclude and isinstance(layer, exclude):
            return False

    return isinstance(
        layer, (_BatchNorm, _InstanceNorm, nn.GroupNorm, nn.LayerNorm))
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/bricks/padding.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import torch.nn as nn
3
+
4
+ from .registry import PADDING_LAYERS
5
+
6
# Map config type names to the corresponding torch 2D padding layers.
PADDING_LAYERS.register_module('zero', module=nn.ZeroPad2d)
PADDING_LAYERS.register_module('reflect', module=nn.ReflectionPad2d)
PADDING_LAYERS.register_module('replicate', module=nn.ReplicationPad2d)
9
+
10
+
11
def build_padding_layer(cfg, *args, **kwargs):
    """Build padding layer.

    Args:
        cfg (None or dict): The padding layer config, which should contain:
            - type (str): Layer type.
            - layer args: Args needed to instantiate a padding layer.

    Returns:
        nn.Module: Created padding layer.
    """
    if not isinstance(cfg, dict):
        raise TypeError('cfg must be a dict')
    if 'type' not in cfg:
        raise KeyError('the cfg dict must contain the key "type"')

    # Copy before popping so the caller's config survives intact.
    layer_args = cfg.copy()
    padding_type = layer_args.pop('type')
    if padding_type not in PADDING_LAYERS:
        raise KeyError(f'Unrecognized padding type {padding_type}.')

    padding_cls = PADDING_LAYERS.get(padding_type)
    return padding_cls(*args, **kwargs, **layer_args)
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/bricks/plugin.py ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import inspect
2
+ import platform
3
+
4
+ from .registry import PLUGIN_LAYERS
5
+
6
+ if platform.system() == 'Windows':
7
+ import regex as re
8
+ else:
9
+ import re
10
+
11
+
12
def infer_abbr(class_type):
    """Infer abbreviation from the class name.

    This method will infer the abbreviation to map class types to
    abbreviations.

    Rule 1: If the class has the property "abbr", return the property.
    Rule 2: Otherwise, the abbreviation falls back to snake case of class
    name, e.g. the abbreviation of ``FancyBlock`` will be ``fancy_block``.

    Args:
        class_type (type): The norm layer type.

    Returns:
        str: The inferred abbreviation.
    """

    def _camel_to_snake(word):
        """Convert a CamelCase word into snake_case.

        Modified from `inflection lib
        <https://inflection.readthedocs.io/en/latest/#inflection.underscore>`_.

        Example::

            >>> _camel_to_snake("FancyBlock")
            'fancy_block'
        """
        # Split acronym/word boundaries (``IPBlock`` -> ``IP_Block``), then
        # lowercase/uppercase boundaries (``FancyBlock`` -> ``Fancy_Block``).
        word = re.sub(r'([A-Z]+)([A-Z][a-z])', r'\1_\2', word)
        word = re.sub(r'([a-z\d])([A-Z])', r'\1_\2', word)
        return word.replace('-', '_').lower()

    if not inspect.isclass(class_type):
        raise TypeError(
            f'class_type must be a type, but got {type(class_type)}')
    if hasattr(class_type, '_abbr_'):
        return class_type._abbr_
    return _camel_to_snake(class_type.__name__)
53
+
54
+
55
def build_plugin_layer(cfg, postfix='', **kwargs):
    """Build plugin layer.

    Args:
        cfg (None or dict): cfg should contain:
            type (str): identify plugin layer type.
            layer args: args needed to instantiate a plugin layer.
        postfix (int, str): appended into norm abbreviation to
            create named layer. Default: ''.

    Returns:
        tuple[str, nn.Module]:
            name (str): abbreviation + postfix
            layer (nn.Module): created plugin layer
    """
    if not isinstance(cfg, dict):
        raise TypeError('cfg must be a dict')
    if 'type' not in cfg:
        raise KeyError('the cfg dict must contain the key "type"')

    # Copy first so popping 'type' does not mutate the caller's config.
    layer_args = cfg.copy()
    layer_type = layer_args.pop('type')
    if layer_type not in PLUGIN_LAYERS:
        raise KeyError(f'Unrecognized plugin type {layer_type}')

    plugin_cls = PLUGIN_LAYERS.get(layer_type)

    assert isinstance(postfix, (int, str))
    name = infer_abbr(plugin_cls) + str(postfix)

    return name, plugin_cls(**kwargs, **layer_args)
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/bricks/registry.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Copyright (c) OpenMMLab. All rights reserved.
from annotator.uniformer.mmcv.utils import Registry

# Registries for basic CNN building bricks. The ``build_*`` helpers in this
# package look classes up here via the ``type`` key of a config dict.
CONV_LAYERS = Registry('conv layer')
NORM_LAYERS = Registry('norm layer')
ACTIVATION_LAYERS = Registry('activation layer')
PADDING_LAYERS = Registry('padding layer')
UPSAMPLE_LAYERS = Registry('upsample layer')
PLUGIN_LAYERS = Registry('plugin layer')

# Registries used by the transformer bricks (see ``transformer.py``).
DROPOUT_LAYERS = Registry('drop out layers')
POSITIONAL_ENCODING = Registry('position encoding')
ATTENTION = Registry('attention')
FEEDFORWARD_NETWORK = Registry('feed-forward Network')
TRANSFORMER_LAYER = Registry('transformerLayer')
TRANSFORMER_LAYER_SEQUENCE = Registry('transformer-layers sequence')
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/bricks/scale.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import torch
3
+ import torch.nn as nn
4
+
5
+
6
class Scale(nn.Module):
    """A learnable scale parameter.

    This layer scales the input by a learnable factor. It multiplies a
    learnable scale parameter of shape (1,) with input of any shape.

    Args:
        scale (float): Initial value of scale factor. Default: 1.0
    """

    def __init__(self, scale=1.0):
        super(Scale, self).__init__()
        # A single learnable scalar; broadcasting extends it to any shape.
        init_value = torch.tensor(scale, dtype=torch.float)
        self.scale = nn.Parameter(init_value)

    def forward(self, x):
        """Scale ``x`` elementwise by the learnable factor."""
        return self.scale * x
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/bricks/swish.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import torch
3
+ import torch.nn as nn
4
+
5
+ from .registry import ACTIVATION_LAYERS
6
+
7
+
8
@ACTIVATION_LAYERS.register_module()
class Swish(nn.Module):
    """Swish Module.

    This module applies the swish function:

    .. math::
        Swish(x) = x * Sigmoid(x)

    Returns:
        Tensor: The output tensor.
    """

    def __init__(self):
        super(Swish, self).__init__()

    def forward(self, x):
        """Apply ``x * sigmoid(x)`` elementwise."""
        return torch.sigmoid(x) * x
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/bricks/transformer.py ADDED
@@ -0,0 +1,595 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import copy
3
+ import warnings
4
+
5
+ import torch
6
+ import torch.nn as nn
7
+
8
+ from annotator.uniformer.mmcv import ConfigDict, deprecated_api_warning
9
+ from annotator.uniformer.mmcv.cnn import Linear, build_activation_layer, build_norm_layer
10
+ from annotator.uniformer.mmcv.runner.base_module import BaseModule, ModuleList, Sequential
11
+ from annotator.uniformer.mmcv.utils import build_from_cfg
12
+ from .drop import build_dropout
13
+ from .registry import (ATTENTION, FEEDFORWARD_NETWORK, POSITIONAL_ENCODING,
14
+ TRANSFORMER_LAYER, TRANSFORMER_LAYER_SEQUENCE)
15
+
16
+ # Avoid BC-breaking of importing MultiScaleDeformableAttention from this file
17
+ try:
18
+ from annotator.uniformer.mmcv.ops.multi_scale_deform_attn import MultiScaleDeformableAttention # noqa F401
19
+ warnings.warn(
20
+ ImportWarning(
21
+ '``MultiScaleDeformableAttention`` has been moved to '
22
+ '``mmcv.ops.multi_scale_deform_attn``, please change original path ' # noqa E501
23
+ '``from annotator.uniformer.mmcv.cnn.bricks.transformer import MultiScaleDeformableAttention`` ' # noqa E501
24
+ 'to ``from annotator.uniformer.mmcv.ops.multi_scale_deform_attn import MultiScaleDeformableAttention`` ' # noqa E501
25
+ ))
26
+
27
+ except ImportError:
28
+ warnings.warn('Fail to import ``MultiScaleDeformableAttention`` from '
29
+ '``mmcv.ops.multi_scale_deform_attn``, '
30
+ 'You should install ``mmcv-full`` if you need this module. ')
31
+
32
+
33
def build_positional_encoding(cfg, default_args=None):
    """Builder for Position Encoding.

    Args:
        cfg (dict): Config dict; its ``type`` key selects a class registered
            in ``POSITIONAL_ENCODING``.
        default_args (dict, optional): Defaults forwarded to
            ``build_from_cfg``. Default: None.

    Returns:
        The built positional encoding module.
    """
    return build_from_cfg(cfg, POSITIONAL_ENCODING, default_args)
36
+
37
+
38
def build_attention(cfg, default_args=None):
    """Builder for attention.

    Args:
        cfg (dict): Config dict; its ``type`` key selects a class registered
            in ``ATTENTION``.
        default_args (dict, optional): Defaults forwarded to
            ``build_from_cfg``. Default: None.

    Returns:
        The built attention module.
    """
    return build_from_cfg(cfg, ATTENTION, default_args)
41
+
42
+
43
def build_feedforward_network(cfg, default_args=None):
    """Builder for feed-forward network (FFN).

    Args:
        cfg (dict): Config dict; its ``type`` key selects a class registered
            in ``FEEDFORWARD_NETWORK``.
        default_args (dict, optional): Defaults forwarded to
            ``build_from_cfg``. Default: None.

    Returns:
        The built FFN module.
    """
    return build_from_cfg(cfg, FEEDFORWARD_NETWORK, default_args)
46
+
47
+
48
def build_transformer_layer(cfg, default_args=None):
    """Builder for transformer layer.

    Args:
        cfg (dict): Config dict; its ``type`` key selects a class registered
            in ``TRANSFORMER_LAYER``.
        default_args (dict, optional): Defaults forwarded to
            ``build_from_cfg``. Default: None.

    Returns:
        The built transformer layer.
    """
    return build_from_cfg(cfg, TRANSFORMER_LAYER, default_args)
51
+
52
+
53
def build_transformer_layer_sequence(cfg, default_args=None):
    """Builder for transformer encoder and transformer decoder.

    Args:
        cfg (dict): Config dict; its ``type`` key selects a class registered
            in ``TRANSFORMER_LAYER_SEQUENCE``.
        default_args (dict, optional): Defaults forwarded to
            ``build_from_cfg``. Default: None.

    Returns:
        The built transformer layer sequence.
    """
    return build_from_cfg(cfg, TRANSFORMER_LAYER_SEQUENCE, default_args)
56
+
57
+
58
@ATTENTION.register_module()
class MultiheadAttention(BaseModule):
    """A wrapper for ``torch.nn.MultiheadAttention``.

    This module implements MultiheadAttention with identity connection,
    and positional encoding is also passed as input.

    Args:
        embed_dims (int): The embedding dimension.
        num_heads (int): Parallel attention heads.
        attn_drop (float): A Dropout layer on attn_output_weights.
            Default: 0.0.
        proj_drop (float): A Dropout layer after `nn.MultiheadAttention`.
            Default: 0.0.
        dropout_layer (obj:`ConfigDict`): The dropout_layer used
            when adding the shortcut.
        init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
            Default: None.
        batch_first (bool): When it is True, Key, Query and Value are shape of
            (batch, n, embed_dim), otherwise (n, batch, embed_dim).
            Default to False.
    """

    def __init__(self,
                 embed_dims,
                 num_heads,
                 attn_drop=0.,
                 proj_drop=0.,
                 dropout_layer=dict(type='Dropout', drop_prob=0.),
                 init_cfg=None,
                 batch_first=False,
                 **kwargs):
        super(MultiheadAttention, self).__init__(init_cfg)
        if 'dropout' in kwargs:
            warnings.warn('The arguments `dropout` in MultiheadAttention '
                          'has been deprecated, now you can separately '
                          'set `attn_drop`(float), proj_drop(float), '
                          'and `dropout_layer`(dict) ')
            # Backward compatibility: the legacy ``dropout`` value feeds both
            # the attention dropout and the shortcut dropout layer. ``pop``
            # removes it from kwargs before they reach nn.MultiheadAttention.
            attn_drop = kwargs['dropout']
            dropout_layer['drop_prob'] = kwargs.pop('dropout')

        self.embed_dims = embed_dims
        self.num_heads = num_heads
        self.batch_first = batch_first

        self.attn = nn.MultiheadAttention(embed_dims, num_heads, attn_drop,
                                          **kwargs)

        self.proj_drop = nn.Dropout(proj_drop)
        # Shortcut dropout; identity when no dropout_layer cfg is given.
        self.dropout_layer = build_dropout(
            dropout_layer) if dropout_layer else nn.Identity()

    @deprecated_api_warning({'residual': 'identity'},
                            cls_name='MultiheadAttention')
    def forward(self,
                query,
                key=None,
                value=None,
                identity=None,
                query_pos=None,
                key_pos=None,
                attn_mask=None,
                key_padding_mask=None,
                **kwargs):
        """Forward function for `MultiheadAttention`.

        **kwargs allow passing a more general data flow when combining
        with other operations in `transformerlayer`.

        Args:
            query (Tensor): The input query with shape [num_queries, bs,
                embed_dims] if self.batch_first is False, else
                [bs, num_queries embed_dims].
            key (Tensor): The key tensor with shape [num_keys, bs,
                embed_dims] if self.batch_first is False, else
                [bs, num_keys, embed_dims] .
                If None, the ``query`` will be used. Defaults to None.
            value (Tensor): The value tensor with same shape as `key`.
                Same in `nn.MultiheadAttention.forward`. Defaults to None.
                If None, the `key` will be used.
            identity (Tensor): This tensor, with the same shape as x,
                will be used for the identity link.
                If None, `x` will be used. Defaults to None.
            query_pos (Tensor): The positional encoding for query, with
                the same shape as `x`. If not None, it will
                be added to `x` before forward function. Defaults to None.
            key_pos (Tensor): The positional encoding for `key`, with the
                same shape as `key`. Defaults to None. If not None, it will
                be added to `key` before forward function. If None, and
                `query_pos` has the same shape as `key`, then `query_pos`
                will be used for `key_pos`. Defaults to None.
            attn_mask (Tensor): ByteTensor mask with shape [num_queries,
                num_keys]. Same in `nn.MultiheadAttention.forward`.
                Defaults to None.
            key_padding_mask (Tensor): ByteTensor with shape [bs, num_keys].
                Defaults to None.

        Returns:
            Tensor: forwarded results with shape
                [num_queries, bs, embed_dims]
                if self.batch_first is False, else
                [bs, num_queries embed_dims].
        """

        # Self-attention defaults: key <- query, value <- key.
        if key is None:
            key = query
        if value is None:
            value = key
        if identity is None:
            identity = query
        if key_pos is None:
            if query_pos is not None:
                # use query_pos if key_pos is not available
                if query_pos.shape == key.shape:
                    key_pos = query_pos
                else:
                    warnings.warn(f'position encoding of key is'
                                  f'missing in {self.__class__.__name__}.')
        if query_pos is not None:
            query = query + query_pos
        if key_pos is not None:
            key = key + key_pos

        # Because the dataflow('key', 'query', 'value') of
        # ``torch.nn.MultiheadAttention`` is (num_query, batch,
        # embed_dims), We should adjust the shape of dataflow from
        # batch_first (batch, num_query, embed_dims) to num_query_first
        # (num_query ,batch, embed_dims), and recover ``attn_output``
        # from num_query_first to batch_first.
        if self.batch_first:
            query = query.transpose(0, 1)
            key = key.transpose(0, 1)
            value = value.transpose(0, 1)

        out = self.attn(
            query=query,
            key=key,
            value=value,
            attn_mask=attn_mask,
            key_padding_mask=key_padding_mask)[0]

        if self.batch_first:
            out = out.transpose(0, 1)

        # Residual connection with projection dropout then shortcut dropout.
        return identity + self.dropout_layer(self.proj_drop(out))
203
+
204
+
205
@FEEDFORWARD_NETWORK.register_module()
class FFN(BaseModule):
    """Implements feed-forward networks (FFNs) with identity connection.

    Args:
        embed_dims (int): The feature dimension. Same as
            `MultiheadAttention`. Defaults: 256.
        feedforward_channels (int): The hidden dimension of FFNs.
            Defaults: 1024.
        num_fcs (int, optional): The number of fully-connected layers in
            FFNs. Default: 2.
        act_cfg (dict, optional): The activation config for FFNs.
            Default: dict(type='ReLU')
        ffn_drop (float, optional): Probability of an element to be
            zeroed in FFN. Default 0.0.
        add_identity (bool, optional): Whether to add the
            identity connection. Default: `True`.
        dropout_layer (obj:`ConfigDict`): The dropout_layer used
            when adding the shortcut.
        init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
            Default: None.
    """

    @deprecated_api_warning(
        {
            'dropout': 'ffn_drop',
            'add_residual': 'add_identity'
        },
        cls_name='FFN')
    def __init__(self,
                 embed_dims=256,
                 feedforward_channels=1024,
                 num_fcs=2,
                 act_cfg=dict(type='ReLU', inplace=True),
                 ffn_drop=0.,
                 dropout_layer=None,
                 add_identity=True,
                 init_cfg=None,
                 **kwargs):
        super(FFN, self).__init__(init_cfg)
        assert num_fcs >= 2, 'num_fcs should be no less ' \
            f'than 2. got {num_fcs}.'
        self.embed_dims = embed_dims
        self.feedforward_channels = feedforward_channels
        self.num_fcs = num_fcs
        self.act_cfg = act_cfg
        self.activate = build_activation_layer(act_cfg)

        # First num_fcs-1 layers expand to ``feedforward_channels`` with
        # activation + dropout; the final Linear projects back to embed_dims.
        layers = []
        in_channels = embed_dims
        for _ in range(num_fcs - 1):
            layers.append(
                Sequential(
                    Linear(in_channels, feedforward_channels), self.activate,
                    nn.Dropout(ffn_drop)))
            in_channels = feedforward_channels
        layers.append(Linear(feedforward_channels, embed_dims))
        layers.append(nn.Dropout(ffn_drop))
        self.layers = Sequential(*layers)
        # Shortcut dropout; identity when no dropout_layer cfg is given.
        self.dropout_layer = build_dropout(
            dropout_layer) if dropout_layer else torch.nn.Identity()
        self.add_identity = add_identity

    @deprecated_api_warning({'residual': 'identity'}, cls_name='FFN')
    def forward(self, x, identity=None):
        """Forward function for `FFN`.

        The function would add x to the output tensor if residue is None.
        """
        out = self.layers(x)
        if not self.add_identity:
            return self.dropout_layer(out)
        if identity is None:
            identity = x
        return identity + self.dropout_layer(out)
280
+
281
+
282
@TRANSFORMER_LAYER.register_module()
class BaseTransformerLayer(BaseModule):
    """Base `TransformerLayer` for vision transformer.

    It can be built from `mmcv.ConfigDict` and support more flexible
    customization, for example, using any number of `FFN or LN ` and
    use different kinds of `attention` by specifying a list of `ConfigDict`
    named `attn_cfgs`. It is worth mentioning that it supports `prenorm`
    when you specifying `norm` as the first element of `operation_order`.
    More details about the `prenorm`: `On Layer Normalization in the
    Transformer Architecture <https://arxiv.org/abs/2002.04745>`_ .

    Args:
        attn_cfgs (list[`mmcv.ConfigDict`] | obj:`mmcv.ConfigDict` | None )):
            Configs for `self_attention` or `cross_attention` modules,
            The order of the configs in the list should be consistent with
            corresponding attentions in operation_order.
            If it is a dict, all of the attention modules in operation_order
            will be built with this config. Default: None.
        ffn_cfgs (list[`mmcv.ConfigDict`] | obj:`mmcv.ConfigDict` | None )):
            Configs for FFN, The order of the configs in the list should be
            consistent with corresponding ffn in operation_order.
            If it is a dict, all of the attention modules in operation_order
            will be built with this config.
        operation_order (tuple[str]): The execution order of operation
            in transformer. Such as ('self_attn', 'norm', 'ffn', 'norm').
            Support `prenorm` when you specifying first element as `norm`.
            Default:None.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='LN').
        init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
            Default: None.
        batch_first (bool): Key, Query and Value are shape
            of (batch, n, embed_dim)
            or (n, batch, embed_dim). Default to False.
    """

    def __init__(self,
                 attn_cfgs=None,
                 ffn_cfgs=dict(
                     type='FFN',
                     embed_dims=256,
                     feedforward_channels=1024,
                     num_fcs=2,
                     ffn_drop=0.,
                     act_cfg=dict(type='ReLU', inplace=True),
                 ),
                 operation_order=None,
                 norm_cfg=dict(type='LN'),
                 init_cfg=None,
                 batch_first=False,
                 **kwargs):

        # Route deprecated flat FFN kwargs into ``ffn_cfgs`` for backward
        # compatibility with old configs.
        deprecated_args = dict(
            feedforward_channels='feedforward_channels',
            ffn_dropout='ffn_drop',
            ffn_num_fcs='num_fcs')
        for ori_name, new_name in deprecated_args.items():
            if ori_name in kwargs:
                warnings.warn(
                    f'The arguments `{ori_name}` in BaseTransformerLayer '
                    f'has been deprecated, now you should set `{new_name}` '
                    f'and other FFN related arguments '
                    f'to a dict named `ffn_cfgs`. ')
                ffn_cfgs[new_name] = kwargs[ori_name]

        super(BaseTransformerLayer, self).__init__(init_cfg)

        self.batch_first = batch_first

        assert set(operation_order) & set(
            ['self_attn', 'norm', 'ffn', 'cross_attn']) == \
            set(operation_order), f'The operation_order of' \
            f' {self.__class__.__name__} should ' \
            f'contains all four operation type ' \
            f"{['self_attn', 'norm', 'ffn', 'cross_attn']}"

        # Build one attention module per 'self_attn'/'cross_attn' entry.
        num_attn = operation_order.count('self_attn') + operation_order.count(
            'cross_attn')
        if isinstance(attn_cfgs, dict):
            attn_cfgs = [copy.deepcopy(attn_cfgs) for _ in range(num_attn)]
        else:
            assert num_attn == len(attn_cfgs), f'The length ' \
                f'of attn_cfg {num_attn} is ' \
                f'not consistent with the number of attention' \
                f'in operation_order {operation_order}.'

        self.num_attn = num_attn
        self.operation_order = operation_order
        self.norm_cfg = norm_cfg
        self.pre_norm = operation_order[0] == 'norm'
        self.attentions = ModuleList()

        index = 0
        for operation_name in operation_order:
            if operation_name in ['self_attn', 'cross_attn']:
                if 'batch_first' in attn_cfgs[index]:
                    assert self.batch_first == attn_cfgs[index]['batch_first']
                else:
                    attn_cfgs[index]['batch_first'] = self.batch_first
                attention = build_attention(attn_cfgs[index])
                # Some custom attentions used as `self_attn`
                # or `cross_attn` can have different behavior.
                attention.operation_name = operation_name
                self.attentions.append(attention)
                index += 1

        self.embed_dims = self.attentions[0].embed_dims

        self.ffns = ModuleList()
        num_ffns = operation_order.count('ffn')
        if isinstance(ffn_cfgs, dict):
            ffn_cfgs = ConfigDict(ffn_cfgs)
        if isinstance(ffn_cfgs, dict):
            ffn_cfgs = [copy.deepcopy(ffn_cfgs) for _ in range(num_ffns)]
        assert len(ffn_cfgs) == num_ffns
        for ffn_index in range(num_ffns):
            if 'embed_dims' not in ffn_cfgs[ffn_index]:
                # Fill in the embedding dim inferred from the attention
                # modules. BUGFIX: must index with ``ffn_index`` -- the
                # original ``ffn_cfgs['embed_dims'] = ...`` indexed the
                # *list* with a string key and raised TypeError whenever an
                # FFN config omitted ``embed_dims``.
                ffn_cfgs[ffn_index]['embed_dims'] = self.embed_dims
            else:
                assert ffn_cfgs[ffn_index]['embed_dims'] == self.embed_dims
            self.ffns.append(
                build_feedforward_network(ffn_cfgs[ffn_index],
                                          dict(type='FFN')))

        self.norms = ModuleList()
        num_norms = operation_order.count('norm')
        for _ in range(num_norms):
            self.norms.append(build_norm_layer(norm_cfg, self.embed_dims)[1])

    def forward(self,
                query,
                key=None,
                value=None,
                query_pos=None,
                key_pos=None,
                attn_masks=None,
                query_key_padding_mask=None,
                key_padding_mask=None,
                **kwargs):
        """Forward function for `TransformerDecoderLayer`.

        **kwargs contains some specific arguments of attentions.

        Args:
            query (Tensor): The input query with shape
                [num_queries, bs, embed_dims] if
                self.batch_first is False, else
                [bs, num_queries embed_dims].
            key (Tensor): The key tensor with shape [num_keys, bs,
                embed_dims] if self.batch_first is False, else
                [bs, num_keys, embed_dims] .
            value (Tensor): The value tensor with same shape as `key`.
            query_pos (Tensor): The positional encoding for `query`.
                Default: None.
            key_pos (Tensor): The positional encoding for `key`.
                Default: None.
            attn_masks (List[Tensor] | None): 2D Tensor used in
                calculation of corresponding attention. The length of
                it should equal to the number of `attention` in
                `operation_order`. Default: None.
            query_key_padding_mask (Tensor): ByteTensor for `query`, with
                shape [bs, num_queries]. Only used in `self_attn` layer.
                Defaults to None.
            key_padding_mask (Tensor): ByteTensor for `query`, with
                shape [bs, num_keys]. Default: None.

        Returns:
            Tensor: forwarded results with shape [num_queries, bs, embed_dims].
        """

        norm_index = 0
        attn_index = 0
        ffn_index = 0
        identity = query
        # Normalize attn_masks to a list with one entry per attention module.
        if attn_masks is None:
            attn_masks = [None for _ in range(self.num_attn)]
        elif isinstance(attn_masks, torch.Tensor):
            attn_masks = [
                copy.deepcopy(attn_masks) for _ in range(self.num_attn)
            ]
            warnings.warn(f'Use same attn_mask in all attentions in '
                          f'{self.__class__.__name__} ')
        else:
            assert len(attn_masks) == self.num_attn, f'The length of ' \
                f'attn_masks {len(attn_masks)} must be equal ' \
                f'to the number of attention in ' \
                f'operation_order {self.num_attn}'

        # Execute the configured sequence; ``identity`` always holds the
        # most recent pre-norm tensor for prenorm-style residuals.
        for layer in self.operation_order:
            if layer == 'self_attn':
                temp_key = temp_value = query
                query = self.attentions[attn_index](
                    query,
                    temp_key,
                    temp_value,
                    identity if self.pre_norm else None,
                    query_pos=query_pos,
                    key_pos=query_pos,
                    attn_mask=attn_masks[attn_index],
                    key_padding_mask=query_key_padding_mask,
                    **kwargs)
                attn_index += 1
                identity = query

            elif layer == 'norm':
                query = self.norms[norm_index](query)
                norm_index += 1

            elif layer == 'cross_attn':
                query = self.attentions[attn_index](
                    query,
                    key,
                    value,
                    identity if self.pre_norm else None,
                    query_pos=query_pos,
                    key_pos=key_pos,
                    attn_mask=attn_masks[attn_index],
                    key_padding_mask=key_padding_mask,
                    **kwargs)
                attn_index += 1
                identity = query

            elif layer == 'ffn':
                query = self.ffns[ffn_index](
                    query, identity if self.pre_norm else None)
                ffn_index += 1

        return query
511
+
512
+
513
@TRANSFORMER_LAYER_SEQUENCE.register_module()
class TransformerLayerSequence(BaseModule):
    """Base class for TransformerEncoder and TransformerDecoder in vision
    transformer.

    As base-class of Encoder and Decoder in vision transformer.
    Support customization such as specifying different kind
    of `transformer_layer` in `transformer_coder`.

    Args:
        transformerlayer (list[obj:`mmcv.ConfigDict`] |
            obj:`mmcv.ConfigDict`): Config of transformerlayer
            in TransformerCoder. If it is obj:`mmcv.ConfigDict`,
            it would be repeated `num_layer` times to a
            list[`mmcv.ConfigDict`]. Default: None.
        num_layers (int): The number of `TransformerLayer`. Default: None.
        init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
            Default: None.
    """

    def __init__(self, transformerlayers=None, num_layers=None, init_cfg=None):
        super(TransformerLayerSequence, self).__init__(init_cfg)
        # A single dict config is replicated ``num_layers`` times; a list
        # must already have exactly ``num_layers`` entries.
        if isinstance(transformerlayers, dict):
            transformerlayers = [
                copy.deepcopy(transformerlayers) for _ in range(num_layers)
            ]
        else:
            assert isinstance(transformerlayers, list) and \
                len(transformerlayers) == num_layers
        self.num_layers = num_layers
        self.layers = ModuleList()
        for i in range(num_layers):
            self.layers.append(build_transformer_layer(transformerlayers[i]))
        # Mirror the first layer's key attributes for convenient access.
        self.embed_dims = self.layers[0].embed_dims
        self.pre_norm = self.layers[0].pre_norm

    def forward(self,
                query,
                key,
                value,
                query_pos=None,
                key_pos=None,
                attn_masks=None,
                query_key_padding_mask=None,
                key_padding_mask=None,
                **kwargs):
        """Forward function for `TransformerCoder`.

        Args:
            query (Tensor): Input query with shape
                `(num_queries, bs, embed_dims)`.
            key (Tensor): The key tensor with shape
                `(num_keys, bs, embed_dims)`.
            value (Tensor): The value tensor with shape
                `(num_keys, bs, embed_dims)`.
            query_pos (Tensor): The positional encoding for `query`.
                Default: None.
            key_pos (Tensor): The positional encoding for `key`.
                Default: None.
            attn_masks (List[Tensor], optional): Each element is 2D Tensor
                which is used in calculation of corresponding attention in
                operation_order. Default: None.
            query_key_padding_mask (Tensor): ByteTensor for `query`, with
                shape [bs, num_queries]. Only used in self-attention
                Default: None.
            key_padding_mask (Tensor): ByteTensor for `query`, with
                shape [bs, num_keys]. Default: None.

        Returns:
            Tensor: results with shape [num_queries, bs, embed_dims].
        """
        # Feed the query through each layer in sequence; key/value and all
        # masks are shared across layers.
        for layer in self.layers:
            query = layer(
                query,
                key,
                value,
                query_pos=query_pos,
                key_pos=key_pos,
                attn_masks=attn_masks,
                query_key_padding_mask=query_key_padding_mask,
                key_padding_mask=key_padding_mask,
                **kwargs)
        return query
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/bricks/upsample.py ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import torch.nn as nn
3
+ import torch.nn.functional as F
4
+
5
+ from ..utils import xavier_init
6
+ from .registry import UPSAMPLE_LAYERS
7
+
8
# ``nn.Upsample`` serves both interpolation modes; ``build_upsample_layer``
# later copies the registered name into the layer's ``mode`` argument.
for _mode in ('nearest', 'bilinear'):
    UPSAMPLE_LAYERS.register_module(_mode, module=nn.Upsample)
10
+
11
+
12
@UPSAMPLE_LAYERS.register_module(name='pixel_shuffle')
class PixelShufflePack(nn.Module):
    """Pixel Shuffle upsample layer.

    Packs an ``nn.Conv2d`` that expands the channel dimension by
    ``scale_factor ** 2`` followed by ``F.pixel_shuffle`` to achieve a
    simple learned upsampling.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        scale_factor (int): Upsample ratio.
        upsample_kernel (int): Kernel size of the conv layer used to expand
            the channels.
    """

    def __init__(self, in_channels, out_channels, scale_factor,
                 upsample_kernel):
        super(PixelShufflePack, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.scale_factor = scale_factor
        self.upsample_kernel = upsample_kernel
        # 'same' padding for odd kernel sizes keeps spatial dims unchanged
        # before the shuffle.
        self.upsample_conv = nn.Conv2d(
            in_channels,
            out_channels * scale_factor * scale_factor,
            upsample_kernel,
            padding=(upsample_kernel - 1) // 2)
        self.init_weights()

    def init_weights(self):
        # Uniform Xavier init, matching mmcv's convention for upsample convs.
        xavier_init(self.upsample_conv, distribution='uniform')

    def forward(self, x):
        # Expand channels, then rearrange them into spatial positions.
        return F.pixel_shuffle(self.upsample_conv(x), self.scale_factor)
48
+
49
+
50
def build_upsample_layer(cfg, *args, **kwargs):
    """Build an upsample layer from a config dict.

    Args:
        cfg (dict): The upsample layer config, which should contain:

            - type (str): Layer type.
            - scale_factor (int): Upsample ratio, which is not applicable
              to deconv.
            - layer args: Args needed to instantiate an upsample layer.
        args (argument list): Arguments passed to the ``__init__`` method of
            the corresponding layer.
        kwargs (keyword arguments): Keyword arguments passed to the
            ``__init__`` method of the corresponding layer.

    Returns:
        nn.Module: Created upsample layer.

    Raises:
        TypeError: If ``cfg`` is not a dict.
        KeyError: If ``cfg`` has no ``type`` key or the type is unregistered.
    """
    if not isinstance(cfg, dict):
        raise TypeError(f'cfg must be a dict, but got {type(cfg)}')
    if 'type' not in cfg:
        raise KeyError(
            f'the cfg dict must contain the key "type", but got {cfg}')

    cfg_ = cfg.copy()
    layer_type = cfg_.pop('type')
    if layer_type not in UPSAMPLE_LAYERS:
        raise KeyError(f'Unrecognized upsample type {layer_type}')
    upsample = UPSAMPLE_LAYERS.get(layer_type)

    if upsample is nn.Upsample:
        # nn.Upsample is registered under both 'nearest' and 'bilinear';
        # recover the interpolation mode from the registered name.
        cfg_['mode'] = layer_type
    return upsample(*args, **kwargs, **cfg_)
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/bricks/wrappers.py ADDED
@@ -0,0 +1,180 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ r"""Modified from https://github.com/facebookresearch/detectron2/blob/master/detectron2/layers/wrappers.py # noqa: E501
3
+
4
+ Wrap some nn modules to support empty tensor input. Currently, these wrappers
5
+ are mainly used in mask heads like fcn_mask_head and maskiou_heads since mask
6
+ heads are trained on only positive RoIs.
7
+ """
8
+ import math
9
+
10
+ import torch
11
+ import torch.nn as nn
12
+ from torch.nn.modules.utils import _pair, _triple
13
+
14
+ from .registry import CONV_LAYERS, UPSAMPLE_LAYERS
15
+
16
# The parrots fork reports a non-numeric version string and is kept verbatim;
# for stock PyTorch only (major, minor) are kept (e.g. '1.3.1+cu92' -> (1, 3))
# so that tuple comparisons against thresholds like (1, 4) work.
_raw_version = torch.__version__
TORCH_VERSION = _raw_version if _raw_version == 'parrots' else tuple(
    int(part) for part in _raw_version.split('.')[:2])
22
+
23
+
24
def obsolete_torch_version(torch_version, version_threshold):
    """Return True if the torch version is 'parrots' or <= the threshold."""
    if torch_version == 'parrots':
        return True
    return torch_version <= version_threshold
26
+
27
+
28
class NewEmptyTensorOp(torch.autograd.Function):
    """Autograd op that returns an empty tensor of a requested shape.

    Used to keep empty-tensor forwards differentiable: backward produces an
    empty gradient with the original input's shape.
    """

    @staticmethod
    def forward(ctx, x, new_shape):
        # Remember the input shape so backward can emit a matching gradient.
        ctx.shape = x.shape
        return x.new_empty(new_shape)

    @staticmethod
    def backward(ctx, grad):
        # Second return is None: `new_shape` is a non-tensor argument.
        return NewEmptyTensorOp.apply(grad, ctx.shape), None
39
+
40
+
41
@CONV_LAYERS.register_module('Conv', force=True)
class Conv2d(nn.Conv2d):
    """``nn.Conv2d`` that tolerates empty-batch inputs on old PyTorch."""

    def forward(self, x):
        if x.numel() > 0 or not obsolete_torch_version(TORCH_VERSION, (1, 4)):
            return super().forward(x)
        # Old PyTorch cannot infer conv output shapes for empty tensors;
        # compute the spatial dims with the standard conv formula instead.
        out_shape = [x.shape[0], self.out_channels]
        for dim, k, p, s, d in zip(x.shape[-2:], self.kernel_size,
                                   self.padding, self.stride, self.dilation):
            out_shape.append((dim + 2 * p - (d * (k - 1) + 1)) // s + 1)
        empty = NewEmptyTensorOp.apply(x, out_shape)
        if not self.training:
            return empty
        # Touch every parameter so DDP does not warn about unused params.
        dummy = sum(param.view(-1)[0] for param in self.parameters()) * 0.0
        return empty + dummy
60
+
61
+
62
@CONV_LAYERS.register_module('Conv3d', force=True)
class Conv3d(nn.Conv3d):
    """``nn.Conv3d`` that tolerates empty-batch inputs on old PyTorch."""

    def forward(self, x):
        if x.numel() > 0 or not obsolete_torch_version(TORCH_VERSION, (1, 4)):
            return super().forward(x)
        # Compute the 3 spatial output dims with the standard conv formula,
        # since old PyTorch cannot infer shapes for empty tensors.
        out_shape = [x.shape[0], self.out_channels]
        for dim, k, p, s, d in zip(x.shape[-3:], self.kernel_size,
                                   self.padding, self.stride, self.dilation):
            out_shape.append((dim + 2 * p - (d * (k - 1) + 1)) // s + 1)
        empty = NewEmptyTensorOp.apply(x, out_shape)
        if not self.training:
            return empty
        # Touch every parameter so DDP does not warn about unused params.
        dummy = sum(param.view(-1)[0] for param in self.parameters()) * 0.0
        return empty + dummy
81
+
82
+
83
@CONV_LAYERS.register_module()
@CONV_LAYERS.register_module('deconv')
@UPSAMPLE_LAYERS.register_module('deconv', force=True)
class ConvTranspose2d(nn.ConvTranspose2d):
    """``nn.ConvTranspose2d`` that tolerates empty-batch inputs on old
    PyTorch."""

    def forward(self, x):
        if x.numel() > 0 or not obsolete_torch_version(TORCH_VERSION, (1, 4)):
            return super().forward(x)
        # Transposed-conv output-size formula, applied per spatial dim.
        out_shape = [x.shape[0], self.out_channels]
        for dim, k, p, s, d, op in zip(x.shape[-2:], self.kernel_size,
                                       self.padding, self.stride,
                                       self.dilation, self.output_padding):
            out_shape.append((dim - 1) * s - 2 * p + (d * (k - 1) + 1) + op)
        empty = NewEmptyTensorOp.apply(x, out_shape)
        if not self.training:
            return empty
        # Touch every parameter so DDP does not warn about unused params.
        dummy = sum(param.view(-1)[0] for param in self.parameters()) * 0.0
        return empty + dummy
104
+
105
+
106
@CONV_LAYERS.register_module()
@CONV_LAYERS.register_module('deconv3d')
@UPSAMPLE_LAYERS.register_module('deconv3d', force=True)
class ConvTranspose3d(nn.ConvTranspose3d):
    """``nn.ConvTranspose3d`` that tolerates empty-batch inputs on old
    PyTorch."""

    def forward(self, x):
        if x.numel() > 0 or not obsolete_torch_version(TORCH_VERSION, (1, 4)):
            return super().forward(x)
        # Transposed-conv output-size formula, applied per spatial dim.
        out_shape = [x.shape[0], self.out_channels]
        for dim, k, p, s, d, op in zip(x.shape[-3:], self.kernel_size,
                                       self.padding, self.stride,
                                       self.dilation, self.output_padding):
            out_shape.append((dim - 1) * s - 2 * p + (d * (k - 1) + 1) + op)
        empty = NewEmptyTensorOp.apply(x, out_shape)
        if not self.training:
            return empty
        # Touch every parameter so DDP does not warn about unused params.
        dummy = sum(param.view(-1)[0] for param in self.parameters()) * 0.0
        return empty + dummy
127
+
128
+
129
class MaxPool2d(nn.MaxPool2d):
    """``nn.MaxPool2d`` that tolerates empty-batch inputs (PyTorch < 1.9
    cannot infer pooling output shapes for empty tensors)."""

    def forward(self, x):
        if x.numel() > 0 or not obsolete_torch_version(TORCH_VERSION, (1, 9)):
            return super().forward(x)
        out_shape = list(x.shape[:2])
        # Pooling accepts scalar hyper-params; normalize them to pairs.
        for dim, k, p, s, d in zip(x.shape[-2:], _pair(self.kernel_size),
                                   _pair(self.padding), _pair(self.stride),
                                   _pair(self.dilation)):
            size = (dim + 2 * p - (d * (k - 1) + 1)) / s + 1
            out_shape.append(
                math.ceil(size) if self.ceil_mode else math.floor(size))
        return NewEmptyTensorOp.apply(x, out_shape)
145
+
146
+
147
class MaxPool3d(nn.MaxPool3d):
    """``nn.MaxPool3d`` that tolerates empty-batch inputs (PyTorch < 1.9
    cannot infer pooling output shapes for empty tensors)."""

    def forward(self, x):
        if x.numel() > 0 or not obsolete_torch_version(TORCH_VERSION, (1, 9)):
            return super().forward(x)
        out_shape = list(x.shape[:2])
        # Pooling accepts scalar hyper-params; normalize them to triples.
        for dim, k, p, s, d in zip(x.shape[-3:], _triple(self.kernel_size),
                                   _triple(self.padding),
                                   _triple(self.stride),
                                   _triple(self.dilation)):
            size = (dim + 2 * p - (d * (k - 1) + 1)) / s + 1
            out_shape.append(
                math.ceil(size) if self.ceil_mode else math.floor(size))
        return NewEmptyTensorOp.apply(x, out_shape)
164
+
165
+
166
class Linear(torch.nn.Linear):
    """``nn.Linear`` that tolerates empty inputs on PyTorch < 1.6 (empty
    tensor forward of Linear is natively supported from 1.6 on)."""

    def forward(self, x):
        if x.numel() > 0 or not obsolete_torch_version(TORCH_VERSION, (1, 5)):
            return super().forward(x)
        empty = NewEmptyTensorOp.apply(x, [x.shape[0], self.out_features])
        if not self.training:
            return empty
        # Touch every parameter so DDP does not warn about unused params.
        dummy = sum(param.view(-1)[0] for param in self.parameters()) * 0.0
        return empty + dummy
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/utils/__init__.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ from .flops_counter import get_model_complexity_info
3
+ from .fuse_conv_bn import fuse_conv_bn
4
+ from .sync_bn import revert_sync_batchnorm
5
+ from .weight_init import (INITIALIZERS, Caffe2XavierInit, ConstantInit,
6
+ KaimingInit, NormalInit, PretrainedInit,
7
+ TruncNormalInit, UniformInit, XavierInit,
8
+ bias_init_with_prob, caffe2_xavier_init,
9
+ constant_init, initialize, kaiming_init, normal_init,
10
+ trunc_normal_init, uniform_init, xavier_init)
11
+
12
+ __all__ = [
13
+ 'get_model_complexity_info', 'bias_init_with_prob', 'caffe2_xavier_init',
14
+ 'constant_init', 'kaiming_init', 'normal_init', 'trunc_normal_init',
15
+ 'uniform_init', 'xavier_init', 'fuse_conv_bn', 'initialize',
16
+ 'INITIALIZERS', 'ConstantInit', 'XavierInit', 'NormalInit',
17
+ 'TruncNormalInit', 'UniformInit', 'KaimingInit', 'PretrainedInit',
18
+ 'Caffe2XavierInit', 'revert_sync_batchnorm'
19
+ ]
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/utils/flops_counter.py ADDED
@@ -0,0 +1,599 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Modified from flops-counter.pytorch by Vladislav Sovrasov
2
+ # original repo: https://github.com/sovrasov/flops-counter.pytorch
3
+
4
+ # MIT License
5
+
6
+ # Copyright (c) 2018 Vladislav Sovrasov
7
+
8
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
9
+ # of this software and associated documentation files (the "Software"), to deal
10
+ # in the Software without restriction, including without limitation the rights
11
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12
+ # copies of the Software, and to permit persons to whom the Software is
13
+ # furnished to do so, subject to the following conditions:
14
+
15
+ # The above copyright notice and this permission notice shall be included in
16
+ # all copies or substantial portions of the Software.
17
+
18
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24
+ # SOFTWARE.
25
+
26
+ import sys
27
+ from functools import partial
28
+
29
+ import numpy as np
30
+ import torch
31
+ import torch.nn as nn
32
+
33
+ import annotator.uniformer.mmcv as mmcv
34
+
35
+
36
def get_model_complexity_info(model,
                              input_shape,
                              print_per_layer_stat=True,
                              as_strings=True,
                              input_constructor=None,
                              flush=False,
                              ost=sys.stdout):
    """Get complexity information of a model.

    This method can calculate FLOPs and parameter counts of a model with
    corresponding input shape. It can also print complexity information for
    each layer in a model.

    Supported layers are listed as below:
        - Convolutions: ``nn.Conv1d``, ``nn.Conv2d``, ``nn.Conv3d``.
        - Activations: ``nn.ReLU``, ``nn.PReLU``, ``nn.ELU``, ``nn.LeakyReLU``,
            ``nn.ReLU6``.
        - Poolings: ``nn.MaxPool1d``, ``nn.MaxPool2d``, ``nn.MaxPool3d``,
            ``nn.AvgPool1d``, ``nn.AvgPool2d``, ``nn.AvgPool3d``,
            ``nn.AdaptiveMaxPool1d``, ``nn.AdaptiveMaxPool2d``,
            ``nn.AdaptiveMaxPool3d``, ``nn.AdaptiveAvgPool1d``,
            ``nn.AdaptiveAvgPool2d``, ``nn.AdaptiveAvgPool3d``.
        - BatchNorms: ``nn.BatchNorm1d``, ``nn.BatchNorm2d``,
            ``nn.BatchNorm3d``, ``nn.GroupNorm``, ``nn.InstanceNorm1d``,
            ``InstanceNorm2d``, ``InstanceNorm3d``, ``nn.LayerNorm``.
        - Linear: ``nn.Linear``.
        - Deconvolution: ``nn.ConvTranspose2d``.
        - Upsample: ``nn.Upsample``.

    Args:
        model (nn.Module): The model for complexity calculation.
        input_shape (tuple): Input shape used for calculation.
        print_per_layer_stat (bool): Whether to print complexity information
            for each layer in a model. Default: True.
        as_strings (bool): Output FLOPs and params counts in a string form.
            Default: True.
        input_constructor (None | callable): If specified, it takes a callable
            method that generates input. otherwise, it will generate a random
            tensor with input shape to calculate FLOPs. Default: None.
        flush (bool): same as that in :func:`print`. Default: False.
        ost (stream): same as ``file`` param in :func:`print`.
            Default: sys.stdout.

    Returns:
        tuple[float | str]: If ``as_strings`` is set to True, it will return
            FLOPs and parameter counts in a string format. otherwise, it will
            return those in a float number format.
    """
    assert type(input_shape) is tuple
    assert len(input_shape) >= 1
    assert isinstance(model, nn.Module)
    # Attach start/stop/reset/compute counting methods, then run a single
    # dummy forward pass in eval mode while the hooks accumulate counts.
    flops_model = add_flops_counting_methods(model)
    flops_model.eval()
    flops_model.start_flops_count()
    if input_constructor:
        # The constructor is expected to return a dict of forward kwargs.
        input = input_constructor(input_shape)
        _ = flops_model(**input)
    else:
        try:
            # Create the dummy batch with the same dtype/device as the model.
            batch = torch.ones(()).new_empty(
                (1, *input_shape),
                dtype=next(flops_model.parameters()).dtype,
                device=next(flops_model.parameters()).device)
        except StopIteration:
            # Avoid StopIteration for models which have no parameters,
            # like `nn.Relu()`, `nn.AvgPool2d`, etc.
            batch = torch.ones(()).new_empty((1, *input_shape))

        _ = flops_model(batch)

    flops_count, params_count = flops_model.compute_average_flops_cost()
    if print_per_layer_stat:
        print_model_with_flops(
            flops_model, flops_count, params_count, ost=ost, flush=flush)
    # Detach all hooks so the model is left unmodified for the caller.
    flops_model.stop_flops_count()

    if as_strings:
        return flops_to_string(flops_count), params_to_string(params_count)

    return flops_count, params_count
116
+
117
+
118
def flops_to_string(flops, units='GFLOPs', precision=2):
    """Convert FLOPs number into a string.

    Note that here we take a multiply-add count as one FLOP.

    Args:
        flops (float): FLOPs number to be converted.
        units (str | None): Converted FLOPs units. Options are None, 'GFLOPs',
            'MFLOPs', 'KFLOPs', 'FLOPs'. If set to None, it will automatically
            choose the most suitable unit for FLOPs. Default: 'GFLOPs'.
        precision (int): Digit number after the decimal point. Default: 2.

    Returns:
        str: The converted FLOPs number with units.

    Examples:
        >>> flops_to_string(1e9)
        '1.0 GFLOPs'
        >>> flops_to_string(2e5, 'MFLOPs')
        '0.2 MFLOPs'
        >>> flops_to_string(3e-9, None)
        '3e-09 FLOPs'
    """
    scales = (('GFLOPs', 9), ('MFLOPs', 6), ('KFLOPs', 3))
    if units is None:
        # Auto mode: pick the largest unit whose value is at least 1.
        for unit_name, exp in scales:
            if flops // 10**exp > 0:
                return str(round(flops / 10.**exp, precision)) + ' ' + unit_name
        return str(flops) + ' FLOPs'
    for unit_name, exp in scales:
        if units == unit_name:
            return str(round(flops / 10.**exp, precision)) + ' ' + units
    # Unknown unit name: fall back to the raw count.
    return str(flops) + ' FLOPs'
159
+
160
+
161
def params_to_string(num_params, units=None, precision=2):
    """Convert parameter number into a string.

    Args:
        num_params (float): Parameter number to be converted.
        units (str | None): Converted units. Options are None, 'M',
            'K' and ''. If set to None, it will automatically choose the most
            suitable unit for the parameter number. Default: None.
        precision (int): Digit number after the decimal point. Default: 2.

    Returns:
        str: The converted parameter number with units.

    Examples:
        >>> params_to_string(1e9)
        '1000.0 M'
        >>> params_to_string(2e5)
        '200.0 k'
        >>> params_to_string(3e-9)
        '3e-09'
    """
    if units == 'M':
        return str(round(num_params / 10.**6, precision)) + ' ' + units
    if units == 'K':
        return str(round(num_params / 10.**3, precision)) + ' ' + units
    if units is None:
        # Auto mode: millions, thousands, or the raw count.
        if num_params // 10**6 > 0:
            return str(round(num_params / 10**6, precision)) + ' M'
        if num_params // 10**3:
            return str(round(num_params / 10**3, precision)) + ' k'
    return str(num_params)
196
+
197
+
198
def print_model_with_flops(model,
                           total_flops,
                           total_params,
                           units='GFLOPs',
                           precision=3,
                           ost=sys.stdout,
                           flush=False):
    """Print a model with FLOPs and parameter counts for each layer.

    Temporarily patches each submodule's ``extra_repr`` so ``print(model)``
    shows per-layer parameter counts, FLOPs, and their share of the totals,
    then restores the original reprs afterwards.

    Args:
        model (nn.Module): The model to be printed.
        total_flops (float): Total FLOPs of the model.
        total_params (float): Total parameter counts of the model.
        units (str | None): Converted FLOPs units. Default: 'GFLOPs'.
        precision (int): Digit number after the decimal point. Default: 3.
        ost (stream): same as ``file`` param in :func:`print`.
            Default: sys.stdout.
        flush (bool): same as that in :func:`print`. Default: False.
    """

    def accumulate_params(self):
        # Supported leaf modules carry their own count; containers sum over
        # their children recursively.
        if is_supported_instance(self):
            return self.__params__
        total = 0
        for child in self.children():
            total += child.accumulate_params()
        return total

    def accumulate_flops(self):
        if is_supported_instance(self):
            # Normalize by the number of forward batches observed.
            return self.__flops__ / model.__batch_counter__
        total = 0
        for child in self.children():
            total += child.accumulate_flops()
        return total

    def flops_repr(self):
        num_params = self.accumulate_params()
        flops_cost = self.accumulate_flops()
        return ', '.join([
            params_to_string(num_params, units='M', precision=precision),
            '{:.3%} Params'.format(num_params / total_params),
            flops_to_string(flops_cost, units=units, precision=precision),
            '{:.3%} FLOPs'.format(flops_cost / total_flops),
            self.original_extra_repr()
        ])

    def add_extra_repr(m):
        # Bind the accumulators and the patched repr to this instance.
        m.accumulate_flops = accumulate_flops.__get__(m)
        m.accumulate_params = accumulate_params.__get__(m)
        flops_extra_repr = flops_repr.__get__(m)
        if m.extra_repr != flops_extra_repr:
            m.original_extra_repr = m.extra_repr
            m.extra_repr = flops_extra_repr
            assert m.extra_repr != m.original_extra_repr

    def del_extra_repr(m):
        # Undo the instance-level patching done by ``add_extra_repr``.
        if hasattr(m, 'original_extra_repr'):
            m.extra_repr = m.original_extra_repr
            del m.original_extra_repr
        if hasattr(m, 'accumulate_flops'):
            del m.accumulate_flops

    model.apply(add_extra_repr)
    print(model, file=ost, flush=flush)
    model.apply(del_extra_repr)
305
+
306
+
307
def get_model_parameters_number(model):
    """Calculate the number of trainable parameters of a model.

    Args:
        model (nn.Module): The model for parameter number calculation.

    Returns:
        int: Number of elements over all parameters with ``requires_grad``.
    """
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
318
+
319
+
320
def add_flops_counting_methods(net_main_module):
    """Attach FLOPs-counting methods to an existing module instance.

    Binds ``start_flops_count``, ``stop_flops_count``, ``reset_flops_count``
    and ``compute_average_flops_cost`` to the instance (so each has access
    to ``self``) and resets all counters.
    """
    for method in (start_flops_count, stop_flops_count, reset_flops_count,
                   compute_average_flops_cost):
        setattr(net_main_module, method.__name__,
                method.__get__(net_main_module))

    net_main_module.reset_flops_count()

    return net_main_module
335
+
336
+
337
def compute_average_flops_cost(self):
    """Compute average FLOPs cost.

    Available after ``add_flops_counting_methods()`` has been called on the
    net object.

    Returns:
        tuple: mean FLOPs consumption per image and the parameter count.
    """
    flops_sum = sum(
        module.__flops__ for module in self.modules()
        if is_supported_instance(module))
    # Average over the number of batches seen by the batch-counter hook.
    return flops_sum / self.__batch_counter__, get_model_parameters_number(
        self)
353
+
354
+
355
def start_flops_count(self):
    """Activate the computation of mean flops consumption per image.

    Available after ``add_flops_counting_methods()`` has been called on the
    net object; call it before running the network.
    """
    add_batch_counter_hook_function(self)

    def add_flops_counter_hook_function(module):
        # Only supported module types get a counting hook, and only once.
        if not is_supported_instance(module):
            return
        if hasattr(module, '__flops_handle__'):
            return
        hook = get_modules_mapping()[type(module)]
        module.__flops_handle__ = module.register_forward_hook(hook)

    self.apply(partial(add_flops_counter_hook_function))
376
+
377
+
378
def stop_flops_count(self):
    """Stop computing the mean flops consumption per image.

    Detaches both the batch counter and all per-module FLOPs hooks; can be
    called to pause the computation at any time.
    """
    remove_batch_counter_hook_function(self)
    self.apply(remove_flops_counter_hook_function)
387
+
388
+
389
def reset_flops_count(self):
    """Reset statistics computed so far.

    Zeroes the batch counter and every supported submodule's FLOPs/params
    counters.
    """
    add_batch_counter_variables_or_reset(self)
    self.apply(add_flops_counter_variable_or_reset)
397
+
398
+
399
+ # ---- Internal functions
400
# Forward hook for ops counted as zero FLOPs; kept as an explicit no-op
# increment so every supported module type shares the same hook signature.
def empty_flops_counter_hook(module, input, output):
    module.__flops__ += 0
402
+
403
+
404
def upsample_flops_counter_hook(module, input, output):
    """Count one FLOP per element of the first output sample's view."""
    first = output[0]
    count = first.shape[0]
    for dim in first.shape[1:]:
        count *= dim
    module.__flops__ += int(count)
411
+
412
+
413
def relu_flops_counter_hook(module, input, output):
    """One FLOP per output element for elementwise activations."""
    module.__flops__ += int(output.numel())
416
+
417
+
418
def linear_flops_counter_hook(module, input, output):
    """FLOPs of a linear layer: every input element feeds each output unit."""
    first = input[0]
    # PyTorch already validates dimensions, so only the last output dim
    # matters here.
    out_features = output.shape[-1]
    module.__flops__ += int(np.prod(first.shape) * out_features)
423
+
424
+
425
def pool_flops_counter_hook(module, input, output):
    """One FLOP (compare/add) per input element for pooling layers."""
    module.__flops__ += int(np.prod(input[0].shape))
428
+
429
+
430
def norm_flops_counter_hook(module, input, output):
    """FLOPs of a normalization layer; doubled when affine params exist."""
    elems = np.prod(input[0].shape)
    has_affine = (getattr(module, 'affine', False)
                  or getattr(module, 'elementwise_affine', False))
    # The affine transform adds one multiply-add per element.
    module.__flops__ += int(elems * 2 if has_affine else elems)
438
+
439
+
440
def deconv_flops_counter_hook(conv_module, input, output):
    """Accumulate FLOPs for a transposed-convolution forward pass.

    A transposed conv applies the kernel once per *input* position, so the
    multiply-add count scales with the input spatial size; bias adds are
    counted per *output* element.
    """
    # Can have multiple inputs, getting the first one
    input = input[0]

    batch_size = input.shape[0]
    input_height, input_width = input.shape[2:]

    kernel_height, kernel_width = conv_module.kernel_size
    in_channels = conv_module.in_channels
    out_channels = conv_module.out_channels
    groups = conv_module.groups

    filters_per_channel = out_channels // groups
    conv_per_position_flops = (
        kernel_height * kernel_width * in_channels * filters_per_channel)

    active_elements_count = batch_size * input_height * input_width
    overall_conv_flops = conv_per_position_flops * active_elements_count
    bias_flops = 0
    if conv_module.bias is not None:
        output_height, output_width = output.shape[2:]
        # Fixed: the original multiplied output_height by itself, which
        # miscounted bias FLOPs whenever the output was non-square.
        bias_flops = out_channels * batch_size * output_height * output_width
    overall_flops = overall_conv_flops + bias_flops

    conv_module.__flops__ += int(overall_flops)
465
+
466
+
467
def conv_flops_counter_hook(conv_module, input, output):
    """Accumulate FLOPs for a (grouped) convolution forward pass."""
    # Hooks receive a tuple of inputs; only the first carries the data.
    first = input[0]

    batch_size = first.shape[0]
    output_dims = list(output.shape[2:])

    filters_per_channel = conv_module.out_channels // conv_module.groups
    # Multiply-adds needed to produce one output position across channels.
    per_position = (int(np.prod(list(conv_module.kernel_size))) *
                    conv_module.in_channels * filters_per_channel)

    positions = batch_size * int(np.prod(output_dims))
    total = per_position * positions
    if conv_module.bias is not None:
        # One add per output element per channel.
        total += conv_module.out_channels * positions
    conv_module.__flops__ += int(total)
496
+
497
+
498
def batch_counter_hook(module, input, output):
    """Forward hook that accumulates the number of samples seen.

    Removed a redundant ``pass`` statement and the rebinding of ``input``
    from the original; behavior is unchanged.
    """
    batch_size = 1
    if len(input) > 0:
        # Can have multiple inputs; the first positional one carries the
        # batch dimension.
        batch_size = len(input[0])
    else:
        print('Warning! No positional inputs found for a module, '
              'assuming batch size is 1.')
    module.__batch_counter__ += batch_size
509
+
510
+
511
def add_batch_counter_variables_or_reset(module):
    """(Re)initialize the sample counter on ``module``."""
    module.__batch_counter__ = 0
514
+
515
+
516
def add_batch_counter_hook_function(module):
    """Attach the batch-counting forward hook, at most once per module."""
    if hasattr(module, '__batch_counter_handle__'):
        return
    module.__batch_counter_handle__ = module.register_forward_hook(
        batch_counter_hook)
522
+
523
+
524
def remove_batch_counter_hook_function(module):
    """Detach and forget the batch-counting hook, if one is attached."""
    if hasattr(module, '__batch_counter_handle__'):
        module.__batch_counter_handle__.remove()
        del module.__batch_counter_handle__
528
+
529
+
530
def add_flops_counter_variable_or_reset(module):
    """Create or reset ``__flops__`` / ``__params__`` on supported modules."""
    if is_supported_instance(module):
        if hasattr(module, '__flops__') or hasattr(module, '__params__'):
            # Fixed message: the original concatenation lacked a space before
            # the module name and a separator after it, printing e.g.
            # "for the moduleReLU ptflops can affect your code!".
            print('Warning: variables __flops__ or __params__ are already '
                  'defined for the module ' + type(module).__name__ +
                  '. ptflops can affect your code!')
        module.__flops__ = 0
        module.__params__ = get_model_parameters_number(module)
538
+
539
+
540
def is_supported_instance(module):
    """Whether a FLOPs hook exists for ``module``'s exact type."""
    return type(module) in get_modules_mapping()
544
+
545
+
546
def remove_flops_counter_hook_function(module):
    """Detach and forget the FLOPs-counting hook on supported modules."""
    if is_supported_instance(module) and hasattr(module, '__flops_handle__'):
        module.__flops_handle__.remove()
        del module.__flops_handle__
551
+
552
+
553
def get_modules_mapping():
    """Map every supported module type to its FLOPs-counting forward hook.

    Built lazily (inside a function) so the mmcv wrapper classes are looked
    up at call time rather than at import time.
    """
    return {
        # convolutions
        nn.Conv1d: conv_flops_counter_hook,
        nn.Conv2d: conv_flops_counter_hook,
        mmcv.cnn.bricks.Conv2d: conv_flops_counter_hook,
        nn.Conv3d: conv_flops_counter_hook,
        mmcv.cnn.bricks.Conv3d: conv_flops_counter_hook,
        # activations
        nn.ReLU: relu_flops_counter_hook,
        nn.PReLU: relu_flops_counter_hook,
        nn.ELU: relu_flops_counter_hook,
        nn.LeakyReLU: relu_flops_counter_hook,
        nn.ReLU6: relu_flops_counter_hook,
        # poolings
        nn.MaxPool1d: pool_flops_counter_hook,
        nn.AvgPool1d: pool_flops_counter_hook,
        nn.AvgPool2d: pool_flops_counter_hook,
        nn.MaxPool2d: pool_flops_counter_hook,
        mmcv.cnn.bricks.MaxPool2d: pool_flops_counter_hook,
        nn.MaxPool3d: pool_flops_counter_hook,
        mmcv.cnn.bricks.MaxPool3d: pool_flops_counter_hook,
        nn.AvgPool3d: pool_flops_counter_hook,
        nn.AdaptiveMaxPool1d: pool_flops_counter_hook,
        nn.AdaptiveAvgPool1d: pool_flops_counter_hook,
        nn.AdaptiveMaxPool2d: pool_flops_counter_hook,
        nn.AdaptiveAvgPool2d: pool_flops_counter_hook,
        nn.AdaptiveMaxPool3d: pool_flops_counter_hook,
        nn.AdaptiveAvgPool3d: pool_flops_counter_hook,
        # normalizations
        nn.BatchNorm1d: norm_flops_counter_hook,
        nn.BatchNorm2d: norm_flops_counter_hook,
        nn.BatchNorm3d: norm_flops_counter_hook,
        nn.GroupNorm: norm_flops_counter_hook,
        nn.InstanceNorm1d: norm_flops_counter_hook,
        nn.InstanceNorm2d: norm_flops_counter_hook,
        nn.InstanceNorm3d: norm_flops_counter_hook,
        nn.LayerNorm: norm_flops_counter_hook,
        # FC
        nn.Linear: linear_flops_counter_hook,
        mmcv.cnn.bricks.Linear: linear_flops_counter_hook,
        # Upscale
        nn.Upsample: upsample_flops_counter_hook,
        # Deconvolution
        nn.ConvTranspose2d: deconv_flops_counter_hook,
        mmcv.cnn.bricks.ConvTranspose2d: deconv_flops_counter_hook,
    }
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/utils/fuse_conv_bn.py ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import torch
3
+ import torch.nn as nn
4
+
5
+
6
+ def _fuse_conv_bn(conv, bn):
7
+ """Fuse conv and bn into one module.
8
+
9
+ Args:
10
+ conv (nn.Module): Conv to be fused.
11
+ bn (nn.Module): BN to be fused.
12
+
13
+ Returns:
14
+ nn.Module: Fused module.
15
+ """
16
+ conv_w = conv.weight
17
+ conv_b = conv.bias if conv.bias is not None else torch.zeros_like(
18
+ bn.running_mean)
19
+
20
+ factor = bn.weight / torch.sqrt(bn.running_var + bn.eps)
21
+ conv.weight = nn.Parameter(conv_w *
22
+ factor.reshape([conv.out_channels, 1, 1, 1]))
23
+ conv.bias = nn.Parameter((conv_b - bn.running_mean) * factor + bn.bias)
24
+ return conv
25
+
26
+
27
def fuse_conv_bn(module):
    """Recursively fold BatchNorm layers into their preceding convs.

    During inference the functionality of batch norm layers is turned off:
    only the running mean and var along channels are used, so a BN can be
    merged into the convolution right before it to save computation and
    simplify the network structure. Fused BN layers are replaced by
    ``nn.Identity`` so the module hierarchy stays unchanged.

    Args:
        module (nn.Module): Module to be fused in place.

    Returns:
        nn.Module: The same module with conv+BN pairs fused.
    """
    pending_conv = None
    pending_name = None

    for child_name, child in module.named_children():
        if isinstance(child,
                      (nn.modules.batchnorm._BatchNorm, nn.SyncBatchNorm)):
            # Only fuse a BN that follows a conv; a leading BN is left alone.
            if pending_conv is not None:
                module._modules[pending_name] = _fuse_conv_bn(
                    pending_conv, child)
                # Keep the slot occupied with an Identity instead of
                # deleting it, to minimize structural changes.
                module._modules[child_name] = nn.Identity()
                pending_conv = None
        elif isinstance(child, nn.Conv2d):
            pending_conv = child
            pending_name = child_name
        else:
            fuse_conv_bn(child)
    return module
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/utils/sync_bn.py ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+
3
+ import annotator.uniformer.mmcv as mmcv
4
+
5
+
6
+ class _BatchNormXd(torch.nn.modules.batchnorm._BatchNorm):
7
+ """A general BatchNorm layer without input dimension check.
8
+
9
+ Reproduced from @kapily's work:
10
+ (https://github.com/pytorch/pytorch/issues/41081#issuecomment-783961547)
11
+ The only difference between BatchNorm1d, BatchNorm2d, BatchNorm3d, etc
12
+ is `_check_input_dim` that is designed for tensor sanity checks.
13
+ The check has been bypassed in this class for the convenience of converting
14
+ SyncBatchNorm.
15
+ """
16
+
17
+ def _check_input_dim(self, input):
18
+ return
19
+
20
+
21
def revert_sync_batchnorm(module):
    """Convert all SyncBN layers in ``module`` to ``_BatchNormXd`` layers.

    Handles both ``torch.nn.SyncBatchNorm`` (SyncBN) and, when available,
    ``mmcv.ops.sync_bn.SyncBatchNorm`` (MMSyncBN).

    Adapted from @kapily's work:
    (https://github.com/pytorch/pytorch/issues/41081#issuecomment-783961547)

    Args:
        module (nn.Module): The module containing `SyncBatchNorm` layers.

    Returns:
        module_output: The converted module with `BatchNormXd` layers.
    """
    sync_bn_types = [torch.nn.modules.batchnorm.SyncBatchNorm]
    if hasattr(mmcv, 'ops'):
        sync_bn_types.append(mmcv.ops.SyncBatchNorm)

    converted = module
    if isinstance(module, tuple(sync_bn_types)):
        converted = _BatchNormXd(module.num_features, module.eps,
                                 module.momentum, module.affine,
                                 module.track_running_stats)
        if module.affine:
            # no_grad() may not be strictly needed here but mirrors
            # `convert_sync_batchnorm()` for consistency.
            with torch.no_grad():
                converted.weight = module.weight
                converted.bias = module.bias
        converted.running_mean = module.running_mean
        converted.running_var = module.running_var
        converted.num_batches_tracked = module.num_batches_tracked
        converted.training = module.training
        # qconfig exists in quantized models
        if hasattr(module, 'qconfig'):
            converted.qconfig = module.qconfig
    for name, child in module.named_children():
        converted.add_module(name, revert_sync_batchnorm(child))
    del module
    return converted
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/cnn/utils/weight_init.py ADDED
@@ -0,0 +1,684 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import copy
3
+ import math
4
+ import warnings
5
+
6
+ import numpy as np
7
+ import torch
8
+ import torch.nn as nn
9
+ from torch import Tensor
10
+
11
+ from annotator.uniformer.mmcv.utils import Registry, build_from_cfg, get_logger, print_log
12
+
13
+ INITIALIZERS = Registry('initializer')
14
+
15
+
16
def update_init_info(module, init_info):
    """Record ``init_info`` for every parameter whose value has changed.

    ``module._params_init_info`` maps each parameter to a dict holding the
    textual description of its last initialization (``init_info``) and the
    mean value observed at that time (``tmp_mean_value``). A parameter
    whose current mean differs from the stored one is assumed to have been
    re-initialized, so its record is refreshed.

    Args:
        module (obj:`nn.Module`): Module carrying a user-defined
            ``_params_init_info`` attribute that records initialization
            information.
        init_info (str): Description of the initialization just performed.
    """
    assert hasattr(
        module,
        '_params_init_info'), f'Can not find `_params_init_info` in {module}'
    for name, param in module.named_parameters():
        assert param in module._params_init_info, (
            f'Find a new :obj:`Parameter` '
            f'named `{name}` during executing the '
            f'`init_weights` of '
            f'`{module.__class__.__name__}`. '
            f'Please do not add or '
            f'replace parameters during executing '
            f'the `init_weights`. ')

        record = module._params_init_info[param]
        current_mean = param.data.mean()
        # A changed mean implies this parameter was touched by init_weights.
        if record['tmp_mean_value'] != current_mean:
            record['init_info'] = init_info
            record['tmp_mean_value'] = current_mean
+ module._params_init_info[param]['tmp_mean_value'] = mean_value
46
+
47
+
48
def constant_init(module, val, bias=0):
    """Fill ``module.weight`` with ``val`` and ``module.bias`` with ``bias``,
    skipping whichever attribute is absent or ``None``."""
    if getattr(module, 'weight', None) is not None:
        nn.init.constant_(module.weight, val)
    if getattr(module, 'bias', None) is not None:
        nn.init.constant_(module.bias, bias)
+ nn.init.constant_(module.bias, bias)
53
+
54
+
55
def xavier_init(module, gain=1, bias=0, distribution='normal'):
    """Xavier-initialize ``module.weight`` and constant-fill its bias.

    Args:
        module (nn.Module): Module with optional ``weight``/``bias``.
        gain (int | float): Scaling factor for the Xavier scheme.
        bias (int | float): Value to fill the bias with.
        distribution (str): Either ``'uniform'`` or ``'normal'``.
    """
    assert distribution in ['uniform', 'normal']
    if getattr(module, 'weight', None) is not None:
        init_fn = (nn.init.xavier_uniform_
                   if distribution == 'uniform' else nn.init.xavier_normal_)
        init_fn(module.weight, gain=gain)
    if getattr(module, 'bias', None) is not None:
        nn.init.constant_(module.bias, bias)
+ nn.init.constant_(module.bias, bias)
64
+
65
+
66
def normal_init(module, mean=0, std=1, bias=0):
    """Draw ``module.weight`` from N(mean, std^2); constant-fill its bias."""
    if getattr(module, 'weight', None) is not None:
        nn.init.normal_(module.weight, mean, std)
    if getattr(module, 'bias', None) is not None:
        nn.init.constant_(module.bias, bias)
+ nn.init.constant_(module.bias, bias)
71
+
72
+
73
def trunc_normal_init(module: nn.Module,
                      mean: float = 0,
                      std: float = 1,
                      a: float = -2,
                      b: float = 2,
                      bias: float = 0) -> None:
    """Draw ``module.weight`` from N(mean, std^2) truncated to ``[a, b]``;
    constant-fill its bias."""
    if getattr(module, 'weight', None) is not None:
        trunc_normal_(module.weight, mean, std, a, b)  # type: ignore
    if getattr(module, 'bias', None) is not None:
        nn.init.constant_(module.bias, bias)  # type: ignore
+ nn.init.constant_(module.bias, bias) # type: ignore
83
+
84
+
85
def uniform_init(module, a=0, b=1, bias=0):
    """Draw ``module.weight`` from U(a, b); constant-fill its bias."""
    if getattr(module, 'weight', None) is not None:
        nn.init.uniform_(module.weight, a, b)
    if getattr(module, 'bias', None) is not None:
        nn.init.constant_(module.bias, bias)
+ nn.init.constant_(module.bias, bias)
90
+
91
+
92
def kaiming_init(module,
                 a=0,
                 mode='fan_out',
                 nonlinearity='relu',
                 bias=0,
                 distribution='normal'):
    """Kaiming-initialize ``module.weight``; constant-fill its bias.

    Args:
        module (nn.Module): Module with optional ``weight``/``bias``.
        a (int | float): Negative slope (used with ``'leaky_relu'``).
        mode (str): ``'fan_in'`` or ``'fan_out'``.
        nonlinearity (str): Nonlinearity used after this layer.
        bias (int | float): Value to fill the bias with.
        distribution (str): Either ``'uniform'`` or ``'normal'``.
    """
    assert distribution in ['uniform', 'normal']
    if getattr(module, 'weight', None) is not None:
        init_fn = (nn.init.kaiming_uniform_
                   if distribution == 'uniform' else nn.init.kaiming_normal_)
        init_fn(module.weight, a=a, mode=mode, nonlinearity=nonlinearity)
    if getattr(module, 'bias', None) is not None:
        nn.init.constant_(module.bias, bias)
+ nn.init.constant_(module.bias, bias)
108
+
109
+
110
def caffe2_xavier_init(module, bias=0):
    """Initialize ``module`` the way Caffe2's ``XavierFill`` does.

    `XavierFill` in Caffe2 corresponds to `kaiming_uniform_` in PyTorch
    with ``fan_in`` mode and a leaky-relu slope of 1.
    Acknowledgment to FAIR's internal code.
    """
    kaiming_init(
        module,
        a=1,
        mode='fan_in',
        nonlinearity='leaky_relu',
        bias=bias,
        distribution='uniform')
+ distribution='uniform')
120
+
121
+
122
def bias_init_with_prob(prior_prob):
    """Return the bias making a sigmoid output equal ``prior_prob``.

    Solves ``sigmoid(b) = prior_prob`` for ``b``; commonly used to set the
    bias of classification conv/fc layers.
    """
    return float(-np.log((1 - prior_prob) / prior_prob))
+ return bias_init
126
+
127
+
128
+ def _get_bases_name(m):
129
+ return [b.__name__ for b in m.__class__.__bases__]
130
+
131
+
132
class BaseInit(object):
    """Common plumbing shared by the concrete initializer classes.

    Validates and stores the ``bias``/``bias_prob``/``layer`` options that
    every initializer accepts.

    Args:
        bias (int | float): Value to fill the bias with. Defaults to 0.
        bias_prob (float, optional): If given, the bias is derived from this
            probability via ``bias_init_with_prob``. Defaults to None.
        layer (str | list[str], optional): Layer class name(s) this
            initializer applies to. Defaults to None (no layers).
    """

    def __init__(self, *, bias=0, bias_prob=None, layer=None):
        # Set to True by `_initialize` in override mode, where the layer
        # filter is bypassed and the whole sub-module is initialized.
        self.wholemodule = False
        if not isinstance(bias, (int, float)):
            raise TypeError(f'bias must be a number, but got a {type(bias)}')

        if bias_prob is not None and not isinstance(bias_prob, float):
            raise TypeError(f'bias_prob type must be float, \
                    but got {type(bias_prob)}')

        if layer is None:
            layer = []
        elif not isinstance(layer, (str, list)):
            raise TypeError(f'layer must be a str or a list of str, \
                but got a {type(layer)}')

        if bias_prob is not None:
            self.bias = bias_init_with_prob(bias_prob)
        else:
            self.bias = bias
        self.layer = [layer] if isinstance(layer, str) else layer

    def _get_init_info(self):
        return f'{self.__class__.__name__}, bias={self.bias}'
+ return info
160
+
161
+
162
@INITIALIZERS.register_module(name='Constant')
class ConstantInit(BaseInit):
    """Initialize module parameters with constant values.

    Args:
        val (int | float): the value to fill the weights in the module with
        bias (int | float): the value to fill the bias. Defaults to 0.
        bias_prob (float, optional): the probability for bias initialization.
            Defaults to None.
        layer (str | list[str], optional): the layer will be initialized.
            Defaults to None.
    """

    def __init__(self, val, **kwargs):
        super().__init__(**kwargs)
        self.val = val

    def __call__(self, module):

        def _apply(m):
            # Override mode initializes the whole module; otherwise only
            # layers whose class (or base class) name is listed.
            matches = self.wholemodule
            if not matches:
                candidates = {m.__class__.__name__, *_get_bases_name(m)}
                matches = bool(candidates & set(self.layer))
            if matches:
                constant_init(m, self.val, self.bias)

        module.apply(_apply)
        if hasattr(module, '_params_init_info'):
            update_init_info(module, init_info=self._get_init_info())

    def _get_init_info(self):
        return f'{self.__class__.__name__}: val={self.val}, bias={self.bias}'
+ return info
197
+
198
+
199
@INITIALIZERS.register_module(name='Xavier')
class XavierInit(BaseInit):
    r"""Initialize module parameters with values according to the method
    described in `Understanding the difficulty of training deep feedforward
    neural networks - Glorot, X. & Bengio, Y. (2010).
    <http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf>`_

    Args:
        gain (int | float): an optional scaling factor. Defaults to 1.
        bias (int | float): the value to fill the bias. Defaults to 0.
        bias_prob (float, optional): the probability for bias initialization.
            Defaults to None.
        distribution (str): distribution either be ``'normal'``
            or ``'uniform'``. Defaults to ``'normal'``.
        layer (str | list[str], optional): the layer will be initialized.
            Defaults to None.
    """

    def __init__(self, gain=1, distribution='normal', **kwargs):
        super().__init__(**kwargs)
        self.gain = gain
        self.distribution = distribution

    def __call__(self, module):

        def _apply(m):
            # Apply to everything in override mode, else filter by name.
            matches = self.wholemodule
            if not matches:
                candidates = {m.__class__.__name__, *_get_bases_name(m)}
                matches = bool(candidates & set(self.layer))
            if matches:
                xavier_init(m, self.gain, self.bias, self.distribution)

        module.apply(_apply)
        if hasattr(module, '_params_init_info'):
            update_init_info(module, init_info=self._get_init_info())

    def _get_init_info(self):
        return (f'{self.__class__.__name__}: gain={self.gain}, '
                f'distribution={self.distribution}, bias={self.bias}')
+ return info
241
+
242
+
243
@INITIALIZERS.register_module(name='Normal')
class NormalInit(BaseInit):
    r"""Initialize module parameters with the values drawn from the normal
    distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`.

    Args:
        mean (int | float):the mean of the normal distribution. Defaults to 0.
        std (int | float): the standard deviation of the normal distribution.
            Defaults to 1.
        bias (int | float): the value to fill the bias. Defaults to 0.
        bias_prob (float, optional): the probability for bias initialization.
            Defaults to None.
        layer (str | list[str], optional): the layer will be initialized.
            Defaults to None.
    """

    def __init__(self, mean=0, std=1, **kwargs):
        super().__init__(**kwargs)
        self.mean = mean
        self.std = std

    def __call__(self, module):

        def _apply(m):
            # Apply to everything in override mode, else filter by name.
            matches = self.wholemodule
            if not matches:
                candidates = {m.__class__.__name__, *_get_bases_name(m)}
                matches = bool(candidates & set(self.layer))
            if matches:
                normal_init(m, self.mean, self.std, self.bias)

        module.apply(_apply)
        if hasattr(module, '_params_init_info'):
            update_init_info(module, init_info=self._get_init_info())

    def _get_init_info(self):
        return (f'{self.__class__.__name__}: mean={self.mean},'
                f' std={self.std}, bias={self.bias}')
+ return info
284
+
285
+
286
@INITIALIZERS.register_module(name='TruncNormal')
class TruncNormalInit(BaseInit):
    r"""Initialize module parameters with the values drawn from the normal
    distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` truncated
    to the interval :math:`[a, b]`.

    Args:
        mean (float): the mean of the normal distribution. Defaults to 0.
        std (float): the standard deviation of the normal distribution.
            Defaults to 1.
        a (float): The minimum cutoff value.
        b (float): The maximum cutoff value.
        bias (float): the value to fill the bias. Defaults to 0.
        bias_prob (float, optional): the probability for bias initialization.
            Defaults to None.
        layer (str | list[str], optional): the layer will be initialized.
            Defaults to None.
    """

    def __init__(self,
                 mean: float = 0,
                 std: float = 1,
                 a: float = -2,
                 b: float = 2,
                 **kwargs) -> None:
        super().__init__(**kwargs)
        self.mean = mean
        self.std = std
        self.a = a
        self.b = b

    def __call__(self, module: nn.Module) -> None:

        def _apply(m):
            # Apply to everything in override mode, else filter by name.
            matches = self.wholemodule
            if not matches:
                candidates = {m.__class__.__name__, *_get_bases_name(m)}
                matches = bool(candidates & set(self.layer))
            if matches:
                trunc_normal_init(m, self.mean, self.std, self.a, self.b,
                                  self.bias)

        module.apply(_apply)
        if hasattr(module, '_params_init_info'):
            update_init_info(module, init_info=self._get_init_info())

    def _get_init_info(self):
        return (f'{self.__class__.__name__}: a={self.a}, b={self.b},'
                f' mean={self.mean}, std={self.std}, bias={self.bias}')
+ return info
339
+
340
+
341
@INITIALIZERS.register_module(name='Uniform')
class UniformInit(BaseInit):
    r"""Initialize module parameters with values drawn from the uniform
    distribution :math:`\mathcal{U}(a, b)`.

    Args:
        a (int | float): the lower bound of the uniform distribution.
            Defaults to 0.
        b (int | float): the upper bound of the uniform distribution.
            Defaults to 1.
        bias (int | float): the value to fill the bias. Defaults to 0.
        bias_prob (float, optional): the probability for bias initialization.
            Defaults to None.
        layer (str | list[str], optional): the layer will be initialized.
            Defaults to None.
    """

    def __init__(self, a=0, b=1, **kwargs):
        super().__init__(**kwargs)
        self.a = a
        self.b = b

    def __call__(self, module):

        def _apply(m):
            # Apply to everything in override mode, else filter by name.
            matches = self.wholemodule
            if not matches:
                candidates = {m.__class__.__name__, *_get_bases_name(m)}
                matches = bool(candidates & set(self.layer))
            if matches:
                uniform_init(m, self.a, self.b, self.bias)

        module.apply(_apply)
        if hasattr(module, '_params_init_info'):
            update_init_info(module, init_info=self._get_init_info())

    def _get_init_info(self):
        return (f'{self.__class__.__name__}: a={self.a},'
                f' b={self.b}, bias={self.bias}')
+ return info
382
+
383
+
384
@INITIALIZERS.register_module(name='Kaiming')
class KaimingInit(BaseInit):
    r"""Initialize module parameters with the values according to the method
    described in `Delving deep into rectifiers: Surpassing human-level
    performance on ImageNet classification - He, K. et al. (2015).
    <https://www.cv-foundation.org/openaccess/content_iccv_2015/
    papers/He_Delving_Deep_into_ICCV_2015_paper.pdf>`_

    Args:
        a (int | float): the negative slope of the rectifier used after this
            layer (only used with ``'leaky_relu'``). Defaults to 0.
        mode (str): either ``'fan_in'`` or ``'fan_out'``. Defaults to
            ``'fan_out'``.
        nonlinearity (str): the non-linear function (`nn.functional` name),
            recommended to use only with ``'relu'`` or ``'leaky_relu'``.
            Defaults to 'relu'.
        bias (int | float): the value to fill the bias. Defaults to 0.
        bias_prob (float, optional): the probability for bias initialization.
            Defaults to None.
        distribution (str): distribution either be ``'normal'`` or
            ``'uniform'``. Defaults to ``'normal'``.
        layer (str | list[str], optional): the layer will be initialized.
            Defaults to None.
    """

    def __init__(self,
                 a=0,
                 mode='fan_out',
                 nonlinearity='relu',
                 distribution='normal',
                 **kwargs):
        super().__init__(**kwargs)
        self.a = a
        self.mode = mode
        self.nonlinearity = nonlinearity
        self.distribution = distribution

    def __call__(self, module):

        def _apply(m):
            # Apply to everything in override mode, else filter by name.
            matches = self.wholemodule
            if not matches:
                candidates = {m.__class__.__name__, *_get_bases_name(m)}
                matches = bool(candidates & set(self.layer))
            if matches:
                kaiming_init(m, self.a, self.mode, self.nonlinearity,
                             self.bias, self.distribution)

        module.apply(_apply)
        if hasattr(module, '_params_init_info'):
            update_init_info(module, init_info=self._get_init_info())

    def _get_init_info(self):
        return (f'{self.__class__.__name__}: a={self.a}, mode={self.mode}, '
                f'nonlinearity={self.nonlinearity}, '
                f'distribution ={self.distribution}, bias={self.bias}')
+ return info
445
+
446
+
447
@INITIALIZERS.register_module(name='Caffe2Xavier')
class Caffe2XavierInit(KaimingInit):
    """Caffe2-style Xavier initialization.

    `XavierFill` in Caffe2 corresponds to `kaiming_uniform_` in PyTorch
    with ``mode='fan_in'``, ``nonlinearity='leaky_relu'`` and ``a=1``.
    Acknowledgment to FAIR's internal code.

    Args:
        bias (int | float): the value to fill the bias. Defaults to 0.
        bias_prob (float, optional): the probability for bias initialization.
            Defaults to None.
        layer (str | list[str], optional): the layer will be initialized.
            Defaults to None.
    """

    def __init__(self, **kwargs):
        super().__init__(
            a=1,
            mode='fan_in',
            nonlinearity='leaky_relu',
            distribution='uniform',
            **kwargs)

    # NOTE: ``__call__`` is inherited from ``KaimingInit`` unchanged. The
    # previous explicit override only delegated to ``super().__call__`` with
    # identical arguments and has been removed as redundant.
+ super().__call__(module)
461
+
462
+
463
@INITIALIZERS.register_module(name='Pretrained')
class PretrainedInit(object):
    """Initialize module by loading a pretrained model.

    Args:
        checkpoint (str): the checkpoint file of the pretrained model should
            be load.
        prefix (str, optional): the prefix of a sub-module in the pretrained
            model. it is for loading a part of the pretrained model to
            initialize. For example, if we would like to only load the
            backbone of a detector model, we can set ``prefix='backbone.'``.
            Defaults to None.
        map_location (str): map tensors into proper locations.
    """

    def __init__(self, checkpoint, prefix=None, map_location=None):
        self.checkpoint = checkpoint
        self.prefix = prefix
        self.map_location = map_location

    def __call__(self, module):
        # Imported lazily to avoid a circular import at module load time.
        from annotator.uniformer.mmcv.runner import (
            _load_checkpoint_with_prefix, load_checkpoint, load_state_dict)
        logger = get_logger('mmcv')
        if self.prefix is not None:
            # Load only the sub-dict under `prefix` into `module`.
            print_log(
                f'load {self.prefix} in model from: {self.checkpoint}',
                logger=logger)
            state_dict = _load_checkpoint_with_prefix(
                self.prefix, self.checkpoint, map_location=self.map_location)
            load_state_dict(module, state_dict, strict=False, logger=logger)
        else:
            print_log(f'load model from: {self.checkpoint}', logger=logger)
            load_checkpoint(
                module,
                self.checkpoint,
                map_location=self.map_location,
                strict=False,
                logger=logger)

        if hasattr(module, '_params_init_info'):
            update_init_info(module, init_info=self._get_init_info())

    def _get_init_info(self):
        return f'{self.__class__.__name__}: load from {self.checkpoint}'
+ return info
509
+
510
+
511
def _initialize(module, cfg, wholemodule=False):
    """Build an initializer from ``cfg`` and apply it to ``module``.

    ``wholemodule`` is set in override mode: override entries carry no
    ``layer`` key, so the initializer is applied to the entire named
    sub-module rather than filtered by layer type.
    """
    initializer = build_from_cfg(cfg, INITIALIZERS)
    initializer.wholemodule = wholemodule
    initializer(module)
+ func(module)
518
+
519
+
520
def _initialize_override(module, override, cfg):
    """Apply the ``override`` entries of an ``init_cfg`` to sub-modules.

    Every override must name an attribute of ``module``. An override that
    only carries ``name`` inherits all arguments from the parent ``cfg``;
    otherwise it must specify its own ``type``.
    """
    if not isinstance(override, (dict, list)):
        raise TypeError(f'override must be a dict or a list of dict, \
                but got {type(override)}')

    overrides = [override] if isinstance(override, dict) else override

    for entry in overrides:
        # Deep-copy so the pops below do not mutate the caller's config.
        entry = copy.deepcopy(entry)
        name = entry.pop('name', None)
        if name is None:
            raise ValueError('`override` must contain the key "name",'
                             f'but got {entry}')
        if not entry:
            # Only `name` given: reuse the arguments of the parent cfg.
            entry.update(cfg)
        elif 'type' not in entry.keys():
            # Extra args but no initializer type is ambiguous.
            raise ValueError(
                f'`override` need "type" key, but got {entry}')

        if not hasattr(module, name):
            raise RuntimeError(f'module did not have attribute {name}, '
                               f'but init_cfg is {entry}.')
        _initialize(getattr(module, name), entry, wholemodule=True)
+ f'but init_cfg is {cp_override}.')
548
+
549
+
550
def initialize(module, init_cfg):
    """Initialize a module according to ``init_cfg``.

    Args:
        module (``torch.nn.Module``): the module to be initialized.
        init_cfg (dict | list[dict]): initialization configuration dict(s)
            defining the initializer(s). OpenMMLab has implemented 6
            initializers including ``Constant``, ``Xavier``, ``Normal``,
            ``Uniform``, ``Kaiming``, and ``Pretrained``. A cfg may carry
            an ``override`` entry giving a named sub-module its own
            initialization.
    Example:
        >>> module = nn.Linear(2, 3, bias=True)
        >>> init_cfg = dict(type='Constant', layer='Linear', val=1, bias=2)
        >>> initialize(module, init_cfg)

        >>> # different configuration per layer type
        >>> module = nn.Sequential(nn.Conv1d(3, 1, 3), nn.Linear(1, 2))
        >>> init_cfg = [dict(type='Constant', layer='Conv1d', val=1),
                dict(type='Constant', layer='Linear', val=2)]
        >>> initialize(module, init_cfg)

        >>> # initialize a specific sub-module via ``override``
        >>> init_cfg = dict(type='Constant', val=1, bias=2, layer='Conv2d',
        >>>     override=dict(type='Constant', name='reg', val=3, bias=4))
        >>> initialize(model, init_cfg)

        >>> # initialize weights from a pretrained model
        >>> init_cfg = dict(type='Pretrained',
                checkpoint='torchvision://resnet50')
        >>> initialize(model, init_cfg)

        >>> # load a specific part of a pretrained model via ``prefix``
        >>> url = 'http://download.openmmlab.com/mmdetection/v2.0/retinanet/'\
        >>>     'retinanet_r50_fpn_1x_coco/'\
        >>>     'retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth'
        >>> init_cfg = dict(type='Pretrained',
                checkpoint=url, prefix='backbone.')
    """
    if not isinstance(init_cfg, (dict, list)):
        raise TypeError(f'init_cfg must be a dict or a list of dict, \
                but got {type(init_cfg)}')

    cfg_list = [init_cfg] if isinstance(init_cfg, dict) else init_cfg

    for cfg in cfg_list:
        # Deep-copy: one cfg may be shared by several modules (e.g. one
        # init_cfg shared by multiple bottleneck blocks); the pops below
        # must not change the initialization behavior of other users.
        cfg = copy.deepcopy(cfg)
        override = cfg.pop('override', None)
        _initialize(module, cfg)

        if override is not None:
            # The layer filter does not apply to named overrides.
            cfg.pop('layer', None)
            _initialize_override(module, override, cfg)
        # With no override, all attributes share the same initialization.
+ pass
620
+
621
+
622
+ def _no_grad_trunc_normal_(tensor: Tensor, mean: float, std: float, a: float,
623
+ b: float) -> Tensor:
624
+ # Method based on
625
+ # https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
626
+ # Modified from
627
+ # https://github.com/pytorch/pytorch/blob/master/torch/nn/init.py
628
+ def norm_cdf(x):
629
+ # Computes standard normal cumulative distribution function
630
+ return (1. + math.erf(x / math.sqrt(2.))) / 2.
631
+
632
+ if (mean < a - 2 * std) or (mean > b + 2 * std):
633
+ warnings.warn(
634
+ 'mean is more than 2 std from [a, b] in nn.init.trunc_normal_. '
635
+ 'The distribution of values may be incorrect.',
636
+ stacklevel=2)
637
+
638
+ with torch.no_grad():
639
+ # Values are generated by using a truncated uniform distribution and
640
+ # then using the inverse CDF for the normal distribution.
641
+ # Get upper and lower cdf values
642
+ lower = norm_cdf((a - mean) / std)
643
+ upper = norm_cdf((b - mean) / std)
644
+
645
+ # Uniformly fill tensor with values from [lower, upper], then translate
646
+ # to [2lower-1, 2upper-1].
647
+ tensor.uniform_(2 * lower - 1, 2 * upper - 1)
648
+
649
+ # Use inverse cdf transform for normal distribution to get truncated
650
+ # standard normal
651
+ tensor.erfinv_()
652
+
653
+ # Transform to proper mean, std
654
+ tensor.mul_(std * math.sqrt(2.))
655
+ tensor.add_(mean)
656
+
657
+ # Clamp to ensure it's in the proper range
658
+ tensor.clamp_(min=a, max=b)
659
+ return tensor
660
+
661
+
662
def trunc_normal_(tensor: Tensor,
                  mean: float = 0.,
                  std: float = 1.,
                  a: float = -2.,
                  b: float = 2.) -> Tensor:
    r"""Fill ``tensor`` with samples from a truncated normal distribution.

    The values are effectively drawn from
    :math:`\mathcal{N}(\text{mean}, \text{std}^2)` with values outside
    :math:`[a, b]` redrawn until they are within the bounds. The generation
    method works best when :math:`a \leq \text{mean} \leq b`.

    Modified from
    https://github.com/pytorch/pytorch/blob/master/torch/nn/init.py

    Args:
        tensor (``torch.Tensor``): an n-dimensional `torch.Tensor`.
        mean (float): the mean of the normal distribution.
        std (float): the standard deviation of the normal distribution.
        a (float): the minimum cutoff value.
        b (float): the maximum cutoff value.
    """
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
+ return _no_grad_trunc_normal_(tensor, mean, std, a, b)
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/runner/__init__.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ from .base_module import BaseModule, ModuleList, Sequential
3
+ from .base_runner import BaseRunner
4
+ from .builder import RUNNERS, build_runner
5
+ from .checkpoint import (CheckpointLoader, _load_checkpoint,
6
+ _load_checkpoint_with_prefix, load_checkpoint,
7
+ load_state_dict, save_checkpoint, weights_to_cpu)
8
+ from .default_constructor import DefaultRunnerConstructor
9
+ from .dist_utils import (allreduce_grads, allreduce_params, get_dist_info,
10
+ init_dist, master_only)
11
+ from .epoch_based_runner import EpochBasedRunner, Runner
12
+ from .fp16_utils import LossScaler, auto_fp16, force_fp32, wrap_fp16_model
13
+ from .hooks import (HOOKS, CheckpointHook, ClosureHook, DistEvalHook,
14
+ DistSamplerSeedHook, DvcliveLoggerHook, EMAHook, EvalHook,
15
+ Fp16OptimizerHook, GradientCumulativeFp16OptimizerHook,
16
+ GradientCumulativeOptimizerHook, Hook, IterTimerHook,
17
+ LoggerHook, LrUpdaterHook, MlflowLoggerHook,
18
+ NeptuneLoggerHook, OptimizerHook, PaviLoggerHook,
19
+ SyncBuffersHook, TensorboardLoggerHook, TextLoggerHook,
20
+ WandbLoggerHook)
21
+ from .iter_based_runner import IterBasedRunner, IterLoader
22
+ from .log_buffer import LogBuffer
23
+ from .optimizer import (OPTIMIZER_BUILDERS, OPTIMIZERS,
24
+ DefaultOptimizerConstructor, build_optimizer,
25
+ build_optimizer_constructor)
26
+ from .priority import Priority, get_priority
27
+ from .utils import get_host_info, get_time_str, obj_from_dict, set_random_seed
28
+
29
+ __all__ = [
30
+ 'BaseRunner', 'Runner', 'EpochBasedRunner', 'IterBasedRunner', 'LogBuffer',
31
+ 'HOOKS', 'Hook', 'CheckpointHook', 'ClosureHook', 'LrUpdaterHook',
32
+ 'OptimizerHook', 'IterTimerHook', 'DistSamplerSeedHook', 'LoggerHook',
33
+ 'PaviLoggerHook', 'TextLoggerHook', 'TensorboardLoggerHook',
34
+ 'NeptuneLoggerHook', 'WandbLoggerHook', 'MlflowLoggerHook',
35
+ 'DvcliveLoggerHook', '_load_checkpoint', 'load_state_dict',
36
+ 'load_checkpoint', 'weights_to_cpu', 'save_checkpoint', 'Priority',
37
+ 'get_priority', 'get_host_info', 'get_time_str', 'obj_from_dict',
38
+ 'init_dist', 'get_dist_info', 'master_only', 'OPTIMIZER_BUILDERS',
39
+ 'OPTIMIZERS', 'DefaultOptimizerConstructor', 'build_optimizer',
40
+ 'build_optimizer_constructor', 'IterLoader', 'set_random_seed',
41
+ 'auto_fp16', 'force_fp32', 'wrap_fp16_model', 'Fp16OptimizerHook',
42
+ 'SyncBuffersHook', 'EMAHook', 'build_runner', 'RUNNERS', 'allreduce_grads',
43
+ 'allreduce_params', 'LossScaler', 'CheckpointLoader', 'BaseModule',
44
+ '_load_checkpoint_with_prefix', 'EvalHook', 'DistEvalHook', 'Sequential',
45
+ 'ModuleList', 'GradientCumulativeOptimizerHook',
46
+ 'GradientCumulativeFp16OptimizerHook', 'DefaultRunnerConstructor'
47
+ ]
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/runner/base_module.py ADDED
@@ -0,0 +1,195 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import copy
3
+ import warnings
4
+ from abc import ABCMeta
5
+ from collections import defaultdict
6
+ from logging import FileHandler
7
+
8
+ import torch.nn as nn
9
+
10
+ from annotator.uniformer.mmcv.runner.dist_utils import master_only
11
+ from annotator.uniformer.mmcv.utils.logging import get_logger, logger_initialized, print_log
12
+
13
+
14
+ class BaseModule(nn.Module, metaclass=ABCMeta):
15
+ """Base module for all modules in openmmlab.
16
+
17
+ ``BaseModule`` is a wrapper of ``torch.nn.Module`` with additional
18
+ functionality of parameter initialization. Compared with
19
+ ``torch.nn.Module``, ``BaseModule`` mainly adds three attributes.
20
+
21
+ - ``init_cfg``: the config to control the initialization.
22
+ - ``init_weights``: The function of parameter
23
+ initialization and recording initialization
24
+ information.
25
+ - ``_params_init_info``: Used to track the parameter
26
+ initialization information. This attribute only
27
+ exists during executing the ``init_weights``.
28
+
29
+ Args:
30
+ init_cfg (dict, optional): Initialization config dict.
31
+ """
32
+
33
+ def __init__(self, init_cfg=None):
34
+ """Initialize BaseModule, inherited from `torch.nn.Module`"""
35
+
36
+ # NOTE init_cfg can be defined in different levels, but init_cfg
37
+ # in low levels has a higher priority.
38
+
39
+ super(BaseModule, self).__init__()
40
+ # define default value of init_cfg instead of hard code
41
+ # in init_weights() function
42
+ self._is_init = False
43
+
44
+ self.init_cfg = copy.deepcopy(init_cfg)
45
+
46
+ # Backward compatibility in derived classes
47
+ # if pretrained is not None:
48
+ # warnings.warn('DeprecationWarning: pretrained is a deprecated \
49
+ # key, please consider using init_cfg')
50
+ # self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
51
+
52
+ @property
53
+ def is_init(self):
54
+ return self._is_init
55
+
56
+ def init_weights(self):
57
+ """Initialize the weights."""
58
+
59
+ is_top_level_module = False
60
+ # check if it is top-level module
61
+ if not hasattr(self, '_params_init_info'):
62
+ # The `_params_init_info` is used to record the initialization
63
+ # information of the parameters
64
+ # the key should be the obj:`nn.Parameter` of model and the value
65
+ # should be a dict containing
66
+ # - init_info (str): The string that describes the initialization.
67
+ # - tmp_mean_value (FloatTensor): The mean of the parameter,
68
+ # which indicates whether the parameter has been modified.
69
+ # this attribute would be deleted after all parameters
70
+ # is initialized.
71
+ self._params_init_info = defaultdict(dict)
72
+ is_top_level_module = True
73
+
74
+ # Initialize the `_params_init_info`,
75
+ # When detecting the `tmp_mean_value` of
76
+ # the corresponding parameter is changed, update related
77
+ # initialization information
78
+ for name, param in self.named_parameters():
79
+ self._params_init_info[param][
80
+ 'init_info'] = f'The value is the same before and ' \
81
+ f'after calling `init_weights` ' \
82
+ f'of {self.__class__.__name__} '
83
+ self._params_init_info[param][
84
+ 'tmp_mean_value'] = param.data.mean()
85
+
86
+ # pass `params_init_info` to all submodules
87
+ # All submodules share the same `params_init_info`,
88
+ # so it will be updated when parameters are
89
+ # modified at any level of the model.
90
+ for sub_module in self.modules():
91
+ sub_module._params_init_info = self._params_init_info
92
+
93
+ # Get the initialized logger, if not exist,
94
+ # create a logger named `mmcv`
95
+ logger_names = list(logger_initialized.keys())
96
+ logger_name = logger_names[0] if logger_names else 'mmcv'
97
+
98
+ from ..cnn import initialize
99
+ from ..cnn.utils.weight_init import update_init_info
100
+ module_name = self.__class__.__name__
101
+ if not self._is_init:
102
+ if self.init_cfg:
103
+ print_log(
104
+ f'initialize {module_name} with init_cfg {self.init_cfg}',
105
+ logger=logger_name)
106
+ initialize(self, self.init_cfg)
107
+ if isinstance(self.init_cfg, dict):
108
+ # prevent the parameters of
109
+ # the pre-trained model
110
+ # from being overwritten by
111
+ # the `init_weights`
112
+ if self.init_cfg['type'] == 'Pretrained':
113
+ return
114
+
115
+ for m in self.children():
116
+ if hasattr(m, 'init_weights'):
117
+ m.init_weights()
118
+ # users may overload the `init_weights`
119
+ update_init_info(
120
+ m,
121
+ init_info=f'Initialized by '
122
+ f'user-defined `init_weights`'
123
+ f' in {m.__class__.__name__} ')
124
+
125
+ self._is_init = True
126
+ else:
127
+ warnings.warn(f'init_weights of {self.__class__.__name__} has '
128
+ f'been called more than once.')
129
+
130
+ if is_top_level_module:
131
+ self._dump_init_info(logger_name)
132
+
133
+ for sub_module in self.modules():
134
+ del sub_module._params_init_info
135
+
136
+ @master_only
137
+ def _dump_init_info(self, logger_name):
138
+ """Dump the initialization information to a file named
139
+ `initialization.log.json` in workdir.
140
+
141
+ Args:
142
+ logger_name (str): The name of logger.
143
+ """
144
+
145
+ logger = get_logger(logger_name)
146
+
147
+ with_file_handler = False
148
+ # dump the information to the logger file if there is a `FileHandler`
149
+ for handler in logger.handlers:
150
+ if isinstance(handler, FileHandler):
151
+ handler.stream.write(
152
+ 'Name of parameter - Initialization information\n')
153
+ for name, param in self.named_parameters():
154
+ handler.stream.write(
155
+ f'\n{name} - {param.shape}: '
156
+ f"\n{self._params_init_info[param]['init_info']} \n")
157
+ handler.stream.flush()
158
+ with_file_handler = True
159
+ if not with_file_handler:
160
+ for name, param in self.named_parameters():
161
+ print_log(
162
+ f'\n{name} - {param.shape}: '
163
+ f"\n{self._params_init_info[param]['init_info']} \n ",
164
+ logger=logger_name)
165
+
166
+ def __repr__(self):
167
+ s = super().__repr__()
168
+ if self.init_cfg:
169
+ s += f'\ninit_cfg={self.init_cfg}'
170
+ return s
171
+
172
+
173
+ class Sequential(BaseModule, nn.Sequential):
174
+ """Sequential module in openmmlab.
175
+
176
+ Args:
177
+ init_cfg (dict, optional): Initialization config dict.
178
+ """
179
+
180
+ def __init__(self, *args, init_cfg=None):
181
+ BaseModule.__init__(self, init_cfg)
182
+ nn.Sequential.__init__(self, *args)
183
+
184
+
185
+ class ModuleList(BaseModule, nn.ModuleList):
186
+ """ModuleList in openmmlab.
187
+
188
+ Args:
189
+ modules (iterable, optional): an iterable of modules to add.
190
+ init_cfg (dict, optional): Initialization config dict.
191
+ """
192
+
193
+ def __init__(self, modules=None, init_cfg=None):
194
+ BaseModule.__init__(self, init_cfg)
195
+ nn.ModuleList.__init__(self, modules)
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/runner/base_runner.py ADDED
@@ -0,0 +1,542 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import copy
3
+ import logging
4
+ import os.path as osp
5
+ import warnings
6
+ from abc import ABCMeta, abstractmethod
7
+
8
+ import torch
9
+ from torch.optim import Optimizer
10
+
11
+ import annotator.uniformer.mmcv as mmcv
12
+ from ..parallel import is_module_wrapper
13
+ from .checkpoint import load_checkpoint
14
+ from .dist_utils import get_dist_info
15
+ from .hooks import HOOKS, Hook
16
+ from .log_buffer import LogBuffer
17
+ from .priority import Priority, get_priority
18
+ from .utils import get_time_str
19
+
20
+
21
+ class BaseRunner(metaclass=ABCMeta):
22
+ """The base class of Runner, a training helper for PyTorch.
23
+
24
+ All subclasses should implement the following APIs:
25
+
26
+ - ``run()``
27
+ - ``train()``
28
+ - ``val()``
29
+ - ``save_checkpoint()``
30
+
31
+ Args:
32
+ model (:obj:`torch.nn.Module`): The model to be run.
33
+ batch_processor (callable): A callable method that process a data
34
+ batch. The interface of this method should be
35
+ `batch_processor(model, data, train_mode) -> dict`
36
+ optimizer (dict or :obj:`torch.optim.Optimizer`): It can be either an
37
+ optimizer (in most cases) or a dict of optimizers (in models that
38
+ requires more than one optimizer, e.g., GAN).
39
+ work_dir (str, optional): The working directory to save checkpoints
40
+ and logs. Defaults to None.
41
+ logger (:obj:`logging.Logger`): Logger used during training.
42
+ Defaults to None. (The default value is just for backward
43
+ compatibility)
44
+ meta (dict | None): A dict records some import information such as
45
+ environment info and seed, which will be logged in logger hook.
46
+ Defaults to None.
47
+ max_epochs (int, optional): Total training epochs.
48
+ max_iters (int, optional): Total training iterations.
49
+ """
50
+
51
+ def __init__(self,
52
+ model,
53
+ batch_processor=None,
54
+ optimizer=None,
55
+ work_dir=None,
56
+ logger=None,
57
+ meta=None,
58
+ max_iters=None,
59
+ max_epochs=None):
60
+ if batch_processor is not None:
61
+ if not callable(batch_processor):
62
+ raise TypeError('batch_processor must be callable, '
63
+ f'but got {type(batch_processor)}')
64
+ warnings.warn('batch_processor is deprecated, please implement '
65
+ 'train_step() and val_step() in the model instead.')
66
+ # raise an error is `batch_processor` is not None and
67
+ # `model.train_step()` exists.
68
+ if is_module_wrapper(model):
69
+ _model = model.module
70
+ else:
71
+ _model = model
72
+ if hasattr(_model, 'train_step') or hasattr(_model, 'val_step'):
73
+ raise RuntimeError(
74
+ 'batch_processor and model.train_step()/model.val_step() '
75
+ 'cannot be both available.')
76
+ else:
77
+ assert hasattr(model, 'train_step')
78
+
79
+ # check the type of `optimizer`
80
+ if isinstance(optimizer, dict):
81
+ for name, optim in optimizer.items():
82
+ if not isinstance(optim, Optimizer):
83
+ raise TypeError(
84
+ f'optimizer must be a dict of torch.optim.Optimizers, '
85
+ f'but optimizer["{name}"] is a {type(optim)}')
86
+ elif not isinstance(optimizer, Optimizer) and optimizer is not None:
87
+ raise TypeError(
88
+ f'optimizer must be a torch.optim.Optimizer object '
89
+ f'or dict or None, but got {type(optimizer)}')
90
+
91
+ # check the type of `logger`
92
+ if not isinstance(logger, logging.Logger):
93
+ raise TypeError(f'logger must be a logging.Logger object, '
94
+ f'but got {type(logger)}')
95
+
96
+ # check the type of `meta`
97
+ if meta is not None and not isinstance(meta, dict):
98
+ raise TypeError(
99
+ f'meta must be a dict or None, but got {type(meta)}')
100
+
101
+ self.model = model
102
+ self.batch_processor = batch_processor
103
+ self.optimizer = optimizer
104
+ self.logger = logger
105
+ self.meta = meta
106
+ # create work_dir
107
+ if mmcv.is_str(work_dir):
108
+ self.work_dir = osp.abspath(work_dir)
109
+ mmcv.mkdir_or_exist(self.work_dir)
110
+ elif work_dir is None:
111
+ self.work_dir = None
112
+ else:
113
+ raise TypeError('"work_dir" must be a str or None')
114
+
115
+ # get model name from the model class
116
+ if hasattr(self.model, 'module'):
117
+ self._model_name = self.model.module.__class__.__name__
118
+ else:
119
+ self._model_name = self.model.__class__.__name__
120
+
121
+ self._rank, self._world_size = get_dist_info()
122
+ self.timestamp = get_time_str()
123
+ self.mode = None
124
+ self._hooks = []
125
+ self._epoch = 0
126
+ self._iter = 0
127
+ self._inner_iter = 0
128
+
129
+ if max_epochs is not None and max_iters is not None:
130
+ raise ValueError(
131
+ 'Only one of `max_epochs` or `max_iters` can be set.')
132
+
133
+ self._max_epochs = max_epochs
134
+ self._max_iters = max_iters
135
+ # TODO: Redesign LogBuffer, it is not flexible and elegant enough
136
+ self.log_buffer = LogBuffer()
137
+
138
+ @property
139
+ def model_name(self):
140
+ """str: Name of the model, usually the module class name."""
141
+ return self._model_name
142
+
143
+ @property
144
+ def rank(self):
145
+ """int: Rank of current process. (distributed training)"""
146
+ return self._rank
147
+
148
+ @property
149
+ def world_size(self):
150
+ """int: Number of processes participating in the job.
151
+ (distributed training)"""
152
+ return self._world_size
153
+
154
+ @property
155
+ def hooks(self):
156
+ """list[:obj:`Hook`]: A list of registered hooks."""
157
+ return self._hooks
158
+
159
+ @property
160
+ def epoch(self):
161
+ """int: Current epoch."""
162
+ return self._epoch
163
+
164
+ @property
165
+ def iter(self):
166
+ """int: Current iteration."""
167
+ return self._iter
168
+
169
+ @property
170
+ def inner_iter(self):
171
+ """int: Iteration in an epoch."""
172
+ return self._inner_iter
173
+
174
+ @property
175
+ def max_epochs(self):
176
+ """int: Maximum training epochs."""
177
+ return self._max_epochs
178
+
179
+ @property
180
+ def max_iters(self):
181
+ """int: Maximum training iterations."""
182
+ return self._max_iters
183
+
184
+ @abstractmethod
185
+ def train(self):
186
+ pass
187
+
188
+ @abstractmethod
189
+ def val(self):
190
+ pass
191
+
192
+ @abstractmethod
193
+ def run(self, data_loaders, workflow, **kwargs):
194
+ pass
195
+
196
+ @abstractmethod
197
+ def save_checkpoint(self,
198
+ out_dir,
199
+ filename_tmpl,
200
+ save_optimizer=True,
201
+ meta=None,
202
+ create_symlink=True):
203
+ pass
204
+
205
+ def current_lr(self):
206
+ """Get current learning rates.
207
+
208
+ Returns:
209
+ list[float] | dict[str, list[float]]: Current learning rates of all
210
+ param groups. If the runner has a dict of optimizers, this
211
+ method will return a dict.
212
+ """
213
+ if isinstance(self.optimizer, torch.optim.Optimizer):
214
+ lr = [group['lr'] for group in self.optimizer.param_groups]
215
+ elif isinstance(self.optimizer, dict):
216
+ lr = dict()
217
+ for name, optim in self.optimizer.items():
218
+ lr[name] = [group['lr'] for group in optim.param_groups]
219
+ else:
220
+ raise RuntimeError(
221
+ 'lr is not applicable because optimizer does not exist.')
222
+ return lr
223
+
224
+ def current_momentum(self):
225
+ """Get current momentums.
226
+
227
+ Returns:
228
+ list[float] | dict[str, list[float]]: Current momentums of all
229
+ param groups. If the runner has a dict of optimizers, this
230
+ method will return a dict.
231
+ """
232
+
233
+ def _get_momentum(optimizer):
234
+ momentums = []
235
+ for group in optimizer.param_groups:
236
+ if 'momentum' in group.keys():
237
+ momentums.append(group['momentum'])
238
+ elif 'betas' in group.keys():
239
+ momentums.append(group['betas'][0])
240
+ else:
241
+ momentums.append(0)
242
+ return momentums
243
+
244
+ if self.optimizer is None:
245
+ raise RuntimeError(
246
+ 'momentum is not applicable because optimizer does not exist.')
247
+ elif isinstance(self.optimizer, torch.optim.Optimizer):
248
+ momentums = _get_momentum(self.optimizer)
249
+ elif isinstance(self.optimizer, dict):
250
+ momentums = dict()
251
+ for name, optim in self.optimizer.items():
252
+ momentums[name] = _get_momentum(optim)
253
+ return momentums
254
+
255
+ def register_hook(self, hook, priority='NORMAL'):
256
+ """Register a hook into the hook list.
257
+
258
+ The hook will be inserted into a priority queue, with the specified
259
+ priority (See :class:`Priority` for details of priorities).
260
+ For hooks with the same priority, they will be triggered in the same
261
+ order as they are registered.
262
+
263
+ Args:
264
+ hook (:obj:`Hook`): The hook to be registered.
265
+ priority (int or str or :obj:`Priority`): Hook priority.
266
+ Lower value means higher priority.
267
+ """
268
+ assert isinstance(hook, Hook)
269
+ if hasattr(hook, 'priority'):
270
+ raise ValueError('"priority" is a reserved attribute for hooks')
271
+ priority = get_priority(priority)
272
+ hook.priority = priority
273
+ # insert the hook to a sorted list
274
+ inserted = False
275
+ for i in range(len(self._hooks) - 1, -1, -1):
276
+ if priority >= self._hooks[i].priority:
277
+ self._hooks.insert(i + 1, hook)
278
+ inserted = True
279
+ break
280
+ if not inserted:
281
+ self._hooks.insert(0, hook)
282
+
283
+ def register_hook_from_cfg(self, hook_cfg):
284
+ """Register a hook from its cfg.
285
+
286
+ Args:
287
+ hook_cfg (dict): Hook config. It should have at least keys 'type'
288
+ and 'priority' indicating its type and priority.
289
+
290
+ Notes:
291
+ The specific hook class to register should not use 'type' and
292
+ 'priority' arguments during initialization.
293
+ """
294
+ hook_cfg = hook_cfg.copy()
295
+ priority = hook_cfg.pop('priority', 'NORMAL')
296
+ hook = mmcv.build_from_cfg(hook_cfg, HOOKS)
297
+ self.register_hook(hook, priority=priority)
298
+
299
+ def call_hook(self, fn_name):
300
+ """Call all hooks.
301
+
302
+ Args:
303
+ fn_name (str): The function name in each hook to be called, such as
304
+ "before_train_epoch".
305
+ """
306
+ for hook in self._hooks:
307
+ getattr(hook, fn_name)(self)
308
+
309
+ def get_hook_info(self):
310
+ # Get hooks info in each stage
311
+ stage_hook_map = {stage: [] for stage in Hook.stages}
312
+ for hook in self.hooks:
313
+ try:
314
+ priority = Priority(hook.priority).name
315
+ except ValueError:
316
+ priority = hook.priority
317
+ classname = hook.__class__.__name__
318
+ hook_info = f'({priority:<12}) {classname:<35}'
319
+ for trigger_stage in hook.get_triggered_stages():
320
+ stage_hook_map[trigger_stage].append(hook_info)
321
+
322
+ stage_hook_infos = []
323
+ for stage in Hook.stages:
324
+ hook_infos = stage_hook_map[stage]
325
+ if len(hook_infos) > 0:
326
+ info = f'{stage}:\n'
327
+ info += '\n'.join(hook_infos)
328
+ info += '\n -------------------- '
329
+ stage_hook_infos.append(info)
330
+ return '\n'.join(stage_hook_infos)
331
+
332
+ def load_checkpoint(self,
333
+ filename,
334
+ map_location='cpu',
335
+ strict=False,
336
+ revise_keys=[(r'^module.', '')]):
337
+ return load_checkpoint(
338
+ self.model,
339
+ filename,
340
+ map_location,
341
+ strict,
342
+ self.logger,
343
+ revise_keys=revise_keys)
344
+
345
+ def resume(self,
346
+ checkpoint,
347
+ resume_optimizer=True,
348
+ map_location='default'):
349
+ if map_location == 'default':
350
+ if torch.cuda.is_available():
351
+ device_id = torch.cuda.current_device()
352
+ checkpoint = self.load_checkpoint(
353
+ checkpoint,
354
+ map_location=lambda storage, loc: storage.cuda(device_id))
355
+ else:
356
+ checkpoint = self.load_checkpoint(checkpoint)
357
+ else:
358
+ checkpoint = self.load_checkpoint(
359
+ checkpoint, map_location=map_location)
360
+
361
+ self._epoch = checkpoint['meta']['epoch']
362
+ self._iter = checkpoint['meta']['iter']
363
+ if self.meta is None:
364
+ self.meta = {}
365
+ self.meta.setdefault('hook_msgs', {})
366
+ # load `last_ckpt`, `best_score`, `best_ckpt`, etc. for hook messages
367
+ self.meta['hook_msgs'].update(checkpoint['meta'].get('hook_msgs', {}))
368
+
369
+ # Re-calculate the number of iterations when resuming
370
+ # models with different number of GPUs
371
+ if 'config' in checkpoint['meta']:
372
+ config = mmcv.Config.fromstring(
373
+ checkpoint['meta']['config'], file_format='.py')
374
+ previous_gpu_ids = config.get('gpu_ids', None)
375
+ if previous_gpu_ids and len(previous_gpu_ids) > 0 and len(
376
+ previous_gpu_ids) != self.world_size:
377
+ self._iter = int(self._iter * len(previous_gpu_ids) /
378
+ self.world_size)
379
+ self.logger.info('the iteration number is changed due to '
380
+ 'change of GPU number')
381
+
382
+ # resume meta information meta
383
+ self.meta = checkpoint['meta']
384
+
385
+ if 'optimizer' in checkpoint and resume_optimizer:
386
+ if isinstance(self.optimizer, Optimizer):
387
+ self.optimizer.load_state_dict(checkpoint['optimizer'])
388
+ elif isinstance(self.optimizer, dict):
389
+ for k in self.optimizer.keys():
390
+ self.optimizer[k].load_state_dict(
391
+ checkpoint['optimizer'][k])
392
+ else:
393
+ raise TypeError(
394
+ 'Optimizer should be dict or torch.optim.Optimizer '
395
+ f'but got {type(self.optimizer)}')
396
+
397
+ self.logger.info('resumed epoch %d, iter %d', self.epoch, self.iter)
398
+
399
+ def register_lr_hook(self, lr_config):
400
+ if lr_config is None:
401
+ return
402
+ elif isinstance(lr_config, dict):
403
+ assert 'policy' in lr_config
404
+ policy_type = lr_config.pop('policy')
405
+ # If the type of policy is all in lower case, e.g., 'cyclic',
406
+ # then its first letter will be capitalized, e.g., to be 'Cyclic'.
407
+ # This is for the convenient usage of Lr updater.
408
+ # Since this is not applicable for `
409
+ # CosineAnnealingLrUpdater`,
410
+ # the string will not be changed if it contains capital letters.
411
+ if policy_type == policy_type.lower():
412
+ policy_type = policy_type.title()
413
+ hook_type = policy_type + 'LrUpdaterHook'
414
+ lr_config['type'] = hook_type
415
+ hook = mmcv.build_from_cfg(lr_config, HOOKS)
416
+ else:
417
+ hook = lr_config
418
+ self.register_hook(hook, priority='VERY_HIGH')
419
+
420
+ def register_momentum_hook(self, momentum_config):
421
+ if momentum_config is None:
422
+ return
423
+ if isinstance(momentum_config, dict):
424
+ assert 'policy' in momentum_config
425
+ policy_type = momentum_config.pop('policy')
426
+ # If the type of policy is all in lower case, e.g., 'cyclic',
427
+ # then its first letter will be capitalized, e.g., to be 'Cyclic'.
428
+ # This is for the convenient usage of momentum updater.
429
+ # Since this is not applicable for
430
+ # `CosineAnnealingMomentumUpdater`,
431
+ # the string will not be changed if it contains capital letters.
432
+ if policy_type == policy_type.lower():
433
+ policy_type = policy_type.title()
434
+ hook_type = policy_type + 'MomentumUpdaterHook'
435
+ momentum_config['type'] = hook_type
436
+ hook = mmcv.build_from_cfg(momentum_config, HOOKS)
437
+ else:
438
+ hook = momentum_config
439
+ self.register_hook(hook, priority='HIGH')
440
+
441
+ def register_optimizer_hook(self, optimizer_config):
442
+ if optimizer_config is None:
443
+ return
444
+ if isinstance(optimizer_config, dict):
445
+ optimizer_config.setdefault('type', 'OptimizerHook')
446
+ hook = mmcv.build_from_cfg(optimizer_config, HOOKS)
447
+ else:
448
+ hook = optimizer_config
449
+ self.register_hook(hook, priority='ABOVE_NORMAL')
450
+
451
+ def register_checkpoint_hook(self, checkpoint_config):
452
+ if checkpoint_config is None:
453
+ return
454
+ if isinstance(checkpoint_config, dict):
455
+ checkpoint_config.setdefault('type', 'CheckpointHook')
456
+ hook = mmcv.build_from_cfg(checkpoint_config, HOOKS)
457
+ else:
458
+ hook = checkpoint_config
459
+ self.register_hook(hook, priority='NORMAL')
460
+
461
+ def register_logger_hooks(self, log_config):
462
+ if log_config is None:
463
+ return
464
+ log_interval = log_config['interval']
465
+ for info in log_config['hooks']:
466
+ logger_hook = mmcv.build_from_cfg(
467
+ info, HOOKS, default_args=dict(interval=log_interval))
468
+ self.register_hook(logger_hook, priority='VERY_LOW')
469
+
470
+ def register_timer_hook(self, timer_config):
471
+ if timer_config is None:
472
+ return
473
+ if isinstance(timer_config, dict):
474
+ timer_config_ = copy.deepcopy(timer_config)
475
+ hook = mmcv.build_from_cfg(timer_config_, HOOKS)
476
+ else:
477
+ hook = timer_config
478
+ self.register_hook(hook, priority='LOW')
479
+
480
+ def register_custom_hooks(self, custom_config):
481
+ if custom_config is None:
482
+ return
483
+
484
+ if not isinstance(custom_config, list):
485
+ custom_config = [custom_config]
486
+
487
+ for item in custom_config:
488
+ if isinstance(item, dict):
489
+ self.register_hook_from_cfg(item)
490
+ else:
491
+ self.register_hook(item, priority='NORMAL')
492
+
493
+ def register_profiler_hook(self, profiler_config):
494
+ if profiler_config is None:
495
+ return
496
+ if isinstance(profiler_config, dict):
497
+ profiler_config.setdefault('type', 'ProfilerHook')
498
+ hook = mmcv.build_from_cfg(profiler_config, HOOKS)
499
+ else:
500
+ hook = profiler_config
501
+ self.register_hook(hook)
502
+
503
+ def register_training_hooks(self,
504
+ lr_config,
505
+ optimizer_config=None,
506
+ checkpoint_config=None,
507
+ log_config=None,
508
+ momentum_config=None,
509
+ timer_config=dict(type='IterTimerHook'),
510
+ custom_hooks_config=None):
511
+ """Register default and custom hooks for training.
512
+
513
+ Default and custom hooks include:
514
+
515
+ +----------------------+-------------------------+
516
+ | Hooks | Priority |
517
+ +======================+=========================+
518
+ | LrUpdaterHook | VERY_HIGH (10) |
519
+ +----------------------+-------------------------+
520
+ | MomentumUpdaterHook | HIGH (30) |
521
+ +----------------------+-------------------------+
522
+ | OptimizerStepperHook | ABOVE_NORMAL (40) |
523
+ +----------------------+-------------------------+
524
+ | CheckpointSaverHook | NORMAL (50) |
525
+ +----------------------+-------------------------+
526
+ | IterTimerHook | LOW (70) |
527
+ +----------------------+-------------------------+
528
+ | LoggerHook(s) | VERY_LOW (90) |
529
+ +----------------------+-------------------------+
530
+ | CustomHook(s) | defaults to NORMAL (50) |
531
+ +----------------------+-------------------------+
532
+
533
+ If custom hooks have same priority with default hooks, custom hooks
534
+ will be triggered after default hooks.
535
+ """
536
+ self.register_lr_hook(lr_config)
537
+ self.register_momentum_hook(momentum_config)
538
+ self.register_optimizer_hook(optimizer_config)
539
+ self.register_checkpoint_hook(checkpoint_config)
540
+ self.register_timer_hook(timer_config)
541
+ self.register_logger_hooks(log_config)
542
+ self.register_custom_hooks(custom_hooks_config)
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/runner/builder.py ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import copy
3
+
4
+ from ..utils import Registry
5
+
6
+ RUNNERS = Registry('runner')
7
+ RUNNER_BUILDERS = Registry('runner builder')
8
+
9
+
10
+ def build_runner_constructor(cfg):
11
+ return RUNNER_BUILDERS.build(cfg)
12
+
13
+
14
+ def build_runner(cfg, default_args=None):
15
+ runner_cfg = copy.deepcopy(cfg)
16
+ constructor_type = runner_cfg.pop('constructor',
17
+ 'DefaultRunnerConstructor')
18
+ runner_constructor = build_runner_constructor(
19
+ dict(
20
+ type=constructor_type,
21
+ runner_cfg=runner_cfg,
22
+ default_args=default_args))
23
+ runner = runner_constructor()
24
+ return runner
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/runner/checkpoint.py ADDED
@@ -0,0 +1,707 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import io
3
+ import os
4
+ import os.path as osp
5
+ import pkgutil
6
+ import re
7
+ import time
8
+ import warnings
9
+ from collections import OrderedDict
10
+ from importlib import import_module
11
+ from tempfile import TemporaryDirectory
12
+
13
+ import torch
14
+ import torchvision
15
+ from torch.optim import Optimizer
16
+ from torch.utils import model_zoo
17
+
18
+ import annotator.uniformer.mmcv as mmcv
19
+ from ..fileio import FileClient
20
+ from ..fileio import load as load_file
21
+ from ..parallel import is_module_wrapper
22
+ from ..utils import mkdir_or_exist
23
+ from .dist_utils import get_dist_info
24
+
25
+ ENV_MMCV_HOME = 'MMCV_HOME'
26
+ ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
27
+ DEFAULT_CACHE_DIR = '~/.cache'
28
+
29
+
30
+ def _get_mmcv_home():
31
+ mmcv_home = os.path.expanduser(
32
+ os.getenv(
33
+ ENV_MMCV_HOME,
34
+ os.path.join(
35
+ os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'mmcv')))
36
+
37
+ mkdir_or_exist(mmcv_home)
38
+ return mmcv_home
39
+
40
+
41
def load_state_dict(module, state_dict, strict=False, logger=None):
    """Load state_dict to a module.

    This method is modified from :meth:`torch.nn.Module.load_state_dict`.
    Default value for ``strict`` is set to ``False`` and the message for
    param mismatch will be shown even if strict is False.

    Args:
        module (Module): Module that receives the state_dict.
        state_dict (OrderedDict): Weights.
        strict (bool): whether to strictly enforce that the keys
            in :attr:`state_dict` match the keys returned by this module's
            :meth:`~torch.nn.Module.state_dict` function. Default: ``False``.
        logger (:obj:`logging.Logger`, optional): Logger to log the error
            message. If not specified, print function will be used.
    """
    unexpected_keys = []
    all_missing_keys = []
    err_msg = []

    # ``_metadata`` carries per-submodule version info consumed by
    # ``_load_from_state_dict``; preserve it on the shallow copy.
    metadata = getattr(state_dict, '_metadata', None)
    state_dict = state_dict.copy()
    if metadata is not None:
        state_dict._metadata = metadata

    # use _load_from_state_dict to enable checkpoint version control
    def load(module, prefix=''):
        # recursively check parallel module in case that the model has a
        # complicated structure, e.g., nn.Module(nn.Module(DDP))
        if is_module_wrapper(module):
            module = module.module
        local_metadata = {} if metadata is None else metadata.get(
            prefix[:-1], {})
        module._load_from_state_dict(state_dict, prefix, local_metadata, True,
                                     all_missing_keys, unexpected_keys,
                                     err_msg)
        for name, child in module._modules.items():
            if child is not None:
                load(child, prefix + name + '.')

    load(module)
    load = None  # break load->load reference cycle

    # ignore "num_batches_tracked" of BN layers
    missing_keys = [
        key for key in all_missing_keys if 'num_batches_tracked' not in key
    ]

    if unexpected_keys:
        err_msg.append('unexpected key in source '
                       f'state_dict: {", ".join(unexpected_keys)}\n')
    if missing_keys:
        err_msg.append(
            f'missing keys in source state_dict: {", ".join(missing_keys)}\n')

    # Only rank 0 reports mismatches to avoid duplicated logs in
    # distributed training.
    rank, _ = get_dist_info()
    if len(err_msg) > 0 and rank == 0:
        err_msg.insert(
            0, 'The model and loaded state dict do not match exactly\n')
        err_msg = '\n'.join(err_msg)
        if strict:
            raise RuntimeError(err_msg)
        elif logger is not None:
            logger.warning(err_msg)
        else:
            print(err_msg)
107
+
108
+
109
def get_torchvision_models():
    """Collect the checkpoint URLs of all torchvision models.

    Walks every non-package submodule of ``torchvision.models`` and merges
    each submodule's ``model_urls`` mapping into a single dict.

    Returns:
        dict: Mapping from model name to checkpoint URL.
    """
    urls = {}
    for _, module_name, is_pkg in pkgutil.walk_packages(
            torchvision.models.__path__):
        if is_pkg:
            continue
        zoo_module = import_module(f'torchvision.models.{module_name}')
        # Submodules without a ``model_urls`` table contribute nothing.
        urls.update(getattr(zoo_module, 'model_urls', {}))
    return urls
119
+
120
+
121
def get_external_models():
    """Load the open-mmlab model-zoo URL table.

    Starts from the ``open_mmlab.json`` shipped with mmcv and lets a
    user-provided ``open_mmlab.json`` in the mmcv home directory override
    or extend entries.

    Returns:
        dict: Mapping from model name to URL or local path.
    """
    builtin_json = osp.join(mmcv.__path__[0], 'model_zoo/open_mmlab.json')
    urls = load_file(builtin_json)
    assert isinstance(urls, dict)

    user_json = osp.join(_get_mmcv_home(), 'open_mmlab.json')
    if osp.exists(user_json):
        user_urls = load_file(user_json)
        assert isinstance(user_urls, dict)
        urls.update(user_urls)

    return urls
133
+
134
+
135
def get_mmcls_models():
    """Return the mmcls model-zoo URL table shipped with mmcv.

    Returns:
        dict: Mapping from mmcls model name to checkpoint URL.
    """
    return load_file(osp.join(mmcv.__path__[0], 'model_zoo/mmcls.json'))
140
+
141
+
142
def get_deprecated_model_names():
    """Return the mapping from deprecated model names to their replacements.

    Returns:
        dict: Mapping ``old_name -> new_name`` read from
        ``model_zoo/deprecated.json`` shipped with mmcv.
    """
    json_path = osp.join(mmcv.__path__[0], 'model_zoo/deprecated.json')
    deprecated = load_file(json_path)
    assert isinstance(deprecated, dict)
    return deprecated
149
+
150
+
151
+ def _process_mmcls_checkpoint(checkpoint):
152
+ state_dict = checkpoint['state_dict']
153
+ new_state_dict = OrderedDict()
154
+ for k, v in state_dict.items():
155
+ if k.startswith('backbone.'):
156
+ new_state_dict[k[9:]] = v
157
+ new_checkpoint = dict(state_dict=new_state_dict)
158
+
159
+ return new_checkpoint
160
+
161
+
162
class CheckpointLoader:
    """A general checkpoint loader to manage all schemes."""

    # Maps URI prefix -> loader function. Kept reverse-sorted so longer /
    # more specific prefixes are matched before shorter ones (e.g.
    # 'https://' before '').
    _schemes = {}

    @classmethod
    def _register_scheme(cls, prefixes, loader, force=False):
        # Register ``loader`` for one or more prefixes; refuse to silently
        # overwrite an existing registration unless ``force`` is set.
        if isinstance(prefixes, str):
            prefixes = [prefixes]
        else:
            assert isinstance(prefixes, (list, tuple))
        for prefix in prefixes:
            if (prefix not in cls._schemes) or force:
                cls._schemes[prefix] = loader
            else:
                raise KeyError(
                    f'{prefix} is already registered as a loader backend, '
                    'add "force=True" if you want to override it')
        # sort, longer prefixes take priority
        cls._schemes = OrderedDict(
            sorted(cls._schemes.items(), key=lambda t: t[0], reverse=True))

    @classmethod
    def register_scheme(cls, prefixes, loader=None, force=False):
        """Register a loader to CheckpointLoader.

        This method can be used as a normal class method or a decorator.

        Args:
            prefixes (str or list[str] or tuple[str]):
                The prefix of the registered loader.
            loader (function, optional): The loader function to be registered.
                When this method is used as a decorator, loader is None.
                Defaults to None.
            force (bool, optional): Whether to override the loader
                if the prefix has already been registered. Defaults to False.
        """

        if loader is not None:
            cls._register_scheme(prefixes, loader, force=force)
            return

        def _register(loader_cls):
            cls._register_scheme(prefixes, loader_cls, force=force)
            return loader_cls

        return _register

    @classmethod
    def _get_checkpoint_loader(cls, path):
        """Finds a loader that supports the given path. Falls back to the local
        loader if no other loader is found.

        Args:
            path (str): checkpoint path

        Returns:
            loader (function): checkpoint loader
        """

        # ``_schemes`` is reverse-sorted, so the most specific matching
        # prefix wins; the '' prefix (local loader) matches everything.
        for p in cls._schemes:
            if path.startswith(p):
                return cls._schemes[p]

    @classmethod
    def load_checkpoint(cls, filename, map_location=None, logger=None):
        """load checkpoint through URL scheme path.

        Args:
            filename (str): checkpoint file name with given prefix
            map_location (str, optional): Same as :func:`torch.load`.
                Default: None
            logger (:mod:`logging.Logger`, optional): The logger for message.
                Default: None

        Returns:
            dict or OrderedDict: The loaded checkpoint.
        """

        checkpoint_loader = cls._get_checkpoint_loader(filename)
        class_name = checkpoint_loader.__name__
        # BUGFIX: the log message had a dangling '(unknown)' placeholder;
        # report the actual checkpoint path. ``class_name[10:]`` strips the
        # 'load_from_' prefix of the loader function name.
        mmcv.print_log(
            f'load checkpoint from {class_name[10:]} path: {filename}', logger)
        return checkpoint_loader(filename, map_location)
246
+
247
+
248
@CheckpointLoader.register_scheme(prefixes='')
def load_from_local(filename, map_location):
    """load checkpoint by local file path.

    Args:
        filename (str): local checkpoint file path
        map_location (str, optional): Same as :func:`torch.load`.

    Returns:
        dict or OrderedDict: The loaded checkpoint.

    Raises:
        IOError: If ``filename`` does not point to an existing file.
    """

    if not osp.isfile(filename):
        # BUGFIX: the original f-string contained a literal '(unknown)'
        # instead of the offending path.
        raise IOError(f'{filename} is not a checkpoint file')
    checkpoint = torch.load(filename, map_location=map_location)
    return checkpoint
264
+
265
+
266
@CheckpointLoader.register_scheme(prefixes=('http://', 'https://'))
def load_from_http(filename, map_location=None, model_dir=None):
    """load checkpoint through HTTP or HTTPS scheme path. In distributed
    setting, this function only download checkpoint at local rank 0.

    Args:
        filename (str): checkpoint file path with modelzoo or
            torchvision prefix
        map_location (str, optional): Same as :func:`torch.load`.
        model_dir (string, optional): directory in which to save the object,
            Default: None

    Returns:
        dict or OrderedDict: The loaded checkpoint.
    """
    rank, world_size = get_dist_info()
    # Prefer the per-node LOCAL_RANK so only one process per node downloads.
    rank = int(os.environ.get('LOCAL_RANK', rank))
    if rank == 0:
        # Rank 0 downloads first; load_url caches the file on disk.
        checkpoint = model_zoo.load_url(
            filename, model_dir=model_dir, map_location=map_location)
    if world_size > 1:
        # Remaining ranks wait for rank 0 to finish, then read the file
        # from the (now populated) local cache.
        torch.distributed.barrier()
        if rank > 0:
            checkpoint = model_zoo.load_url(
                filename, model_dir=model_dir, map_location=map_location)
    return checkpoint
292
+
293
+
294
@CheckpointLoader.register_scheme(prefixes='pavi://')
def load_from_pavi(filename, map_location=None):
    """load checkpoint through the file path prefixed with pavi. In distributed
    setting, this function download ckpt at all ranks to different temporary
    directories.

    Args:
        filename (str): checkpoint file path with pavi prefix
        map_location (str, optional): Same as :func:`torch.load`.
            Default: None

    Returns:
        dict or OrderedDict: The loaded checkpoint.
    """
    # BUGFIX: the assertion message had a dangling '(unknown)' placeholder;
    # report the actual filename.
    assert filename.startswith('pavi://'), \
        f'Expected filename startswith `pavi://`, but got {filename}'
    # Strip the 'pavi://' prefix (7 characters) to get the model path.
    model_path = filename[7:]

    try:
        from pavi import modelcloud
    except ImportError:
        raise ImportError(
            'Please install pavi to load checkpoint from modelcloud.')

    model = modelcloud.get(model_path)
    with TemporaryDirectory() as tmp_dir:
        # Download into a per-process temporary directory so concurrent
        # ranks do not clobber each other's files.
        downloaded_file = osp.join(tmp_dir, model.name)
        model.download(downloaded_file)
        checkpoint = torch.load(downloaded_file, map_location=map_location)
    return checkpoint
324
+
325
+
326
@CheckpointLoader.register_scheme(prefixes='s3://')
def load_from_ceph(filename, map_location=None, backend='petrel'):
    """load checkpoint through the file path prefixed with s3. In distributed
    setting, this function download ckpt at all ranks to different temporary
    directories.

    Args:
        filename (str): checkpoint file path with s3 prefix
        map_location (str, optional): Same as :func:`torch.load`.
        backend (str, optional): The storage backend type. Options are 'ceph',
            'petrel'. Default: 'petrel'.

    .. warning::
        :class:`mmcv.fileio.file_client.CephBackend` will be deprecated,
        please use :class:`mmcv.fileio.file_client.PetrelBackend` instead.

    Returns:
        dict or OrderedDict: The loaded checkpoint.

    Raises:
        ValueError: If ``backend`` is neither 'ceph' nor 'petrel'.
    """
    allowed_backends = ['ceph', 'petrel']
    if backend not in allowed_backends:
        raise ValueError(f'Load from Backend {backend} is not supported.')

    if backend == 'ceph':
        warnings.warn(
            'CephBackend will be deprecated, please use PetrelBackend instead')

    # CephClient and PetrelBackend have the same prefix 's3://' and the latter
    # will be chosen as default. If PetrelBackend can not be instantiated
    # successfully, the CephClient will be chosen.
    try:
        file_client = FileClient(backend=backend)
    except ImportError:
        # Fall back to whichever of the two backends was not requested.
        allowed_backends.remove(backend)
        file_client = FileClient(backend=allowed_backends[0])

    # Read the whole object into memory and deserialize from the buffer.
    with io.BytesIO(file_client.get(filename)) as buffer:
        checkpoint = torch.load(buffer, map_location=map_location)
    return checkpoint
365
+
366
+
367
@CheckpointLoader.register_scheme(prefixes=('modelzoo://', 'torchvision://'))
def load_from_torchvision(filename, map_location=None):
    """Load a checkpoint from the torchvision model zoo.

    Accepts paths prefixed with ``torchvision://`` or the deprecated
    ``modelzoo://`` and downloads the matching torchvision checkpoint.

    Args:
        filename (str): checkpoint file path with modelzoo or
            torchvision prefix.
        map_location (str, optional): Same as :func:`torch.load`.

    Returns:
        dict or OrderedDict: The loaded checkpoint.
    """
    model_urls = get_torchvision_models()
    if filename.startswith('modelzoo://'):
        warnings.warn('The URL scheme of "modelzoo://" is deprecated, please '
                      'use "torchvision://" instead')
        model_name = filename[len('modelzoo://'):]
    else:
        model_name = filename[len('torchvision://'):]
    return load_from_http(model_urls[model_name], map_location=map_location)
388
+
389
+
390
@CheckpointLoader.register_scheme(prefixes=('open-mmlab://', 'openmmlab://'))
def load_from_openmmlab(filename, map_location=None):
    """load checkpoint through the file path prefixed with open-mmlab or
    openmmlab.

    Args:
        filename (str): checkpoint file path with open-mmlab or
            openmmlab prefix
        map_location (str, optional): Same as :func:`torch.load`.
            Default: None

    Returns:
        dict or OrderedDict: The loaded checkpoint.

    Raises:
        IOError: If the resolved local checkpoint file does not exist.
    """

    model_urls = get_external_models()
    prefix_str = 'open-mmlab://'
    if filename.startswith(prefix_str):
        model_name = filename[len(prefix_str):]
    else:
        prefix_str = 'openmmlab://'
        model_name = filename[len(prefix_str):]

    # Transparently redirect deprecated model names to their replacements.
    deprecated_urls = get_deprecated_model_names()
    if model_name in deprecated_urls:
        warnings.warn(f'{prefix_str}{model_name} is deprecated in favor '
                      f'of {prefix_str}{deprecated_urls[model_name]}')
        model_name = deprecated_urls[model_name]
    model_url = model_urls[model_name]
    # check if is url
    if model_url.startswith(('http://', 'https://')):
        checkpoint = load_from_http(model_url, map_location=map_location)
    else:
        # Non-URL entries are paths relative to the mmcv home directory.
        filename = osp.join(_get_mmcv_home(), model_url)
        if not osp.isfile(filename):
            # BUGFIX: the original f-string contained a literal '(unknown)'
            # instead of the offending path.
            raise IOError(f'{filename} is not a checkpoint file')
        checkpoint = torch.load(filename, map_location=map_location)
    return checkpoint
428
+
429
+
430
@CheckpointLoader.register_scheme(prefixes='mmcls://')
def load_from_mmcls(filename, map_location=None):
    """Load a checkpoint from the mmcls model zoo.

    Downloads the checkpoint named after the ``mmcls://`` prefix and strips
    the ``backbone.`` key prefix so the weights fit a bare backbone.

    Args:
        filename (str): checkpoint file path with mmcls prefix.
        map_location (str, optional): Same as :func:`torch.load`.

    Returns:
        dict or OrderedDict: The loaded and re-keyed checkpoint.
    """

    model_name = filename[len('mmcls://'):]
    raw_checkpoint = load_from_http(
        get_mmcls_models()[model_name], map_location=map_location)
    return _process_mmcls_checkpoint(raw_checkpoint)
448
+
449
+
450
def _load_checkpoint(filename, map_location=None, logger=None):
    """Load a checkpoint from anywhere supported by :class:`CheckpointLoader`.

    Thin convenience wrapper that dispatches on the path scheme (local
    path, URL, ``torchvision://xxx``, ``open-mmlab://xxx``, ...). Please
    refer to ``docs/model_zoo.md`` for details.

    Args:
        filename (str): Checkpoint path or URI.
        map_location (str, optional): Same as :func:`torch.load`.
            Default: None.
        logger (:mod:`logging.Logger`, optional): The logger for error
            message. Default: None.

    Returns:
        dict or OrderedDict: The loaded checkpoint — either an OrderedDict
        of weights or a dict with additional information, depending on
        what was saved.
    """
    return CheckpointLoader.load_checkpoint(filename, map_location, logger)
468
+
469
+
470
def _load_checkpoint_with_prefix(prefix, filename, map_location=None):
    """Load the sub-module weights stored under ``prefix`` in a checkpoint.

    Args:
        prefix (str): The prefix of sub-module (a trailing dot is appended
            automatically if missing).
        filename (str): Accept local filepath, URL, ``torchvision://xxx``,
            ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
            details.
        map_location (str | None): Same as :func:`torch.load`. Default: None.

    Returns:
        dict or OrderedDict: The matching entries with ``prefix`` stripped
        from their keys.

    Raises:
        AssertionError: If no key in the checkpoint starts with ``prefix``.
    """

    checkpoint = _load_checkpoint(filename, map_location=map_location)

    # Checkpoints may either wrap the weights in 'state_dict' or be the
    # weights mapping itself.
    state_dict = checkpoint.get('state_dict', checkpoint)

    if not prefix.endswith('.'):
        prefix += '.'
    cut = len(prefix)

    state_dict = {
        key[cut:]: value
        for key, value in state_dict.items() if key.startswith(prefix)
    }

    assert state_dict, f'{prefix} is not in the pretrained model'
    return state_dict
501
+
502
+
503
def load_checkpoint(model,
                    filename,
                    map_location=None,
                    strict=False,
                    logger=None,
                    revise_keys=[(r'^module\.', '')]):
    """Load checkpoint from a file or URI.

    Args:
        model (Module): Module to load checkpoint.
        filename (str): Accept local filepath, URL, ``torchvision://xxx``,
            ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
            details.
        map_location (str): Same as :func:`torch.load`.
        strict (bool): Whether to allow different params for the model and
            checkpoint.
        logger (:mod:`logging.Logger` or None): The logger for error message.
        revise_keys (list): A list of customized keywords to modify the
            state_dict in checkpoint. Each item is a (pattern, replacement)
            pair of the regular expression operations. Default: strip
            the prefix 'module.' by [(r'^module\\.', '')].
            (The mutable default is safe here: the list is never mutated.)

    Returns:
        dict or OrderedDict: The loaded checkpoint.

    Raises:
        RuntimeError: If the loaded object is not a dict.
    """
    checkpoint = _load_checkpoint(filename, map_location, logger)
    # OrderedDict is a subclass of dict
    if not isinstance(checkpoint, dict):
        # BUGFIX: the original f-string contained a literal '(unknown)'
        # instead of the offending file name.
        raise RuntimeError(
            f'No state_dict found in checkpoint file {filename}')
    # get state_dict from checkpoint
    if 'state_dict' in checkpoint:
        state_dict = checkpoint['state_dict']
    else:
        state_dict = checkpoint

    # strip prefix of state_dict
    metadata = getattr(state_dict, '_metadata', OrderedDict())
    for p, r in revise_keys:
        state_dict = OrderedDict(
            {re.sub(p, r, k): v
             for k, v in state_dict.items()})
    # Keep metadata in state_dict
    state_dict._metadata = metadata

    # load state_dict
    load_state_dict(model, state_dict, strict, logger)
    return checkpoint
551
+
552
+
553
def weights_to_cpu(state_dict):
    """Copy a model state_dict to cpu.

    Args:
        state_dict (OrderedDict): Model weights on GPU.

    Returns:
        OrderedDict: Model weights on CPU.
    """
    # DOCFIX: the original docstring claimed the returned weights were
    # "on GPU"; this function moves every tensor to the CPU.
    state_dict_cpu = OrderedDict()
    for key, val in state_dict.items():
        state_dict_cpu[key] = val.cpu()
    # Keep metadata in state_dict
    state_dict_cpu._metadata = getattr(state_dict, '_metadata', OrderedDict())
    return state_dict_cpu
568
+
569
+
570
+ def _save_to_state_dict(module, destination, prefix, keep_vars):
571
+ """Saves module state to `destination` dictionary.
572
+
573
+ This method is modified from :meth:`torch.nn.Module._save_to_state_dict`.
574
+
575
+ Args:
576
+ module (nn.Module): The module to generate state_dict.
577
+ destination (dict): A dict where state will be stored.
578
+ prefix (str): The prefix for parameters and buffers used in this
579
+ module.
580
+ """
581
+ for name, param in module._parameters.items():
582
+ if param is not None:
583
+ destination[prefix + name] = param if keep_vars else param.detach()
584
+ for name, buf in module._buffers.items():
585
+ # remove check of _non_persistent_buffers_set to allow nn.BatchNorm2d
586
+ if buf is not None:
587
+ destination[prefix + name] = buf if keep_vars else buf.detach()
588
+
589
+
590
def get_state_dict(module, destination=None, prefix='', keep_vars=False):
    """Returns a dictionary containing a whole state of the module.

    Both parameters and persistent buffers (e.g. running averages) are
    included. Keys are corresponding parameter and buffer names.

    This method is modified from :meth:`torch.nn.Module.state_dict` to
    recursively check parallel module in case that the model has a complicated
    structure, e.g., nn.Module(nn.Module(DDP)).

    Args:
        module (nn.Module): The module to generate state_dict.
        destination (OrderedDict): Returned dict for the state of the
            module.
        prefix (str): Prefix of the key.
        keep_vars (bool): Whether to keep the variable property of the
            parameters. Default: False.

    Returns:
        dict: A dictionary containing a whole state of the module.
    """
    # recursively check parallel module in case that the model has a
    # complicated structure, e.g., nn.Module(nn.Module(DDP))
    if is_module_wrapper(module):
        module = module.module

    # below is the same as torch.nn.Module.state_dict()
    if destination is None:
        destination = OrderedDict()
        destination._metadata = OrderedDict()
    # Record this submodule's version so loading can apply version-specific
    # key migrations.
    destination._metadata[prefix[:-1]] = local_metadata = dict(
        version=module._version)
    _save_to_state_dict(module, destination, prefix, keep_vars)
    for name, child in module._modules.items():
        if child is not None:
            get_state_dict(
                child, destination, prefix + name + '.', keep_vars=keep_vars)
    # State-dict hooks may post-process (and optionally replace) the result.
    for hook in module._state_dict_hooks.values():
        hook_result = hook(module, destination, prefix, local_metadata)
        if hook_result is not None:
            destination = hook_result
    return destination
632
+
633
+
634
def save_checkpoint(model,
                    filename,
                    optimizer=None,
                    meta=None,
                    file_client_args=None):
    """Save checkpoint to file.

    The checkpoint will have 3 fields: ``meta``, ``state_dict`` and
    ``optimizer``. By default ``meta`` will contain version and time info.

    Args:
        model (Module): Module whose params are to be saved.
        filename (str): Checkpoint filename.
        optimizer (:obj:`Optimizer`, optional): Optimizer to be saved.
        meta (dict, optional): Metadata to be saved in checkpoint.
        file_client_args (dict, optional): Arguments to instantiate a
            FileClient. See :class:`mmcv.fileio.FileClient` for details.
            Default: None.
            `New in version 1.3.16.`

    Raises:
        TypeError: If ``meta`` is neither None nor a dict.
        ValueError: If ``file_client_args`` is given together with a
            ``pavi://`` filename.
    """
    if meta is None:
        meta = {}
    elif not isinstance(meta, dict):
        raise TypeError(f'meta must be a dict or None, but got {type(meta)}')
    # Stamp the checkpoint with the mmcv version and save time.
    meta.update(mmcv_version=mmcv.__version__, time=time.asctime())

    # Unwrap DataParallel/DistributedDataParallel-style wrappers so keys
    # are not prefixed with 'module.'.
    if is_module_wrapper(model):
        model = model.module

    if hasattr(model, 'CLASSES') and model.CLASSES is not None:
        # save class name to the meta
        meta.update(CLASSES=model.CLASSES)

    checkpoint = {
        'meta': meta,
        'state_dict': weights_to_cpu(get_state_dict(model))
    }
    # save optimizer state dict in the checkpoint
    if isinstance(optimizer, Optimizer):
        checkpoint['optimizer'] = optimizer.state_dict()
    elif isinstance(optimizer, dict):
        # Support multiple named optimizers.
        checkpoint['optimizer'] = {}
        for name, optim in optimizer.items():
            checkpoint['optimizer'][name] = optim.state_dict()

    if filename.startswith('pavi://'):
        if file_client_args is not None:
            raise ValueError(
                'file_client_args should be "None" if filename starts with'
                f'"pavi://", but got {file_client_args}')
        try:
            from pavi import modelcloud
            from pavi import exception
        except ImportError:
            raise ImportError(
                'Please install pavi to load checkpoint from modelcloud.')
        model_path = filename[7:]
        root = modelcloud.Folder()
        model_dir, model_name = osp.split(model_path)
        try:
            model = modelcloud.get(model_dir)
        except exception.NodeNotFoundError:
            # Target folder does not exist yet; create it.
            model = root.create_training_model(model_dir)
        with TemporaryDirectory() as tmp_dir:
            # Serialize locally first, then upload the file to pavi.
            checkpoint_file = osp.join(tmp_dir, model_name)
            with open(checkpoint_file, 'wb') as f:
                torch.save(checkpoint, f)
                f.flush()
            model.create_file(checkpoint_file, name=model_name)
    else:
        # Serialize into memory and hand the bytes to the file client
        # (local disk, ceph, petrel, ... depending on args/path).
        file_client = FileClient.infer_client(file_client_args, filename)
        with io.BytesIO() as f:
            torch.save(checkpoint, f)
            file_client.put(f.getvalue(), filename)
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/runner/default_constructor.py ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .builder import RUNNER_BUILDERS, RUNNERS
2
+
3
+
4
@RUNNER_BUILDERS.register_module()
class DefaultRunnerConstructor:
    """Default constructor for runners.

    Customize an existing ``Runner`` like ``EpochBasedRunner`` through a
    ``RunnerConstructor``. For example, we can inject some new properties
    and functions for ``Runner``.

    Example:
        >>> from annotator.uniformer.mmcv.runner import RUNNER_BUILDERS, build_runner
        >>> # Define a new RunnerConstructor
        >>> @RUNNER_BUILDERS.register_module()
        >>> class MyRunnerConstructor:
        ...     def __init__(self, runner_cfg, default_args=None):
        ...         if not isinstance(runner_cfg, dict):
        ...             raise TypeError('runner_cfg should be a dict',
        ...                             f'but got {type(runner_cfg)}')
        ...         self.runner_cfg = runner_cfg
        ...         self.default_args = default_args
        ...
        ...     def __call__(self):
        ...         runner = RUNNERS.build(self.runner_cfg,
        ...                                default_args=self.default_args)
        ...         # Add new properties for existing runner
        ...         runner.my_name = 'my_runner'
        ...         runner.my_function = lambda self: print(self.my_name)
        ...         ...
        >>> # build your runner
        >>> runner_cfg = dict(type='EpochBasedRunner', max_epochs=40,
        ...                   constructor='MyRunnerConstructor')
        >>> runner = build_runner(runner_cfg)
    """

    def __init__(self, runner_cfg, default_args=None):
        # ``runner_cfg`` must be a config dict describing the runner type.
        if not isinstance(runner_cfg, dict):
            raise TypeError('runner_cfg should be a dict',
                            f'but got {type(runner_cfg)}')
        self.runner_cfg = runner_cfg
        self.default_args = default_args

    def __call__(self):
        # Build the runner from the registry, forwarding default arguments.
        return RUNNERS.build(self.runner_cfg, default_args=self.default_args)
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/runner/dist_utils.py ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import functools
3
+ import os
4
+ import subprocess
5
+ from collections import OrderedDict
6
+
7
+ import torch
8
+ import torch.multiprocessing as mp
9
+ from torch import distributed as dist
10
+ from torch._utils import (_flatten_dense_tensors, _take_tensors,
11
+ _unflatten_dense_tensors)
12
+
13
+
14
def init_dist(launcher, backend='nccl', **kwargs):
    """Initialize distributed training for the given launcher.

    Args:
        launcher (str): One of ``'pytorch'``, ``'mpi'`` or ``'slurm'``.
        backend (str): Backend of torch.distributed. Default: ``'nccl'``.
        **kwargs: Forwarded to the launcher-specific initializer.

    Raises:
        ValueError: If ``launcher`` is not a supported type.
    """
    # Ensure a multiprocessing start method is set before any workers spawn.
    if mp.get_start_method(allow_none=True) is None:
        mp.set_start_method('spawn')
    initializers = {
        'pytorch': _init_dist_pytorch,
        'mpi': _init_dist_mpi,
        'slurm': _init_dist_slurm,
    }
    if launcher not in initializers:
        raise ValueError(f'Invalid launcher type: {launcher}')
    initializers[launcher](backend, **kwargs)
25
+
26
+
27
def _init_dist_pytorch(backend, **kwargs):
    """Initialize the process group for the 'pytorch' launcher.

    Reads the global rank from the ``RANK`` environment variable and binds
    this process to GPU ``rank % num_gpus`` before creating the group.
    """
    # TODO: use local_rank instead of rank % num_gpus
    rank = int(os.environ['RANK'])
    num_gpus = torch.cuda.device_count()
    torch.cuda.set_device(rank % num_gpus)
    dist.init_process_group(backend=backend, **kwargs)
33
+
34
+
35
def _init_dist_mpi(backend, **kwargs):
    """Initialize the process group for the 'mpi' launcher.

    Reads the global rank from the ``OMPI_COMM_WORLD_RANK`` environment
    variable (set by Open MPI) and binds this process to GPU
    ``rank % num_gpus`` before creating the group.
    """
    # TODO: use local_rank instead of rank % num_gpus
    rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
    num_gpus = torch.cuda.device_count()
    torch.cuda.set_device(rank % num_gpus)
    dist.init_process_group(backend=backend, **kwargs)
41
+
42
+
43
def _init_dist_slurm(backend, port=None):
    """Initialize slurm distributed training environment.

    If argument ``port`` is not specified, then the master port will be system
    environment variable ``MASTER_PORT``. If ``MASTER_PORT`` is not in system
    environment variable, then a default port ``29500`` will be used.

    Args:
        backend (str): Backend of torch.distributed.
        port (int, optional): Master port. Defaults to None.
    """
    proc_id = int(os.environ['SLURM_PROCID'])
    ntasks = int(os.environ['SLURM_NTASKS'])
    node_list = os.environ['SLURM_NODELIST']
    num_gpus = torch.cuda.device_count()
    torch.cuda.set_device(proc_id % num_gpus)
    # Resolve the first hostname in the allocation; it acts as the master.
    addr = subprocess.getoutput(
        f'scontrol show hostname {node_list} | head -n1')
    # specify master port
    if port is not None:
        os.environ['MASTER_PORT'] = str(port)
    elif 'MASTER_PORT' in os.environ:
        pass  # use MASTER_PORT in the environment variable
    else:
        # 29500 is torch.distributed default port
        os.environ['MASTER_PORT'] = '29500'
    # use MASTER_ADDR in the environment variable if it already exists
    if 'MASTER_ADDR' not in os.environ:
        os.environ['MASTER_ADDR'] = addr
    # Export the variables torch.distributed's env:// init method reads.
    os.environ['WORLD_SIZE'] = str(ntasks)
    os.environ['LOCAL_RANK'] = str(proc_id % num_gpus)
    os.environ['RANK'] = str(proc_id)
    dist.init_process_group(backend=backend)
76
+
77
+
78
def get_dist_info():
    """Return ``(rank, world_size)`` of the current process.

    Falls back to ``(0, 1)`` when torch.distributed is unavailable or the
    default process group has not been initialized.
    """
    if not (dist.is_available() and dist.is_initialized()):
        return 0, 1
    return dist.get_rank(), dist.get_world_size()
86
+
87
+
88
def master_only(func):
    """Decorator that executes ``func`` only on the master process (rank 0).

    On all other ranks the call is skipped and ``None`` is returned.
    """

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        rank, _ = get_dist_info()
        if rank != 0:
            return None
        return func(*args, **kwargs)

    return wrapper
97
+
98
+
99
def allreduce_params(params, coalesce=True, bucket_size_mb=-1):
    """Allreduce parameters.

    Args:
        params (list[torch.Parameters]): List of parameters or buffers of a
            model.
        coalesce (bool, optional): Whether allreduce parameters as a whole.
            Defaults to True.
        bucket_size_mb (int, optional): Size of bucket, the unit is MB.
            Defaults to -1.
    """
    _, world_size = get_dist_info()
    # Nothing to synchronize in single-process runs.
    if world_size == 1:
        return
    params = [param.data for param in params]
    if coalesce:
        # Flatten tensors into buckets to cut per-tensor allreduce overhead.
        _allreduce_coalesced(params, world_size, bucket_size_mb)
    else:
        for tensor in params:
            # Average in place: divide by world size, then sum across ranks.
            dist.all_reduce(tensor.div_(world_size))
119
+
120
+
121
def allreduce_grads(params, coalesce=True, bucket_size_mb=-1):
    """Allreduce gradients.

    Args:
        params (list[torch.Parameters]): List of parameters of a model
        coalesce (bool, optional): Whether allreduce parameters as a whole.
            Defaults to True.
        bucket_size_mb (int, optional): Size of bucket, the unit is MB.
            Defaults to -1.
    """
    # Only parameters that actually received a gradient participate.
    grads = [
        param.grad.data for param in params
        if param.requires_grad and param.grad is not None
    ]
    _, world_size = get_dist_info()
    # Nothing to synchronize in single-process runs.
    if world_size == 1:
        return
    if coalesce:
        # Flatten tensors into buckets to cut per-tensor allreduce overhead.
        _allreduce_coalesced(grads, world_size, bucket_size_mb)
    else:
        for tensor in grads:
            # Average in place: divide by world size, then sum across ranks.
            dist.all_reduce(tensor.div_(world_size))
143
+
144
+
145
def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1):
    """Allreduce tensors in flattened buckets and average the results.

    Tensors are grouped (by size limit if ``bucket_size_mb > 0``, otherwise
    by dtype), each group is flattened into one contiguous tensor,
    allreduced in a single call, averaged, and the synced values are copied
    back into the original tensors in place.
    """
    if bucket_size_mb > 0:
        bucket_size_bytes = bucket_size_mb * 1024 * 1024
        # _take_tensors yields size-limited groups of same-typed tensors.
        buckets = _take_tensors(tensors, bucket_size_bytes)
    else:
        # No size limit: one bucket per tensor type (flatten requires
        # uniform dtype within a bucket).
        buckets = OrderedDict()
        for tensor in tensors:
            tp = tensor.type()
            if tp not in buckets:
                buckets[tp] = []
            buckets[tp].append(tensor)
        buckets = buckets.values()

    for bucket in buckets:
        flat_tensors = _flatten_dense_tensors(bucket)
        dist.all_reduce(flat_tensors)
        # Convert the summed result into an average.
        flat_tensors.div_(world_size)
        for tensor, synced in zip(
                bucket, _unflatten_dense_tensors(flat_tensors, bucket)):
            # Write the synchronized values back into the original storage.
            tensor.copy_(synced)
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/runner/epoch_based_runner.py ADDED
@@ -0,0 +1,187 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import os.path as osp
3
+ import platform
4
+ import shutil
5
+ import time
6
+ import warnings
7
+
8
+ import torch
9
+
10
+ import annotator.uniformer.mmcv as mmcv
11
+ from .base_runner import BaseRunner
12
+ from .builder import RUNNERS
13
+ from .checkpoint import save_checkpoint
14
+ from .utils import get_host_info
15
+
16
+
17
@RUNNERS.register_module()
class EpochBasedRunner(BaseRunner):
    """Epoch-based Runner.

    This runner train models epoch by epoch.
    """

    def run_iter(self, data_batch, train_mode, **kwargs):
        """Run a single iteration.

        Dispatches to the user-supplied ``batch_processor`` when set,
        otherwise to the model's ``train_step``/``val_step``.  The returned
        dict is stored in ``self.outputs`` and its ``log_vars`` (if present)
        are pushed into the log buffer.
        """
        if self.batch_processor is not None:
            outputs = self.batch_processor(
                self.model, data_batch, train_mode=train_mode, **kwargs)
        elif train_mode:
            outputs = self.model.train_step(data_batch, self.optimizer,
                                            **kwargs)
        else:
            outputs = self.model.val_step(data_batch, self.optimizer, **kwargs)
        if not isinstance(outputs, dict):
            # Fix: the two literal fragments previously joined without a
            # separating space, yielding '...train_step()"and "model...'.
            raise TypeError('"batch_processor()" or "model.train_step()" '
                            'and "model.val_step()" must return a dict')
        if 'log_vars' in outputs:
            self.log_buffer.update(outputs['log_vars'], outputs['num_samples'])
        self.outputs = outputs

    def train(self, data_loader, **kwargs):
        """Run one training epoch over ``data_loader``."""
        self.model.train()
        self.mode = 'train'
        self.data_loader = data_loader
        self._max_iters = self._max_epochs * len(self.data_loader)
        self.call_hook('before_train_epoch')
        time.sleep(2)  # Prevent possible deadlock during epoch transition
        for i, data_batch in enumerate(self.data_loader):
            self._inner_iter = i
            self.call_hook('before_train_iter')
            self.run_iter(data_batch, train_mode=True, **kwargs)
            self.call_hook('after_train_iter')
            self._iter += 1

        self.call_hook('after_train_epoch')
        self._epoch += 1

    @torch.no_grad()
    def val(self, data_loader, **kwargs):
        """Run one validation epoch over ``data_loader`` (gradients off)."""
        self.model.eval()
        self.mode = 'val'
        self.data_loader = data_loader
        self.call_hook('before_val_epoch')
        time.sleep(2)  # Prevent possible deadlock during epoch transition
        for i, data_batch in enumerate(self.data_loader):
            self._inner_iter = i
            self.call_hook('before_val_iter')
            self.run_iter(data_batch, train_mode=False)
            self.call_hook('after_val_iter')

        self.call_hook('after_val_epoch')

    def run(self, data_loaders, workflow, max_epochs=None, **kwargs):
        """Start running.

        Args:
            data_loaders (list[:obj:`DataLoader`]): Dataloaders for training
                and validation.
            workflow (list[tuple]): A list of (phase, epochs) to specify the
                running order and epochs. E.g, [('train', 2), ('val', 1)] means
                running 2 epochs for training and 1 epoch for validation,
                iteratively.
            max_epochs (int, optional): Deprecated; set max_epochs in the
                runner config instead.
        """
        assert isinstance(data_loaders, list)
        assert mmcv.is_list_of(workflow, tuple)
        assert len(data_loaders) == len(workflow)
        if max_epochs is not None:
            warnings.warn(
                'setting max_epochs in run is deprecated, '
                'please set max_epochs in runner_config', DeprecationWarning)
            self._max_epochs = max_epochs

        assert self._max_epochs is not None, (
            'max_epochs must be specified during instantiation')

        # Derive the total iteration budget from the first training phase.
        for i, flow in enumerate(workflow):
            mode, epochs = flow
            if mode == 'train':
                self._max_iters = self._max_epochs * len(data_loaders[i])
                break

        work_dir = self.work_dir if self.work_dir is not None else 'NONE'
        self.logger.info('Start running, host: %s, work_dir: %s',
                         get_host_info(), work_dir)
        self.logger.info('Hooks will be executed in the following order:\n%s',
                         self.get_hook_info())
        self.logger.info('workflow: %s, max: %d epochs', workflow,
                         self._max_epochs)
        self.call_hook('before_run')

        while self.epoch < self._max_epochs:
            for i, flow in enumerate(workflow):
                mode, epochs = flow
                if isinstance(mode, str):  # self.train()
                    if not hasattr(self, mode):
                        raise ValueError(
                            f'runner has no method named "{mode}" to run an '
                            'epoch')
                    epoch_runner = getattr(self, mode)
                else:
                    raise TypeError(
                        'mode in workflow must be a str, but got {}'.format(
                            type(mode)))

                for _ in range(epochs):
                    if mode == 'train' and self.epoch >= self._max_epochs:
                        break
                    epoch_runner(data_loaders[i], **kwargs)

        time.sleep(1)  # wait for some hooks like loggers to finish
        self.call_hook('after_run')

    def save_checkpoint(self,
                        out_dir,
                        filename_tmpl='epoch_{}.pth',
                        save_optimizer=True,
                        meta=None,
                        create_symlink=True):
        """Save the checkpoint.

        Args:
            out_dir (str): The directory that checkpoints are saved.
            filename_tmpl (str, optional): The checkpoint filename template,
                which contains a placeholder for the epoch number.
                Defaults to 'epoch_{}.pth'.
            save_optimizer (bool, optional): Whether to save the optimizer to
                the checkpoint. Defaults to True.
            meta (dict, optional): The meta information to be saved in the
                checkpoint. Defaults to None.
            create_symlink (bool, optional): Whether to create a symlink
                "latest.pth" to point to the latest checkpoint.
                Defaults to True.
        """
        if meta is None:
            meta = {}
        elif not isinstance(meta, dict):
            raise TypeError(
                f'meta should be a dict or None, but got {type(meta)}')
        if self.meta is not None:
            meta.update(self.meta)
            # Note: meta.update(self.meta) should be done before
            # meta.update(epoch=self.epoch + 1, iter=self.iter) otherwise
            # there will be problems with resumed checkpoints.
            # More details in https://github.com/open-mmlab/mmcv/pull/1108
        meta.update(epoch=self.epoch + 1, iter=self.iter)

        filename = filename_tmpl.format(self.epoch + 1)
        filepath = osp.join(out_dir, filename)
        optimizer = self.optimizer if save_optimizer else None
        save_checkpoint(self.model, filepath, optimizer=optimizer, meta=meta)
        # in some environments, `os.symlink` is not supported, you may need to
        # set `create_symlink` to False
        if create_symlink:
            dst_file = osp.join(out_dir, 'latest.pth')
            if platform.system() != 'Windows':
                mmcv.symlink(filename, dst_file)
            else:
                shutil.copy(filepath, dst_file)
178
+
179
+
180
@RUNNERS.register_module()
class Runner(EpochBasedRunner):
    """Deprecated alias of :class:`EpochBasedRunner`."""

    def __init__(self, *args, **kwargs):
        # Emit a deprecation notice, then defer to the real implementation.
        message = 'Runner was deprecated, please use EpochBasedRunner instead'
        warnings.warn(message)
        super().__init__(*args, **kwargs)
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/runner/fp16_utils.py ADDED
@@ -0,0 +1,410 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import functools
3
+ import warnings
4
+ from collections import abc
5
+ from inspect import getfullargspec
6
+
7
+ import numpy as np
8
+ import torch
9
+ import torch.nn as nn
10
+
11
+ from annotator.uniformer.mmcv.utils import TORCH_VERSION, digit_version
12
+ from .dist_utils import allreduce_grads as _allreduce_grads
13
+
14
+ try:
15
+ # If PyTorch version >= 1.6.0, torch.cuda.amp.autocast would be imported
16
+ # and used; otherwise, auto fp16 will adopt mmcv's implementation.
17
+ # Note that when PyTorch >= 1.6.0, we still cast tensor types to fp16
18
+ # manually, so the behavior may not be consistent with real amp.
19
+ from torch.cuda.amp import autocast
20
+ except ImportError:
21
+ pass
22
+
23
+
24
def cast_tensor_type(inputs, src_type, dst_type):
    """Recursively cast every ``torch.Tensor`` in ``inputs`` to ``dst_type``.

    Mappings and other iterables are rebuilt with their original type while
    their elements are converted recursively.  ``nn.Module`` instances,
    strings, numpy arrays and any unrecognized objects pass through
    untouched.

    Args:
        inputs: Arbitrarily nested inputs to convert.
        src_type (torch.dtype): Source type (kept for API compatibility;
            tensors are cast to ``dst_type`` regardless of their dtype).
        dst_type (torch.dtype): Destination type.

    Returns:
        The same structure as ``inputs`` with all tensors cast.
    """
    if isinstance(inputs, nn.Module):
        return inputs
    if isinstance(inputs, torch.Tensor):
        return inputs.to(dst_type)
    if isinstance(inputs, (str, np.ndarray)):
        # Text and numpy data are intentionally left alone.
        return inputs
    if isinstance(inputs, abc.Mapping):
        converted = {
            key: cast_tensor_type(value, src_type, dst_type)
            for key, value in inputs.items()
        }
        return type(inputs)(converted)
    if isinstance(inputs, abc.Iterable):
        return type(inputs)(
            cast_tensor_type(element, src_type, dst_type)
            for element in inputs)
    return inputs
53
+
54
+
55
def auto_fp16(apply_to=None, out_fp32=False):
    """Decorator to enable fp16 training automatically.

    Decorated methods cast their selected fp32 tensor arguments to fp16
    before the call; other argument types are left alone.  On PyTorch >=
    1.6 ``torch.cuda.amp.autocast`` is used as the backend, otherwise the
    original mmcv implementation is adopted.  The cast only happens once the
    owning module has ``fp16_enabled`` set to a truthy value.

    Args:
        apply_to (Iterable, optional): The argument names to be converted.
            `None` indicates all arguments.
        out_fp32 (bool): Whether to convert the output back to fp32.

    Example:

        >>> import torch.nn as nn
        >>> class MyModule1(nn.Module):
        >>>
        >>>     # Convert x and y to fp16
        >>>     @auto_fp16()
        >>>     def forward(self, x, y):
        >>>         pass

        >>> import torch.nn as nn
        >>> class MyModule2(nn.Module):
        >>>
        >>>     # convert pred to fp16
        >>>     @auto_fp16(apply_to=('pred', ))
        >>>     def do_something(self, pred, others):
        >>>         pass
    """

    def auto_fp16_wrapper(old_func):

        @functools.wraps(old_func)
        def new_func(*args, **kwargs):
            # Only nn.Module methods may be decorated, and the cast kicks in
            # only once the module has switched on ``fp16_enabled``.
            if not isinstance(args[0], torch.nn.Module):
                raise TypeError('@auto_fp16 can only be used to decorate the '
                                'method of nn.Module')
            if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled):
                return old_func(*args, **kwargs)

            spec = getfullargspec(old_func)
            cast_names = spec.args if apply_to is None else apply_to

            def _maybe_cast(name, value):
                # Cast fp32 tensors to fp16 for the selected argument names.
                if name in cast_names:
                    return cast_tensor_type(value, torch.float, torch.half)
                return value

            # NOTE: default arguments are not taken into consideration; zip
            # pairs each positional value with its declared parameter name.
            new_args = [
                _maybe_cast(name, value)
                for name, value in zip(spec.args, args)
            ]
            new_kwargs = {
                name: _maybe_cast(name, value)
                for name, value in kwargs.items()
            }
            # Prefer native amp on sufficiently new PyTorch.
            if (TORCH_VERSION != 'parrots' and
                    digit_version(TORCH_VERSION) >= digit_version('1.6.0')):
                with autocast(enabled=True):
                    output = old_func(*new_args, **new_kwargs)
            else:
                output = old_func(*new_args, **new_kwargs)
            # Cast the results back to fp32 if necessary.
            if out_fp32:
                output = cast_tensor_type(output, torch.half, torch.float)
            return output

        return new_func

    return auto_fp16_wrapper
139
+
140
+
141
def force_fp32(apply_to=None, out_fp16=False):
    """Decorator to convert input arguments to fp32 in force.

    Useful for custom modules under mixed precision training when some
    inputs must be processed in fp32: the selected fp16 tensor arguments
    are cast to fp32 before the call, while other argument types are left
    alone.  On PyTorch >= 1.6 ``torch.cuda.amp.autocast(enabled=False)``
    wraps the call, otherwise the original mmcv implementation is adopted.
    The cast only happens once the owning module has ``fp16_enabled`` set
    to a truthy value.

    Args:
        apply_to (Iterable, optional): The argument names to be converted.
            `None` indicates all arguments.
        out_fp16 (bool): Whether to convert the output back to fp16.

    Example:

        >>> import torch.nn as nn
        >>> class MyModule1(nn.Module):
        >>>
        >>>     # Convert x and y to fp32
        >>>     @force_fp32()
        >>>     def loss(self, x, y):
        >>>         pass

        >>> import torch.nn as nn
        >>> class MyModule2(nn.Module):
        >>>
        >>>     # convert pred to fp32
        >>>     @force_fp32(apply_to=('pred', ))
        >>>     def post_process(self, pred, others):
        >>>         pass
    """

    def force_fp32_wrapper(old_func):

        @functools.wraps(old_func)
        def new_func(*args, **kwargs):
            # Only nn.Module methods may be decorated, and the cast kicks in
            # only once the module has switched on ``fp16_enabled``.
            if not isinstance(args[0], torch.nn.Module):
                raise TypeError('@force_fp32 can only be used to decorate the '
                                'method of nn.Module')
            if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled):
                return old_func(*args, **kwargs)

            spec = getfullargspec(old_func)
            cast_names = spec.args if apply_to is None else apply_to

            def _maybe_cast(name, value):
                # Cast fp16 tensors to fp32 for the selected argument names.
                if name in cast_names:
                    return cast_tensor_type(value, torch.half, torch.float)
                return value

            # NOTE: default arguments are not taken into consideration; zip
            # pairs each positional value with its declared parameter name.
            new_args = [
                _maybe_cast(name, value)
                for name, value in zip(spec.args, args)
            ]
            new_kwargs = {
                name: _maybe_cast(name, value)
                for name, value in kwargs.items()
            }
            # Force amp off so the body really runs in fp32 on new PyTorch.
            if (TORCH_VERSION != 'parrots' and
                    digit_version(TORCH_VERSION) >= digit_version('1.6.0')):
                with autocast(enabled=False):
                    output = old_func(*new_args, **new_kwargs)
            else:
                output = old_func(*new_args, **new_kwargs)
            # Cast the results back to fp16 if necessary.
            if out_fp16:
                output = cast_tensor_type(output, torch.float, torch.half)
            return output

        return new_func

    return force_fp32_wrapper
225
+
226
+
227
def allreduce_grads(params, coalesce=True, bucket_size_mb=-1):
    """Deprecated wrapper around :func:`mmcv.runner.allreduce_grads`.

    Args:
        params (list[torch.nn.Parameter]): Parameters of a model.
        coalesce (bool, optional): Whether to allreduce gradients as a whole.
            Defaults to True.
        bucket_size_mb (int, optional): Size of a bucket in MB.
            Defaults to -1.
    """
    # Bug fix: ``warnings.warning`` does not exist (the original call raised
    # AttributeError before ever delegating); the correct API is
    # ``warnings.warn``.
    warnings.warn(
        '"mmcv.runner.fp16_utils.allreduce_grads" is deprecated, and will be '
        'removed in v2.8. Please switch to "mmcv.runner.allreduce_grads')
    _allreduce_grads(params, coalesce=coalesce, bucket_size_mb=bucket_size_mb)
232
+
233
+
234
def wrap_fp16_model(model):
    """Prepare an FP32 model for mixed precision training.

    With PyTorch >= 1.6 the native ``torch.cuda.amp`` backend is used, so
    the weights are left in FP32 and only the ``fp16_enabled`` flag is set.
    On older PyTorch (or parrots) the weights are cast to FP16 and the
    normalization layers are patched back to FP32.

    Args:
        model (nn.Module): Model in FP32.
    """
    use_legacy_fp16 = (
        TORCH_VERSION == 'parrots'
        or digit_version(TORCH_VERSION) < digit_version('1.6.0'))
    if use_legacy_fp16:
        # Cast all weights to fp16, then keep normalization layers in fp32.
        model.half()
        patch_norm_fp32(model)
    # Turn on the fp16 flag on every submodule that declares it.
    for submodule in model.modules():
        if hasattr(submodule, 'fp16_enabled'):
            submodule.fp16_enabled = True
261
+
262
+
263
+ def patch_norm_fp32(module):
264
+ """Recursively convert normalization layers from FP16 to FP32.
265
+
266
+ Args:
267
+ module (nn.Module): The modules to be converted in FP16.
268
+
269
+ Returns:
270
+ nn.Module: The converted module, the normalization layers have been
271
+ converted to FP32.
272
+ """
273
+ if isinstance(module, (nn.modules.batchnorm._BatchNorm, nn.GroupNorm)):
274
+ module.float()
275
+ if isinstance(module, nn.GroupNorm) or torch.__version__ < '1.3':
276
+ module.forward = patch_forward_method(module.forward, torch.half,
277
+ torch.float)
278
+ for child in module.children():
279
+ patch_norm_fp32(child)
280
+ return module
281
+
282
+
283
def patch_forward_method(func, src_type, dst_type, convert_output=True):
    """Wrap ``func`` so its tensor inputs (and optionally outputs) are cast.

    Args:
        func (callable): The original forward method.
        src_type (torch.dtype): Type of input arguments to be converted from.
        dst_type (torch.dtype): Type of input arguments to be converted to.
        convert_output (bool): Whether to convert the output back to
            ``src_type``.

    Returns:
        callable: The patched forward method.
    """

    def new_forward(*args, **kwargs):
        cast_args = cast_tensor_type(args, src_type, dst_type)
        cast_kwargs = cast_tensor_type(kwargs, src_type, dst_type)
        result = func(*cast_args, **cast_kwargs)
        if convert_output:
            # Mirror the input conversion on the way out.
            result = cast_tensor_type(result, dst_type, src_type)
        return result

    return new_forward
304
+
305
+
306
class LossScaler:
    """Class that manages loss scaling in mixed precision training which
    supports both dynamic or static mode.

    The implementation refers to
    https://github.com/NVIDIA/apex/blob/master/apex/fp16_utils/loss_scaler.py.
    Indirectly, by supplying ``mode='dynamic'`` for dynamic loss scaling.
    It's important to understand how :class:`LossScaler` operates.
    Loss scaling is designed to combat the problem of underflowing
    gradients encountered at long times when training fp16 networks.
    Dynamic loss scaling begins by attempting a very high loss
    scale. Ironically, this may result in OVERflowing gradients.
    If overflowing gradients are encountered, :class:`FP16_Optimizer` then
    skips the update step for this particular iteration/minibatch,
    and :class:`LossScaler` adjusts the loss scale to a lower value.
    If a certain number of iterations occur without overflowing gradients
    detected,:class:`LossScaler` increases the loss scale once more.
    In this way :class:`LossScaler` attempts to "ride the edge" of always
    using the highest loss scale possible without incurring overflow.

    Args:
        init_scale (float): Initial loss scale value, default: 2**32.
        scale_factor (float): Factor used when adjusting the loss scale.
            Default: 2.
        mode (str): Loss scaling mode. 'dynamic' or 'static'
        scale_window (int): Number of consecutive iterations without an
            overflow to wait before increasing the loss scale. Default: 1000.
    """

    def __init__(self,
                 init_scale=2**32,
                 mode='dynamic',
                 scale_factor=2.,
                 scale_window=1000):
        self.cur_scale = init_scale
        self.cur_iter = 0
        assert mode in ('dynamic',
                        'static'), 'mode can only be dynamic or static'
        self.mode = mode
        self.last_overflow_iter = -1
        self.scale_factor = scale_factor
        self.scale_window = scale_window

    def has_overflow(self, params):
        """Check if any gradient of ``params`` contains inf/NaN.

        Always False in 'static' mode.
        """
        if self.mode != 'dynamic':
            return False
        for p in params:
            if p.grad is not None and LossScaler._has_inf_or_nan(p.grad.data):
                return True
        return False

    # Fix: this was a plain function in the class body; calling it on an
    # instance would have bound ``self`` as ``x``. ``@staticmethod`` keeps
    # the existing ``LossScaler._has_inf_or_nan(x)`` call sites working and
    # makes instance calls safe too.
    @staticmethod
    def _has_inf_or_nan(x):
        """Check if the tensor ``x`` contains inf or NaN values."""
        try:
            # inf/NaN propagate through the sum, so one reduction suffices.
            cpu_sum = float(x.float().sum())
        except RuntimeError as instance:
            if 'value cannot be converted' not in instance.args[0]:
                raise
            return True
        else:
            if cpu_sum == float('inf') or cpu_sum == -float('inf') \
                    or cpu_sum != cpu_sum:
                return True
            return False

    def update_scale(self, overflow):
        """update the current loss scale value when overflow happens."""
        if self.mode != 'dynamic':
            return
        if overflow:
            # Back off, but never drop the scale below 1.
            self.cur_scale = max(self.cur_scale / self.scale_factor, 1)
            self.last_overflow_iter = self.cur_iter
        else:
            if (self.cur_iter - self.last_overflow_iter) % \
                    self.scale_window == 0:
                self.cur_scale *= self.scale_factor
        self.cur_iter += 1

    def state_dict(self):
        """Returns the state of the scaler as a :class:`dict`."""
        return dict(
            cur_scale=self.cur_scale,
            cur_iter=self.cur_iter,
            mode=self.mode,
            last_overflow_iter=self.last_overflow_iter,
            scale_factor=self.scale_factor,
            scale_window=self.scale_window)

    def load_state_dict(self, state_dict):
        """Loads the loss_scaler state dict.

        Args:
            state_dict (dict): scaler state.
        """
        self.cur_scale = state_dict['cur_scale']
        self.cur_iter = state_dict['cur_iter']
        self.mode = state_dict['mode']
        self.last_overflow_iter = state_dict['last_overflow_iter']
        self.scale_factor = state_dict['scale_factor']
        self.scale_window = state_dict['scale_window']

    @property
    def loss_scale(self):
        """Current loss scale value."""
        return self.cur_scale
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/runner/hooks/__init__.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ from .checkpoint import CheckpointHook
3
+ from .closure import ClosureHook
4
+ from .ema import EMAHook
5
+ from .evaluation import DistEvalHook, EvalHook
6
+ from .hook import HOOKS, Hook
7
+ from .iter_timer import IterTimerHook
8
+ from .logger import (DvcliveLoggerHook, LoggerHook, MlflowLoggerHook,
9
+ NeptuneLoggerHook, PaviLoggerHook, TensorboardLoggerHook,
10
+ TextLoggerHook, WandbLoggerHook)
11
+ from .lr_updater import LrUpdaterHook
12
+ from .memory import EmptyCacheHook
13
+ from .momentum_updater import MomentumUpdaterHook
14
+ from .optimizer import (Fp16OptimizerHook, GradientCumulativeFp16OptimizerHook,
15
+ GradientCumulativeOptimizerHook, OptimizerHook)
16
+ from .profiler import ProfilerHook
17
+ from .sampler_seed import DistSamplerSeedHook
18
+ from .sync_buffer import SyncBuffersHook
19
+
20
+ __all__ = [
21
+ 'HOOKS', 'Hook', 'CheckpointHook', 'ClosureHook', 'LrUpdaterHook',
22
+ 'OptimizerHook', 'Fp16OptimizerHook', 'IterTimerHook',
23
+ 'DistSamplerSeedHook', 'EmptyCacheHook', 'LoggerHook', 'MlflowLoggerHook',
24
+ 'PaviLoggerHook', 'TextLoggerHook', 'TensorboardLoggerHook',
25
+ 'NeptuneLoggerHook', 'WandbLoggerHook', 'DvcliveLoggerHook',
26
+ 'MomentumUpdaterHook', 'SyncBuffersHook', 'EMAHook', 'EvalHook',
27
+ 'DistEvalHook', 'ProfilerHook', 'GradientCumulativeOptimizerHook',
28
+ 'GradientCumulativeFp16OptimizerHook'
29
+ ]
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/runner/hooks/checkpoint.py ADDED
@@ -0,0 +1,167 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import os.path as osp
3
+ import warnings
4
+
5
+ from annotator.uniformer.mmcv.fileio import FileClient
6
+ from ..dist_utils import allreduce_params, master_only
7
+ from .hook import HOOKS, Hook
8
+
9
+
10
@HOOKS.register_module()
class CheckpointHook(Hook):
    """Save checkpoints periodically.

    Args:
        interval (int): The saving period. If ``by_epoch=True``, interval
            indicates epochs, otherwise it indicates iterations.
            Default: -1, which means "never".
        by_epoch (bool): Saving checkpoints by epoch or by iteration.
            Default: True.
        save_optimizer (bool): Whether to save optimizer state_dict in the
            checkpoint. It is usually used for resuming experiments.
            Default: True.
        out_dir (str, optional): The root directory to save checkpoints. If not
            specified, ``runner.work_dir`` will be used by default. If
            specified, the ``out_dir`` will be the concatenation of ``out_dir``
            and the last level directory of ``runner.work_dir``.
            `Changed in version 1.3.16.`
        max_keep_ckpts (int, optional): The maximum checkpoints to keep.
            In some cases we want only the latest few checkpoints and would
            like to delete old ones to save the disk space.
            Default: -1, which means unlimited.
        save_last (bool, optional): Whether to force the last checkpoint to be
            saved regardless of interval. Default: True.
        sync_buffer (bool, optional): Whether to synchronize buffers in
            different gpus. Default: False.
        file_client_args (dict, optional): Arguments to instantiate a
            FileClient. See :class:`mmcv.fileio.FileClient` for details.
            Default: None.
            `New in version 1.3.16.`

    .. warning::
        Before v1.3.16, the ``out_dir`` argument indicates the path where the
        checkpoint is stored. However, since v1.3.16, ``out_dir`` indicates the
        root directory and the final path to save checkpoint is the
        concatenation of ``out_dir`` and the last level directory of
        ``runner.work_dir``. Suppose the value of ``out_dir`` is "/path/of/A"
        and the value of ``runner.work_dir`` is "/path/of/B", then the final
        path will be "/path/of/A/B".
    """

    def __init__(self,
                 interval=-1,
                 by_epoch=True,
                 save_optimizer=True,
                 out_dir=None,
                 max_keep_ckpts=-1,
                 save_last=True,
                 sync_buffer=False,
                 file_client_args=None,
                 **kwargs):
        self.interval = interval
        self.by_epoch = by_epoch
        self.save_optimizer = save_optimizer
        self.out_dir = out_dir
        self.max_keep_ckpts = max_keep_ckpts
        self.save_last = save_last
        # Extra keyword arguments are forwarded verbatim to
        # ``runner.save_checkpoint`` (e.g. ``filename_tmpl``,
        # ``create_symlink``).
        self.args = kwargs
        self.sync_buffer = sync_buffer
        self.file_client_args = file_client_args

    def before_run(self, runner):
        """Resolve the final ``out_dir`` and set up the file client."""
        if not self.out_dir:
            self.out_dir = runner.work_dir

        self.file_client = FileClient.infer_client(self.file_client_args,
                                                   self.out_dir)

        # if `self.out_dir` is not equal to `runner.work_dir`, it means that
        # `self.out_dir` is set so the final `self.out_dir` is the
        # concatenation of `self.out_dir` and the last level directory of
        # `runner.work_dir`
        if self.out_dir != runner.work_dir:
            basename = osp.basename(runner.work_dir.rstrip(osp.sep))
            self.out_dir = self.file_client.join_path(self.out_dir, basename)

        runner.logger.info((f'Checkpoints will be saved to {self.out_dir} by '
                            f'{self.file_client.name}.'))

        # disable the create_symlink option because some file backends do not
        # allow to create a symlink
        if 'create_symlink' in self.args:
            if self.args[
                    'create_symlink'] and not self.file_client.allow_symlink:
                self.args['create_symlink'] = False
                # Fix: the two literal fragments previously joined without a
                # separating space, producing "changedto be False".
                warnings.warn(
                    ('create_symlink is set as True by the user but is '
                     'changed to be False because creating symbolic link is '
                     f'not allowed in {self.file_client.name}'))
        else:
            self.args['create_symlink'] = self.file_client.allow_symlink

    def after_train_epoch(self, runner):
        """Save a checkpoint at epoch boundaries when saving by epoch."""
        if not self.by_epoch:
            return

        # save checkpoint for following cases:
        # 1. every ``self.interval`` epochs
        # 2. reach the last epoch of training
        if self.every_n_epochs(
                runner, self.interval) or (self.save_last
                                           and self.is_last_epoch(runner)):
            runner.logger.info(
                f'Saving checkpoint at {runner.epoch + 1} epochs')
            if self.sync_buffer:
                allreduce_params(runner.model.buffers())
            self._save_checkpoint(runner)

    @master_only
    def _save_checkpoint(self, runner):
        """Save the current checkpoint and delete unwanted checkpoint."""
        runner.save_checkpoint(
            self.out_dir, save_optimizer=self.save_optimizer, **self.args)
        if runner.meta is not None:
            if self.by_epoch:
                cur_ckpt_filename = self.args.get(
                    'filename_tmpl', 'epoch_{}.pth').format(runner.epoch + 1)
            else:
                cur_ckpt_filename = self.args.get(
                    'filename_tmpl', 'iter_{}.pth').format(runner.iter + 1)
            runner.meta.setdefault('hook_msgs', dict())
            runner.meta['hook_msgs']['last_ckpt'] = self.file_client.join_path(
                self.out_dir, cur_ckpt_filename)
        # remove other checkpoints
        if self.max_keep_ckpts > 0:
            if self.by_epoch:
                name = 'epoch_{}.pth'
                current_ckpt = runner.epoch + 1
            else:
                name = 'iter_{}.pth'
                current_ckpt = runner.iter + 1
            redundant_ckpts = range(
                current_ckpt - self.max_keep_ckpts * self.interval, 0,
                -self.interval)
            filename_tmpl = self.args.get('filename_tmpl', name)
            for _step in redundant_ckpts:
                ckpt_path = self.file_client.join_path(
                    self.out_dir, filename_tmpl.format(_step))
                if self.file_client.isfile(ckpt_path):
                    self.file_client.remove(ckpt_path)
                else:
                    # Checkpoints older than the first missing one are
                    # assumed already gone.
                    break

    def after_train_iter(self, runner):
        """Save a checkpoint at iteration boundaries when saving by iter."""
        if self.by_epoch:
            return

        # save checkpoint for following cases:
        # 1. every ``self.interval`` iterations
        # 2. reach the last iteration of training
        if self.every_n_iters(
                runner, self.interval) or (self.save_last
                                           and self.is_last_iter(runner)):
            runner.logger.info(
                f'Saving checkpoint at {runner.iter + 1} iterations')
            if self.sync_buffer:
                allreduce_params(runner.model.buffers())
            self._save_checkpoint(runner)
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/runner/hooks/closure.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ from .hook import HOOKS, Hook
3
+
4
+
5
@HOOKS.register_module()
class ClosureHook(Hook):
    """Hook that installs a user-provided callable as one of its callbacks.

    Args:
        fn_name (str): Name of an existing attribute of the hook to
            override — in practice one of the hook stage methods such as
            ``'before_run'`` or ``'after_train_iter'``.
        fn (callable): The callable bound under ``fn_name``; it is invoked
            with whatever arguments the runner passes to that stage
            (presumably the runner itself — confirm against the caller).
    """

    def __init__(self, fn_name, fn):
        # The name must already exist on the hook, so only known callback
        # slots can be replaced.
        assert hasattr(self, fn_name)
        assert callable(fn)
        setattr(self, fn_name, fn)
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/runner/hooks/ema.py ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ from ...parallel import is_module_wrapper
3
+ from ..hooks.hook import HOOKS, Hook
4
+
5
+
6
@HOOKS.register_module()
class EMAHook(Hook):
    r"""Exponential Moving Average Hook.

    Use Exponential Moving Average on all parameters of model in training
    process. All parameters have a ema backup, which update by the formula
    as below. EMAHook takes priority over EvalHook and CheckpointSaverHook.

    .. math::

        \text{Xema\_{t+1}} = (1 - \text{momentum}) \times
        \text{Xema\_{t}} + \text{momentum} \times X_t

    Args:
        momentum (float): The momentum used for updating ema parameter.
            Defaults to 0.0002.
        interval (int): Update ema parameter every interval iteration.
            Defaults to 1.
        warm_up (int): During first warm_up steps, we may use smaller momentum
            to update ema parameters more slowly. Defaults to 100.
        resume_from (str): The checkpoint path. Defaults to None.
    """

    def __init__(self,
                 momentum=0.0002,
                 interval=1,
                 warm_up=100,
                 resume_from=None):
        assert isinstance(interval, int) and interval > 0
        self.warm_up = warm_up
        self.interval = interval
        assert momentum > 0 and momentum < 1
        # Updating every ``interval`` iterations with momentum**interval is
        # meant to approximate updating every iteration with ``momentum``.
        self.momentum = momentum**interval
        self.checkpoint = resume_from

    def before_run(self, runner):
        """To resume model with it's ema parameters more friendly.

        Register ema parameter as ``named_buffer`` to model
        """
        model = runner.model
        if is_module_wrapper(model):
            model = model.module
        self.param_ema_buffer = {}
        self.model_parameters = dict(model.named_parameters(recurse=True))
        for name, value in self.model_parameters.items():
            # "." is not allowed in module's buffer name
            buffer_name = f"ema_{name.replace('.', '_')}"
            self.param_ema_buffer[name] = buffer_name
            model.register_buffer(buffer_name, value.data.clone())
        self.model_buffers = dict(model.named_buffers(recurse=True))
        if self.checkpoint is not None:
            runner.resume(self.checkpoint)

    def after_train_iter(self, runner):
        """Update ema parameter every self.interval iterations."""
        curr_step = runner.iter
        # We warm up the momentum considering the instability at beginning
        momentum = min(self.momentum,
                       (1 + curr_step) / (self.warm_up + curr_step))
        if curr_step % self.interval != 0:
            return
        for name, parameter in self.model_parameters.items():
            buffer_name = self.param_ema_buffer[name]
            buffer_parameter = self.model_buffers[buffer_name]
            # ``Tensor.add_(Number, Tensor)`` is a deprecated overload that
            # was removed in recent PyTorch; pass the scale via ``alpha``.
            buffer_parameter.mul_(1 - momentum).add_(
                parameter.data, alpha=momentum)

    def after_train_epoch(self, runner):
        """We load parameter values from ema backup to model before the
        EvalHook."""
        self._swap_ema_parameters()

    def before_train_epoch(self, runner):
        """We recover model's parameter from ema backup after last epoch's
        EvalHook."""
        self._swap_ema_parameters()

    def _swap_ema_parameters(self):
        """Swap the parameter of model with parameter in ema_buffer."""
        for name, value in self.model_parameters.items():
            temp = value.data.clone()
            ema_buffer = self.model_buffers[self.param_ema_buffer[name]]
            value.data.copy_(ema_buffer.data)
            ema_buffer.data.copy_(temp)
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/runner/hooks/evaluation.py ADDED
@@ -0,0 +1,509 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import os.path as osp
3
+ import warnings
4
+ from math import inf
5
+
6
+ import torch.distributed as dist
7
+ from torch.nn.modules.batchnorm import _BatchNorm
8
+ from torch.utils.data import DataLoader
9
+
10
+ from annotator.uniformer.mmcv.fileio import FileClient
11
+ from annotator.uniformer.mmcv.utils import is_seq_of
12
+ from .hook import Hook
13
+ from .logger import LoggerHook
14
+
15
+
16
class EvalHook(Hook):
    """Non-Distributed evaluation hook.

    This hook will regularly perform evaluation in a given interval when
    performing in non-distributed environment.

    Args:
        dataloader (DataLoader): A PyTorch dataloader, whose dataset has
            implemented ``evaluate`` function.
        start (int | None, optional): Evaluation starting epoch. It enables
            evaluation before the training starts if ``start`` <= the resuming
            epoch. If None, whether to evaluate is merely decided by
            ``interval``. Default: None.
        interval (int): Evaluation interval. Default: 1.
        by_epoch (bool): Determine perform evaluation by epoch or by iteration.
            If set to True, it will perform by epoch. Otherwise, by iteration.
            Default: True.
        save_best (str, optional): If a metric is specified, it would measure
            the best checkpoint during evaluation. The information about best
            checkpoint would be saved in ``runner.meta['hook_msgs']`` to keep
            best score value and best checkpoint path, which will be also
            loaded when resume checkpoint. Options are the evaluation metrics
            on the test dataset. e.g., ``bbox_mAP``, ``segm_mAP`` for bbox
            detection and instance segmentation. ``AR@100`` for proposal
            recall. If ``save_best`` is ``auto``, the first key of the returned
            ``OrderedDict`` result will be used. Default: None.
        rule (str | None, optional): Comparison rule for best score. If set to
            None, it will infer a reasonable rule. Keys such as 'acc', 'top'
            .etc will be inferred by 'greater' rule. Keys contain 'loss' will
            be inferred by 'less' rule. Options are 'greater', 'less', None.
            Default: None.
        test_fn (callable, optional): test a model with samples from a
            dataloader, and return the test results. If ``None``, the default
            test function ``mmcv.engine.single_gpu_test`` will be used.
            (default: ``None``)
        greater_keys (List[str] | None, optional): Metric keys that will be
            inferred by 'greater' comparison rule. If ``None``,
            _default_greater_keys will be used. (default: ``None``)
        less_keys (List[str] | None, optional): Metric keys that will be
            inferred by 'less' comparison rule. If ``None``, _default_less_keys
            will be used. (default: ``None``)
        out_dir (str, optional): The root directory to save checkpoints. If not
            specified, `runner.work_dir` will be used by default. If specified,
            the `out_dir` will be the concatenation of `out_dir` and the last
            level directory of `runner.work_dir`.
            `New in version 1.3.16.`
        file_client_args (dict): Arguments to instantiate a FileClient.
            See :class:`mmcv.fileio.FileClient` for details. Default: None.
            `New in version 1.3.16.`
        **eval_kwargs: Evaluation arguments fed into the evaluate function of
            the dataset.

    Notes:
        If new arguments are added for EvalHook, tools/test.py,
        tools/eval_metric.py may be affected.
    """

    # Since the key for determine greater or less is related to the downstream
    # tasks, downstream repos may need to overwrite the following inner
    # variable accordingly.

    rule_map = {'greater': lambda x, y: x > y, 'less': lambda x, y: x < y}
    init_value_map = {'greater': -inf, 'less': inf}
    _default_greater_keys = [
        'acc', 'top', 'AR@', 'auc', 'precision', 'mAP', 'mDice', 'mIoU',
        'mAcc', 'aAcc'
    ]
    _default_less_keys = ['loss']

    def __init__(self,
                 dataloader,
                 start=None,
                 interval=1,
                 by_epoch=True,
                 save_best=None,
                 rule=None,
                 test_fn=None,
                 greater_keys=None,
                 less_keys=None,
                 out_dir=None,
                 file_client_args=None,
                 **eval_kwargs):
        if not isinstance(dataloader, DataLoader):
            raise TypeError(f'dataloader must be a pytorch DataLoader, '
                            f'but got {type(dataloader)}')

        if interval <= 0:
            raise ValueError(f'interval must be a positive number, '
                             f'but got {interval}')

        assert isinstance(by_epoch, bool), '``by_epoch`` should be a boolean'

        if start is not None and start < 0:
            raise ValueError(f'The evaluation start epoch {start} is smaller '
                             f'than 0')

        self.dataloader = dataloader
        self.interval = interval
        self.start = start
        self.by_epoch = by_epoch

        # Fixed garbled quoting (was '""save_best""') in the error message.
        assert isinstance(save_best, str) or save_best is None, \
            '``save_best`` should be a str or None ' \
            f'rather than {type(save_best)}'
        self.save_best = save_best
        self.eval_kwargs = eval_kwargs
        self.initial_flag = True

        if test_fn is None:
            from annotator.uniformer.mmcv.engine import single_gpu_test
            self.test_fn = single_gpu_test
        else:
            self.test_fn = test_fn

        if greater_keys is None:
            self.greater_keys = self._default_greater_keys
        else:
            if not isinstance(greater_keys, (list, tuple)):
                greater_keys = (greater_keys, )
            assert is_seq_of(greater_keys, str)
            self.greater_keys = greater_keys

        if less_keys is None:
            self.less_keys = self._default_less_keys
        else:
            if not isinstance(less_keys, (list, tuple)):
                less_keys = (less_keys, )
            assert is_seq_of(less_keys, str)
            self.less_keys = less_keys

        if self.save_best is not None:
            self.best_ckpt_path = None
            self._init_rule(rule, self.save_best)

        self.out_dir = out_dir
        self.file_client_args = file_client_args

    def _init_rule(self, rule, key_indicator):
        """Initialize rule, key_indicator, comparison_func, and best score.

        Here is the rule to determine which rule is used for key indicator
        when the rule is not specific (note that the key indicator matching
        is case-insensitive):
        1. If the key indicator is in ``self.greater_keys``, the rule will be
           specified as 'greater'.
        2. Or if the key indicator is in ``self.less_keys``, the rule will be
           specified as 'less'.
        3. Or if the key indicator is equal to the substring in any one item
           in ``self.greater_keys``, the rule will be specified as 'greater'.
        4. Or if the key indicator is equal to the substring in any one item
           in ``self.less_keys``, the rule will be specified as 'less'.

        Args:
            rule (str | None): Comparison rule for best score.
            key_indicator (str | None): Key indicator to determine the
                comparison rule.
        """
        if rule not in self.rule_map and rule is not None:
            raise KeyError(f'rule must be greater, less or None, '
                           f'but got {rule}.')

        if rule is None:
            if key_indicator != 'auto':
                # `_lc` here means we use the lower case of keys for
                # case-insensitive matching
                key_indicator_lc = key_indicator.lower()
                greater_keys = [key.lower() for key in self.greater_keys]
                less_keys = [key.lower() for key in self.less_keys]

                if key_indicator_lc in greater_keys:
                    rule = 'greater'
                elif key_indicator_lc in less_keys:
                    rule = 'less'
                elif any(key in key_indicator_lc for key in greater_keys):
                    rule = 'greater'
                elif any(key in key_indicator_lc for key in less_keys):
                    rule = 'less'
                else:
                    raise ValueError(f'Cannot infer the rule for key '
                                     f'{key_indicator}, thus a specific rule '
                                     f'must be specified.')
        self.rule = rule
        self.key_indicator = key_indicator
        if self.rule is not None:
            self.compare_func = self.rule_map[self.rule]

    def before_run(self, runner):
        if not self.out_dir:
            self.out_dir = runner.work_dir

        self.file_client = FileClient.infer_client(self.file_client_args,
                                                   self.out_dir)

        # if `self.out_dir` is not equal to `runner.work_dir`, it means that
        # `self.out_dir` is set so the final `self.out_dir` is the
        # concatenation of `self.out_dir` and the last level directory of
        # `runner.work_dir`
        if self.out_dir != runner.work_dir:
            basename = osp.basename(runner.work_dir.rstrip(osp.sep))
            self.out_dir = self.file_client.join_path(self.out_dir, basename)
            runner.logger.info(
                (f'The best checkpoint will be saved to {self.out_dir} by '
                 f'{self.file_client.name}'))

        if self.save_best is not None:
            if runner.meta is None:
                warnings.warn('runner.meta is None. Creating an empty one.')
                runner.meta = dict()
            runner.meta.setdefault('hook_msgs', dict())
            self.best_ckpt_path = runner.meta['hook_msgs'].get(
                'best_ckpt', None)

    def before_train_iter(self, runner):
        """Evaluate the model only at the start of training by iteration."""
        if self.by_epoch or not self.initial_flag:
            return
        if self.start is not None and runner.iter >= self.start:
            self.after_train_iter(runner)
        self.initial_flag = False

    def before_train_epoch(self, runner):
        """Evaluate the model only at the start of training by epoch."""
        if not (self.by_epoch and self.initial_flag):
            return
        if self.start is not None and runner.epoch >= self.start:
            self.after_train_epoch(runner)
        self.initial_flag = False

    def after_train_iter(self, runner):
        """Called after every training iter to evaluate the results."""
        if not self.by_epoch and self._should_evaluate(runner):
            # Because the priority of EvalHook is higher than LoggerHook, the
            # training log and the evaluating log are mixed. Therefore,
            # we need to dump the training log and clear it before evaluating
            # log is generated. In addition, this problem will only appear in
            # `IterBasedRunner` whose `self.by_epoch` is False, because
            # `EpochBasedRunner` whose `self.by_epoch` is True calls
            # `_do_evaluate` in `after_train_epoch` stage, and at this stage
            # the training log has been printed, so it will not cause any
            # problem. more details at
            # https://github.com/open-mmlab/mmsegmentation/issues/694
            for hook in runner._hooks:
                if isinstance(hook, LoggerHook):
                    hook.after_train_iter(runner)
            runner.log_buffer.clear()

            self._do_evaluate(runner)

    def after_train_epoch(self, runner):
        """Called after every training epoch to evaluate the results."""
        if self.by_epoch and self._should_evaluate(runner):
            self._do_evaluate(runner)

    def _do_evaluate(self, runner):
        """perform evaluation and save ckpt."""
        results = self.test_fn(runner.model, self.dataloader)
        runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
        key_score = self.evaluate(runner, results)
        # The key_score may be `None` (evaluation skipped / no metric), in
        # which case saving must be skipped. Compare against None explicitly
        # so a legitimate score of 0 (or 0.0) still updates the best ckpt.
        if self.save_best and key_score is not None:
            self._save_ckpt(runner, key_score)

    def _should_evaluate(self, runner):
        """Judge whether to perform evaluation.

        Here is the rule to judge whether to perform evaluation:
        1. It will not perform evaluation during the epoch/iteration interval,
           which is determined by ``self.interval``.
        2. It will not perform evaluation if the start time is larger than
           current time.
        3. It will not perform evaluation when current time is larger than
           the start time but during epoch/iteration interval.

        Returns:
            bool: The flag indicating whether to perform evaluation.
        """
        if self.by_epoch:
            current = runner.epoch
            check_time = self.every_n_epochs
        else:
            current = runner.iter
            check_time = self.every_n_iters

        if self.start is None:
            if not check_time(runner, self.interval):
                # No evaluation during the interval.
                return False
        elif (current + 1) < self.start:
            # No evaluation if start is larger than the current time.
            return False
        else:
            # Evaluation only at epochs/iters 3, 5, 7...
            # if start==3 and interval==2
            if (current + 1 - self.start) % self.interval:
                return False
        return True

    def _save_ckpt(self, runner, key_score):
        """Save the best checkpoint.

        It will compare the score according to the compare function, write
        related information (best score, best checkpoint path) and save the
        best checkpoint into ``work_dir``.
        """
        if self.by_epoch:
            current = f'epoch_{runner.epoch + 1}'
            cur_type, cur_time = 'epoch', runner.epoch + 1
        else:
            current = f'iter_{runner.iter + 1}'
            cur_type, cur_time = 'iter', runner.iter + 1

        best_score = runner.meta['hook_msgs'].get(
            'best_score', self.init_value_map[self.rule])
        if self.compare_func(key_score, best_score):
            best_score = key_score
            runner.meta['hook_msgs']['best_score'] = best_score

            if self.best_ckpt_path and self.file_client.isfile(
                    self.best_ckpt_path):
                self.file_client.remove(self.best_ckpt_path)
                runner.logger.info(
                    (f'The previous best checkpoint {self.best_ckpt_path} was '
                     'removed'))

            best_ckpt_name = f'best_{self.key_indicator}_{current}.pth'
            self.best_ckpt_path = self.file_client.join_path(
                self.out_dir, best_ckpt_name)
            runner.meta['hook_msgs']['best_ckpt'] = self.best_ckpt_path

            runner.save_checkpoint(
                self.out_dir, best_ckpt_name, create_symlink=False)
            runner.logger.info(
                f'Now best checkpoint is saved as {best_ckpt_name}.')
            runner.logger.info(
                f'Best {self.key_indicator} is {best_score:0.4f} '
                f'at {cur_time} {cur_type}.')

    def evaluate(self, runner, results):
        """Evaluate the results.

        Args:
            runner (:obj:`mmcv.Runner`): The underlined training runner.
            results (list): Output results.
        """
        eval_res = self.dataloader.dataset.evaluate(
            results, logger=runner.logger, **self.eval_kwargs)

        for name, val in eval_res.items():
            runner.log_buffer.output[name] = val
        runner.log_buffer.ready = True

        if self.save_best is not None:
            # If the performance of model is pool, the `eval_res` may be an
            # empty dict and it will raise exception when `self.save_best` is
            # not None. More details at
            # https://github.com/open-mmlab/mmdetection/issues/6265.
            if not eval_res:
                warnings.warn(
                    'Since `eval_res` is an empty dict, the behavior to save '
                    'the best checkpoint will be skipped in this evaluation.')
                return None

            if self.key_indicator == 'auto':
                # infer from eval_results
                self._init_rule(self.rule, list(eval_res.keys())[0])
            return eval_res[self.key_indicator]

        return None
class DistEvalHook(EvalHook):
    """Distributed evaluation hook.

    This hook will regularly perform evaluation in a given interval when
    performing in distributed environment.

    Args:
        dataloader (DataLoader): A PyTorch dataloader, whose dataset has
            implemented ``evaluate`` function.
        start (int | None, optional): Evaluation starting epoch. It enables
            evaluation before the training starts if ``start`` <= the resuming
            epoch. If None, whether to evaluate is merely decided by
            ``interval``. Default: None.
        interval (int): Evaluation interval. Default: 1.
        by_epoch (bool): Determine perform evaluation by epoch or by iteration.
            If set to True, it will perform by epoch. Otherwise, by iteration.
            default: True.
        save_best (str, optional): If a metric is specified, it would measure
            the best checkpoint during evaluation. The information about best
            checkpoint would be saved in ``runner.meta['hook_msgs']`` to keep
            best score value and best checkpoint path, which will be also
            loaded when resume checkpoint. Options are the evaluation metrics
            on the test dataset. e.g., ``bbox_mAP``, ``segm_mAP`` for bbox
            detection and instance segmentation. ``AR@100`` for proposal
            recall. If ``save_best`` is ``auto``, the first key of the returned
            ``OrderedDict`` result will be used. Default: None.
        rule (str | None, optional): Comparison rule for best score. If set to
            None, it will infer a reasonable rule. Keys such as 'acc', 'top'
            .etc will be inferred by 'greater' rule. Keys contain 'loss' will
            be inferred by 'less' rule. Options are 'greater', 'less', None.
            Default: None.
        test_fn (callable, optional): test a model with samples from a
            dataloader in a multi-gpu manner, and return the test results. If
            ``None``, the default test function ``mmcv.engine.multi_gpu_test``
            will be used. (default: ``None``)
        tmpdir (str | None): Temporary directory to save the results of all
            processes. Default: None.
        gpu_collect (bool): Whether to use gpu or cpu to collect results.
            Default: False.
        broadcast_bn_buffer (bool): Whether to broadcast the
            buffer(running_mean and running_var) of rank 0 to other rank
            before evaluation. Default: True.
        out_dir (str, optional): The root directory to save checkpoints. If not
            specified, `runner.work_dir` will be used by default. If specified,
            the `out_dir` will be the concatenation of `out_dir` and the last
            level directory of `runner.work_dir`.
        file_client_args (dict): Arguments to instantiate a FileClient.
            See :class:`mmcv.fileio.FileClient` for details. Default: None.
        **eval_kwargs: Evaluation arguments fed into the evaluate function of
            the dataset.
    """

    def __init__(self,
                 dataloader,
                 start=None,
                 interval=1,
                 by_epoch=True,
                 save_best=None,
                 rule=None,
                 test_fn=None,
                 greater_keys=None,
                 less_keys=None,
                 broadcast_bn_buffer=True,
                 tmpdir=None,
                 gpu_collect=False,
                 out_dir=None,
                 file_client_args=None,
                 **eval_kwargs):

        if test_fn is None:
            from annotator.uniformer.mmcv.engine import multi_gpu_test
            test_fn = multi_gpu_test

        super().__init__(
            dataloader,
            start=start,
            interval=interval,
            by_epoch=by_epoch,
            save_best=save_best,
            rule=rule,
            test_fn=test_fn,
            greater_keys=greater_keys,
            less_keys=less_keys,
            out_dir=out_dir,
            file_client_args=file_client_args,
            **eval_kwargs)

        self.broadcast_bn_buffer = broadcast_bn_buffer
        self.tmpdir = tmpdir
        self.gpu_collect = gpu_collect

    def _do_evaluate(self, runner):
        """perform evaluation and save ckpt."""
        # Synchronization of BatchNorm's buffer (running_mean
        # and running_var) is not supported in the DDP of pytorch,
        # which may cause the inconsistent performance of models in
        # different ranks, so we broadcast BatchNorm's buffers
        # of rank 0 to other ranks to avoid this.
        if self.broadcast_bn_buffer:
            model = runner.model
            for name, module in model.named_modules():
                if isinstance(module,
                              _BatchNorm) and module.track_running_stats:
                    dist.broadcast(module.running_var, 0)
                    dist.broadcast(module.running_mean, 0)

        tmpdir = self.tmpdir
        if tmpdir is None:
            tmpdir = osp.join(runner.work_dir, '.eval_hook')

        results = self.test_fn(
            runner.model,
            self.dataloader,
            tmpdir=tmpdir,
            gpu_collect=self.gpu_collect)
        if runner.rank == 0:
            print('\n')
            runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
            key_score = self.evaluate(runner, results)
            # The key_score may be `None` (evaluation skipped / no metric).
            # Compare against None explicitly so a legitimate score of 0
            # (or 0.0) still updates the best checkpoint.
            if self.save_best and key_score is not None:
                self._save_ckpt(runner, key_score)
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/runner/hooks/hook.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ from annotator.uniformer.mmcv.utils import Registry, is_method_overridden
3
+
4
+ HOOKS = Registry('hook')
5
+
6
+
7
class Hook:
    """Base class for runner hooks.

    A runner calls the stage methods below at the matching points of its
    train/val loop; subclasses override the stages they care about. The
    default implementations are no-ops, with ``*_train_*`` / ``*_val_*``
    variants delegating to the generic ``before/after_epoch`` and
    ``before/after_iter`` methods.
    """

    # All stages a runner can trigger, in canonical execution order.
    stages = ('before_run', 'before_train_epoch', 'before_train_iter',
              'after_train_iter', 'after_train_epoch', 'before_val_epoch',
              'before_val_iter', 'after_val_iter', 'after_val_epoch',
              'after_run')

    def before_run(self, runner):
        """Called once before the training/validation run starts."""
        pass

    def after_run(self, runner):
        """Called once after the training/validation run ends."""
        pass

    def before_epoch(self, runner):
        """Generic per-epoch entry; shared by train and val epochs."""
        pass

    def after_epoch(self, runner):
        """Generic per-epoch exit; shared by train and val epochs."""
        pass

    def before_iter(self, runner):
        """Generic per-iteration entry; shared by train and val iters."""
        pass

    def after_iter(self, runner):
        """Generic per-iteration exit; shared by train and val iters."""
        pass

    def before_train_epoch(self, runner):
        self.before_epoch(runner)

    def before_val_epoch(self, runner):
        self.before_epoch(runner)

    def after_train_epoch(self, runner):
        self.after_epoch(runner)

    def after_val_epoch(self, runner):
        self.after_epoch(runner)

    def before_train_iter(self, runner):
        self.before_iter(runner)

    def before_val_iter(self, runner):
        self.before_iter(runner)

    def after_train_iter(self, runner):
        self.after_iter(runner)

    def after_val_iter(self, runner):
        self.after_iter(runner)

    def every_n_epochs(self, runner, n):
        """Return True on every n-th epoch (1-based); False when n <= 0."""
        return (runner.epoch + 1) % n == 0 if n > 0 else False

    def every_n_inner_iters(self, runner, n):
        """Return True on every n-th inner iter (1-based); False if n <= 0."""
        return (runner.inner_iter + 1) % n == 0 if n > 0 else False

    def every_n_iters(self, runner, n):
        """Return True on every n-th global iter (1-based); False if n <= 0."""
        return (runner.iter + 1) % n == 0 if n > 0 else False

    def end_of_epoch(self, runner):
        """Return True when the current inner iter is the last of the epoch."""
        return runner.inner_iter + 1 == len(runner.data_loader)

    def is_last_epoch(self, runner):
        """Return True when the current epoch is the final one of the run."""
        return runner.epoch + 1 == runner._max_epochs

    def is_last_iter(self, runner):
        """Return True when the current iter is the final one of the run."""
        return runner.iter + 1 == runner._max_iters

    def get_triggered_stages(self):
        """Return the ordered list of stages this hook actually overrides.

        A stage counts as triggered if the subclass overrides either the
        stage method itself or the generic method it delegates to.
        """
        trigger_stages = set()
        for stage in Hook.stages:
            if is_method_overridden(stage, Hook, self):
                trigger_stages.add(stage)

        # some methods will be triggered in multi stages
        # use this dict to map method to stages.
        method_stages_map = {
            'before_epoch': ['before_train_epoch', 'before_val_epoch'],
            'after_epoch': ['after_train_epoch', 'after_val_epoch'],
            'before_iter': ['before_train_iter', 'before_val_iter'],
            'after_iter': ['after_train_iter', 'after_val_iter'],
        }

        for method, map_stages in method_stages_map.items():
            if is_method_overridden(method, Hook, self):
                trigger_stages.update(map_stages)

        # Preserve canonical stage order in the returned list.
        return [stage for stage in Hook.stages if stage in trigger_stages]
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/runner/hooks/iter_timer.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import time
3
+
4
+ from .hook import HOOKS, Hook
5
+
6
+
7
@HOOKS.register_module()
class IterTimerHook(Hook):
    """Record data-loading time and per-iteration time in the log buffer."""

    def before_epoch(self, runner):
        # Reset the reference timestamp at the start of every epoch.
        self.t = time.time()

    def before_iter(self, runner):
        # Elapsed time since the last mark is the data-loading time.
        runner.log_buffer.update({'data_time': time.time() - self.t})

    def after_iter(self, runner):
        # Elapsed time since the last mark is the full iteration time;
        # then restart the clock for the next iteration.
        runner.log_buffer.update({'time': time.time() - self.t})
        self.t = time.time()
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/runner/hooks/logger/__init__.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ from .base import LoggerHook
3
+ from .dvclive import DvcliveLoggerHook
4
+ from .mlflow import MlflowLoggerHook
5
+ from .neptune import NeptuneLoggerHook
6
+ from .pavi import PaviLoggerHook
7
+ from .tensorboard import TensorboardLoggerHook
8
+ from .text import TextLoggerHook
9
+ from .wandb import WandbLoggerHook
10
+
11
+ __all__ = [
12
+ 'LoggerHook', 'MlflowLoggerHook', 'PaviLoggerHook',
13
+ 'TensorboardLoggerHook', 'TextLoggerHook', 'WandbLoggerHook',
14
+ 'NeptuneLoggerHook', 'DvcliveLoggerHook'
15
+ ]
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/runner/hooks/logger/base.py ADDED
@@ -0,0 +1,166 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import numbers
3
+ from abc import ABCMeta, abstractmethod
4
+
5
+ import numpy as np
6
+ import torch
7
+
8
+ from ..hook import Hook
9
+
10
+
11
class LoggerHook(Hook):
    """Base class for logger hooks.

    Args:
        interval (int): Logging interval (every k iterations).
        ignore_last (bool): Ignore the log of last iterations in each epoch
            if less than `interval`.
        reset_flag (bool): Whether to clear the output buffer after logging.
        by_epoch (bool): Whether EpochBasedRunner is used.
    """

    # NOTE: Python-2 style metaclass declaration; it has no effect on
    # Python 3 (the class is not actually abstract), kept for backward
    # compatibility of the class attribute.
    __metaclass__ = ABCMeta

    def __init__(self,
                 interval=10,
                 ignore_last=True,
                 reset_flag=False,
                 by_epoch=True):
        self.interval = interval
        self.ignore_last = ignore_last
        self.reset_flag = reset_flag
        self.by_epoch = by_epoch

    @abstractmethod
    def log(self, runner):
        pass

    @staticmethod
    def is_scalar(val, include_np=True, include_torch=True):
        """Tell the input variable is a scalar or not.

        Args:
            val: Input variable.
            include_np (bool): Whether include 0-d np.ndarray as a scalar.
            include_torch (bool): Whether include 0-d torch.Tensor as a scalar.

        Returns:
            bool: True or False.
        """
        if isinstance(val, numbers.Number):
            return True
        elif include_np and isinstance(val, np.ndarray) and val.ndim == 0:
            return True
        elif include_torch and isinstance(val, torch.Tensor) and \
                val.numel() == 1:
            # ``numel() == 1`` covers both 0-d tensors (where ``len()``
            # would raise TypeError) and single-element tensors, and
            # correctly rejects shape-(1, n) tensors that ``len(val) == 1``
            # used to misclassify as scalars.
            return True
        else:
            return False

    def get_mode(self, runner):
        """Infer the logging mode ('train' or 'val') for the current buffer."""
        if runner.mode == 'train':
            if 'time' in runner.log_buffer.output:
                mode = 'train'
            else:
                mode = 'val'
        elif runner.mode == 'val':
            mode = 'val'
        else:
            raise ValueError(f"runner mode should be 'train' or 'val', "
                             f'but got {runner.mode}')
        return mode

    def get_epoch(self, runner):
        """Return the 1-based epoch number to report for the current mode."""
        if runner.mode == 'train':
            epoch = runner.epoch + 1
        elif runner.mode == 'val':
            # normal val mode
            # runner.epoch += 1 has been done before val workflow
            epoch = runner.epoch
        else:
            raise ValueError(f"runner mode should be 'train' or 'val', "
                             f'but got {runner.mode}')
        return epoch

    def get_iter(self, runner, inner_iter=False):
        """Get the current training iteration step."""
        if self.by_epoch and inner_iter:
            current_iter = runner.inner_iter + 1
        else:
            current_iter = runner.iter + 1
        return current_iter

    def get_lr_tags(self, runner):
        """Collect learning-rate tags (per parameter group name if a dict)."""
        tags = {}
        lrs = runner.current_lr()
        if isinstance(lrs, dict):
            for name, value in lrs.items():
                tags[f'learning_rate/{name}'] = value[0]
        else:
            tags['learning_rate'] = lrs[0]
        return tags

    def get_momentum_tags(self, runner):
        """Collect momentum tags (per parameter group name if a dict)."""
        tags = {}
        momentums = runner.current_momentum()
        if isinstance(momentums, dict):
            for name, value in momentums.items():
                tags[f'momentum/{name}'] = value[0]
        else:
            tags['momentum'] = momentums[0]
        return tags

    def get_loggable_tags(self,
                          runner,
                          allow_scalar=True,
                          allow_text=False,
                          add_mode=True,
                          tags_to_skip=('time', 'data_time')):
        """Gather the log-buffer outputs plus lr/momentum into a tag dict."""
        tags = {}
        for var, val in runner.log_buffer.output.items():
            if var in tags_to_skip:
                continue
            if self.is_scalar(val) and not allow_scalar:
                continue
            if isinstance(val, str) and not allow_text:
                continue
            if add_mode:
                var = f'{self.get_mode(runner)}/{var}'
            tags[var] = val
        tags.update(self.get_lr_tags(runner))
        tags.update(self.get_momentum_tags(runner))
        return tags

    def before_run(self, runner):
        # Only the last registered LoggerHook clears the output buffer, so
        # earlier logger hooks still see the data.
        for hook in runner.hooks[::-1]:
            if isinstance(hook, LoggerHook):
                hook.reset_flag = True
                break

    def before_epoch(self, runner):
        runner.log_buffer.clear()  # clear logs of last epoch

    def after_train_iter(self, runner):
        if self.by_epoch and self.every_n_inner_iters(runner, self.interval):
            runner.log_buffer.average(self.interval)
        elif not self.by_epoch and self.every_n_iters(runner, self.interval):
            runner.log_buffer.average(self.interval)
        elif self.end_of_epoch(runner) and not self.ignore_last:
            # not precise but more stable
            runner.log_buffer.average(self.interval)

        if runner.log_buffer.ready:
            self.log(runner)
            if self.reset_flag:
                runner.log_buffer.clear_output()

    def after_train_epoch(self, runner):
        if runner.log_buffer.ready:
            self.log(runner)
            if self.reset_flag:
                runner.log_buffer.clear_output()

    def after_val_epoch(self, runner):
        runner.log_buffer.average()
        self.log(runner)
        if self.reset_flag:
            runner.log_buffer.clear_output()
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/runner/hooks/logger/dvclive.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ from ...dist_utils import master_only
3
+ from ..hook import HOOKS
4
+ from .base import LoggerHook
5
+
6
+
7
@HOOKS.register_module()
class DvcliveLoggerHook(LoggerHook):
    """Class to log metrics with dvclive.

    It requires `dvclive`_ to be installed.

    Args:
        path (str): Directory where dvclive will write TSV log files.
        interval (int): Logging interval (every k iterations). Default 10.
        ignore_last (bool): Ignore the log of last iterations in each epoch
            if less than `interval`. Default: True.
        reset_flag (bool): Whether to clear the output buffer after logging.
            Default: True.
        by_epoch (bool): Whether EpochBasedRunner is used. Default: True.

    .. _dvclive:
        https://dvc.org/doc/dvclive
    """

    def __init__(self,
                 path,
                 interval=10,
                 ignore_last=True,
                 reset_flag=True,
                 by_epoch=True):
        super(DvcliveLoggerHook, self).__init__(interval, ignore_last,
                                                reset_flag, by_epoch)
        self.path = path
        self.import_dvclive()

    def import_dvclive(self):
        """Import the dvclive package, raising a helpful error if missing."""
        try:
            import dvclive
        except ImportError:
            raise ImportError(
                'Please run "pip install dvclive" to install dvclive')
        self.dvclive = dvclive

    @master_only
    def before_run(self, runner):
        # Point dvclive at the configured output directory.
        self.dvclive.init(self.path)

    @master_only
    def log(self, runner):
        # Emit every loggable tag at the current global iteration.
        step = self.get_iter(runner)
        for tag, value in self.get_loggable_tags(runner).items():
            self.dvclive.log(tag, value, step=step)
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/runner/hooks/logger/mlflow.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ from ...dist_utils import master_only
3
+ from ..hook import HOOKS
4
+ from .base import LoggerHook
5
+
6
+
7
@HOOKS.register_module()
class MlflowLoggerHook(LoggerHook):
    """Class to log metrics and (optionally) a trained model to MLflow.

    It requires `MLflow`_ to be installed.

    Args:
        exp_name (str, optional): Name of the experiment to be used.
            Default None.
            If not None, set the active experiment.
            If experiment does not exist, an experiment with provided name
            will be created.
        tags (dict of str: str, optional): Tags for the current run.
            Default None.
            If not None, set tags for the current run.
        log_model (bool, optional): Whether to log an MLflow artifact.
            Default True.
            If True, log runner.model as an MLflow artifact
            for the current run.
        interval (int): Logging interval (every k iterations).
        ignore_last (bool): Ignore the log of last iterations in each epoch
            if less than `interval`.
        reset_flag (bool): Whether to clear the output buffer after logging
        by_epoch (bool): Whether EpochBasedRunner is used.

    .. _MLflow:
        https://www.mlflow.org/docs/latest/index.html
    """

    def __init__(self,
                 exp_name=None,
                 tags=None,
                 log_model=True,
                 interval=10,
                 ignore_last=True,
                 reset_flag=False,
                 by_epoch=True):
        # NOTE: the docstring was previously (incorrectly) attached to
        # __init__; it is now a class docstring, consistent with the
        # sibling logger hooks.
        super(MlflowLoggerHook, self).__init__(interval, ignore_last,
                                               reset_flag, by_epoch)
        self.import_mlflow()
        self.exp_name = exp_name
        self.tags = tags
        self.log_model = log_model

    def import_mlflow(self):
        """Import mlflow, raising a helpful error if it is not installed."""
        try:
            import mlflow
            import mlflow.pytorch as mlflow_pytorch
        except ImportError:
            raise ImportError(
                'Please run "pip install mlflow" to install mlflow')
        self.mlflow = mlflow
        self.mlflow_pytorch = mlflow_pytorch

    @master_only
    def before_run(self, runner):
        super(MlflowLoggerHook, self).before_run(runner)
        if self.exp_name is not None:
            self.mlflow.set_experiment(self.exp_name)
        if self.tags is not None:
            self.mlflow.set_tags(self.tags)

    @master_only
    def log(self, runner):
        # All tags are logged in one call at the current global iteration.
        tags = self.get_loggable_tags(runner)
        if tags:
            self.mlflow.log_metrics(tags, step=self.get_iter(runner))

    @master_only
    def after_run(self, runner):
        # Optionally persist the final model as an MLflow artifact.
        if self.log_model:
            self.mlflow_pytorch.log_model(runner.model, 'models')
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/runner/hooks/logger/neptune.py ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ from ...dist_utils import master_only
3
+ from ..hook import HOOKS
4
+ from .base import LoggerHook
5
+
6
+
7
@HOOKS.register_module()
class NeptuneLoggerHook(LoggerHook):
    """Class to log metrics to NeptuneAI.

    It requires `neptune-client` to be installed.

    Args:
        init_kwargs (dict): a dict contains the initialization keys as below:
            - project (str): Name of a project in a form of
              namespace/project_name. If None, the value of
              NEPTUNE_PROJECT environment variable will be taken.
            - api_token (str): User's API token.
              If None, the value of NEPTUNE_API_TOKEN environment
              variable will be taken. Note: It is strongly recommended
              to use NEPTUNE_API_TOKEN environment variable rather than
              placing your API token in plain text in your source code.
            - name (str, optional, default is 'Untitled'): Editable name of
              the run. Name is displayed in the run's Details and in
              Runs table as a column.
            Check https://docs.neptune.ai/api-reference/neptune#init for
            more init arguments.
        interval (int): Logging interval (every k iterations).
        ignore_last (bool): Ignore the log of last iterations in each epoch
            if less than `interval`.
        reset_flag (bool): Whether to clear the output buffer after logging
        with_step (bool): If True, pass the current iteration as ``step`` to
            each ``run[tag].log`` call; otherwise a ``global_step`` field is
            logged alongside the metrics instead. Default: True.
        by_epoch (bool): Whether EpochBasedRunner is used.

    .. _NeptuneAI:
        https://docs.neptune.ai/you-should-know/logging-metadata
    """

    def __init__(self,
                 init_kwargs=None,
                 interval=10,
                 ignore_last=True,
                 reset_flag=True,
                 with_step=True,
                 by_epoch=True):

        super(NeptuneLoggerHook, self).__init__(interval, ignore_last,
                                                reset_flag, by_epoch)
        self.import_neptune()
        self.init_kwargs = init_kwargs
        self.with_step = with_step

    def import_neptune(self):
        """Import the neptune client, raising a helpful error if missing."""
        try:
            import neptune.new as neptune
        except ImportError:
            raise ImportError(
                'Please run "pip install neptune-client" to install neptune')
        self.neptune = neptune
        self.run = None

    @master_only
    def before_run(self, runner):
        if self.init_kwargs:
            self.run = self.neptune.init(**self.init_kwargs)
        else:
            self.run = self.neptune.init()

    @master_only
    def log(self, runner):
        tags = self.get_loggable_tags(runner)
        if not tags:
            return
        if self.with_step:
            step = self.get_iter(runner)
            for tag_name, tag_value in tags.items():
                self.run[tag_name].log(tag_value, step=step)
        else:
            # BUGFIX: the original added 'global_step' to `tags` *inside*
            # the loop over tags.items() (RuntimeError: dictionary changed
            # size during iteration) and logged the whole dict into every
            # field instead of the value. Add the step once, then log each
            # value individually.
            tags['global_step'] = self.get_iter(runner)
            for tag_name, tag_value in tags.items():
                self.run[tag_name].log(tag_value)

    @master_only
    def after_run(self, runner):
        # Guard against before_run never having initialized the run.
        if self.run is not None:
            self.run.stop()
CCEdit-main/src/controlnet11/annotator/uniformer/mmcv/runner/hooks/logger/pavi.py ADDED
@@ -0,0 +1,117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import json
3
+ import os
4
+ import os.path as osp
5
+
6
+ import torch
7
+ import yaml
8
+
9
+ import annotator.uniformer.mmcv as mmcv
10
+ from ....parallel.utils import is_module_wrapper
11
+ from ...dist_utils import master_only
12
+ from ..hook import HOOKS
13
+ from .base import LoggerHook
14
+
15
+
16
@HOOKS.register_module()
class PaviLoggerHook(LoggerHook):
    """Hook that streams training logs (and optionally the model graph and
    the last checkpoint) to a Pavi ``SummaryWriter``."""

    def __init__(self,
                 init_kwargs=None,
                 add_graph=False,
                 add_last_ckpt=False,
                 interval=10,
                 ignore_last=True,
                 reset_flag=False,
                 by_epoch=True,
                 img_key='img_info'):
        super(PaviLoggerHook, self).__init__(interval, ignore_last, reset_flag,
                                             by_epoch)
        self.init_kwargs = init_kwargs
        self.add_graph = add_graph
        self.add_last_ckpt = add_last_ckpt
        self.img_key = img_key

    @master_only
    def before_run(self, runner):
        super(PaviLoggerHook, self).before_run(runner)
        try:
            from pavi import SummaryWriter
        except ImportError:
            raise ImportError('Please run "pip install pavi" to install pavi.')

        # The run is named after the last component of the work directory.
        self.run_name = runner.work_dir.split('/')[-1]

        if not self.init_kwargs:
            self.init_kwargs = dict()
        self.init_kwargs['name'] = self.run_name
        self.init_kwargs['model'] = runner._model_name

        cfg = None
        if runner.meta is not None:
            if 'config_dict' in runner.meta:
                cfg = runner.meta['config_dict']
                assert isinstance(
                    cfg, dict), ('meta["config_dict"] has to be of a dict, '
                                 f'but got {type(cfg)}')
            elif 'config_file' in runner.meta:
                cfg = dict(mmcv.Config.fromfile(runner.meta['config_file']))
        if cfg is not None:
            # 'max_.*iter' is parsed in pavi sdk as the maximum iterations
            # to properly set up the progress bar.
            cfg = cfg.copy()
            cfg.setdefault('max_iter', runner.max_iters)
            # non-serializable values are first converted in
            # mmcv.dump to json
            serializable = json.loads(mmcv.dump(cfg, file_format='json'))
            self.init_kwargs['session_text'] = yaml.dump(serializable)
        self.writer = SummaryWriter(**self.init_kwargs)

    def get_step(self, runner):
        """Get the total training step/epoch."""
        if self.by_epoch and self.get_mode(runner) == 'val':
            return self.get_epoch(runner)
        return self.get_iter(runner)

    @master_only
    def log(self, runner):
        tags = self.get_loggable_tags(runner, add_mode=False)
        if tags:
            self.writer.add_scalars(self.get_mode(runner), tags,
                                    self.get_step(runner))

    @master_only
    def after_run(self, runner):
        if self.add_last_ckpt:
            ckpt_path = osp.join(runner.work_dir, 'latest.pth')
            if osp.islink(ckpt_path):
                ckpt_path = osp.join(runner.work_dir, os.readlink(ckpt_path))

            if osp.isfile(ckpt_path):
                # runner.epoch += 1 has been done before `after_run`.
                iteration = runner.epoch if self.by_epoch else runner.iter
                # NOTE(review): this early return skips writer.close()
                # below — presumably add_snapshot_file already ends the
                # Pavi task; confirm.
                return self.writer.add_snapshot_file(
                    tag=self.run_name,
                    snapshot_file_path=ckpt_path,
                    iteration=iteration)

        # flush the buffer and send a task ending signal to Pavi
        self.writer.close()

    @master_only
    def before_epoch(self, runner):
        # NOTE(review): overrides LoggerHook.before_epoch without calling
        # super, so the log buffer is not cleared here — confirm intended.
        # Trace the model graph only once, at the very first epoch.
        if runner.epoch == 0 and self.add_graph:
            if is_module_wrapper(runner.model):
                model = runner.model.module
            else:
                model = runner.model
            device = next(model.parameters()).device
            batch = next(iter(runner.data_loader))
            image = batch[self.img_key][0:1].to(device)
            with torch.no_grad():
                self.writer.add_graph(model, image)