# NOTE: dataset-export residue (column header): "code stringlengths 17 6.64M"
@ACTIVATION_LAYERS.register_module()
class Swish(nn.Module):
    """Swish activation module.

    Applies the swish function element-wise:

    .. math::
        Swish(x) = x * Sigmoid(x)

    Returns:
        Tensor: The output tensor.
    """

    def __init__(self):
        super(Swish, self).__init__()

    def forward(self, x):
        # gate the input by its own sigmoid
        gate = torch.sigmoid(x)
        return x * gate
def build_positional_encoding(cfg, default_args=None):
    """Build a positional-encoding module from config.

    Args:
        cfg (dict): Config dict, must contain the key ``type``.
        default_args (dict, optional): Default arguments merged into ``cfg``.

    Returns:
        nn.Module: The built positional encoding module.
    """
    return build_from_cfg(cfg, POSITIONAL_ENCODING, default_args)
def build_attention(cfg, default_args=None):
    """Build an attention module from config.

    Args:
        cfg (dict): Config dict, must contain the key ``type``.
        default_args (dict, optional): Default arguments merged into ``cfg``.

    Returns:
        nn.Module: The built attention module.
    """
    return build_from_cfg(cfg, ATTENTION, default_args)
def build_feedforward_network(cfg, default_args=None):
    """Build a feed-forward network (FFN) from config.

    Args:
        cfg (dict): Config dict, must contain the key ``type``.
        default_args (dict, optional): Default arguments merged into ``cfg``.

    Returns:
        nn.Module: The built FFN module.
    """
    return build_from_cfg(cfg, FEEDFORWARD_NETWORK, default_args)
def build_transformer_layer(cfg, default_args=None):
    """Build a transformer layer from config.

    Args:
        cfg (dict): Config dict, must contain the key ``type``.
        default_args (dict, optional): Default arguments merged into ``cfg``.

    Returns:
        nn.Module: The built transformer layer.
    """
    return build_from_cfg(cfg, TRANSFORMER_LAYER, default_args)
def build_transformer_layer_sequence(cfg, default_args=None):
    """Build a transformer encoder or decoder from config.

    Args:
        cfg (dict): Config dict, must contain the key ``type``.
        default_args (dict, optional): Default arguments merged into ``cfg``.

    Returns:
        nn.Module: The built transformer layer sequence.
    """
    return build_from_cfg(cfg, TRANSFORMER_LAYER_SEQUENCE, default_args)
class AdaptivePadding(nn.Module):
    """Zero-pad the input so it is fully covered by the sliding filter.

    Two modes are supported:

    - ``"same"``: like TensorFlow "SAME" padding, zeros are split evenly
      around the input (extra pixel on the far side when odd).
    - ``"corner"``: all padding is placed at the bottom-right corner.

    Args:
        kernel_size (int | tuple): Size of the kernel. Default: 1.
        stride (int | tuple): Stride of the filter. Default: 1.
        dilation (int | tuple): Spacing between kernel elements. Default: 1.
        padding (str): Either ``"same"`` or ``"corner"``. Default: "corner".

    Example:
        >>> kernel_size = 16
        >>> stride = 16
        >>> dilation = 1
        >>> input = torch.rand(1, 1, 15, 17)
        >>> adap_pad = AdaptivePadding(
        >>>     kernel_size=kernel_size,
        >>>     stride=stride,
        >>>     dilation=dilation,
        >>>     padding="corner")
        >>> out = adap_pad(input)
        >>> assert (out.shape[2], out.shape[3]) == (16, 32)
    """

    def __init__(self, kernel_size=1, stride=1, dilation=1, padding='corner'):
        super(AdaptivePadding, self).__init__()
        assert padding in ('same', 'corner')
        self.padding = padding
        self.kernel_size = to_2tuple(kernel_size)
        self.stride = to_2tuple(stride)
        self.dilation = to_2tuple(dilation)

    def get_pad_shape(self, input_shape):
        """Return ``(pad_h, pad_w)`` needed for an input of shape (H, W)."""
        input_h, input_w = input_shape
        kernel_h, kernel_w = self.kernel_size
        stride_h, stride_w = self.stride
        # number of output positions when the input is fully covered
        output_h = math.ceil(input_h / stride_h)
        output_w = math.ceil(input_w / stride_w)
        pad_h = max(
            (output_h - 1) * stride_h +
            (kernel_h - 1) * self.dilation[0] + 1 - input_h, 0)
        pad_w = max(
            (output_w - 1) * stride_w +
            (kernel_w - 1) * self.dilation[1] + 1 - input_w, 0)
        return pad_h, pad_w

    def forward(self, x):
        """Pad ``x`` (B, C, H, W) adaptively and return the padded tensor."""
        pad_h, pad_w = self.get_pad_shape(x.size()[-2:])
        if pad_h <= 0 and pad_w <= 0:
            return x
        if self.padding == 'corner':
            # everything to the right/bottom
            return F.pad(x, [0, pad_w, 0, pad_h])
        # 'same': split evenly, far side gets the odd pixel
        half_w = pad_w // 2
        half_h = pad_h // 2
        return F.pad(x, [half_w, pad_w - half_w, half_h, pad_h - half_h])
class PatchEmbed(BaseModule):
    """Image to Patch Embedding.

    We use a conv layer to implement PatchEmbed.

    Args:
        in_channels (int): The num of input channels. Default: 3
        embed_dims (int): The dimensions of embedding. Default: 768
        conv_type (str): The type of convolution
            to generate patch embedding. Default: "Conv2d".
        kernel_size (int): The kernel_size of embedding conv. Default: 16.
        stride (int): The slide stride of embedding conv.
            Default: 16.
        padding (int | tuple | string): The padding length of
            embedding conv. When it is a string, it means the mode
            of adaptive padding, support "same" and "corner" now.
            Default: "corner".
        dilation (int): The dilation rate of embedding conv. Default: 1.
        bias (bool): Bias of embed conv. Default: True.
        norm_cfg (dict, optional): Config dict for normalization layer.
            Default: None.
        input_size (int | tuple | None): The size of input, which will be
            used to calculate the out size. Only works when `dynamic_size`
            is False. Default: None.
        init_cfg (`mmcv.ConfigDict`, optional): The Config for initialization.
            Default: None.
    """

    def __init__(self,
                 in_channels=3,
                 embed_dims=768,
                 conv_type='Conv2d',
                 kernel_size=16,
                 stride=16,
                 padding='corner',
                 dilation=1,
                 bias=True,
                 norm_cfg=None,
                 input_size=None,
                 init_cfg=None):
        super(PatchEmbed, self).__init__(init_cfg=init_cfg)
        self.embed_dims = embed_dims
        if (stride is None):
            stride = kernel_size
        kernel_size = to_2tuple(kernel_size)
        stride = to_2tuple(stride)
        dilation = to_2tuple(dilation)
        if isinstance(padding, str):
            # string padding selects adaptive padding; the conv itself then
            # runs with zero padding
            self.adaptive_padding = AdaptivePadding(kernel_size=kernel_size,
                                                    stride=stride,
                                                    dilation=dilation,
                                                    padding=padding)
            padding = 0
        else:
            self.adaptive_padding = None
        padding = to_2tuple(padding)
        self.projection = build_conv_layer(dict(type=conv_type),
                                           in_channels=in_channels,
                                           out_channels=embed_dims,
                                           kernel_size=kernel_size,
                                           stride=stride,
                                           padding=padding,
                                           dilation=dilation,
                                           bias=bias)
        if (norm_cfg is not None):
            self.norm = build_norm_layer(norm_cfg, embed_dims)[1]
        else:
            self.norm = None
        if input_size:
            input_size = to_2tuple(input_size)
            self.init_input_size = input_size
            if self.adaptive_padding:
                # account for the extra pixels the adaptive padding adds
                # before the projection conv sees the input
                (pad_h, pad_w) = self.adaptive_padding.get_pad_shape(input_size)
                (input_h, input_w) = input_size
                input_h = (input_h + pad_h)
                input_w = (input_w + pad_w)
                input_size = (input_h, input_w)
            # standard conv output-size formula
            h_out = (((((input_size[0] + (2 * padding[0])) -
                        (dilation[0] * (kernel_size[0] - 1))) - 1) //
                      stride[0]) + 1)
            w_out = (((((input_size[1] + (2 * padding[1])) -
                        (dilation[1] * (kernel_size[1] - 1))) - 1) //
                      stride[1]) + 1)
            self.init_out_size = (h_out, w_out)
        else:
            self.init_input_size = None
            self.init_out_size = None

    def forward(self, x):
        """
        Args:
            x (Tensor): Has shape (B, C, H, W). In most case, C is 3.

        Returns:
            tuple: Contains merged results and its spatial shape.

            - x (Tensor): Has shape (B, out_h * out_w, embed_dims)
            - out_size (tuple[int]): Spatial shape of x, arrange as
              (out_h, out_w).
        """
        if self.adaptive_padding:
            x = self.adaptive_padding(x)
        x = self.projection(x)
        out_size = (x.shape[2], x.shape[3])
        # flatten spatial dims: (B, C, H, W) -> (B, H*W, C)
        x = x.flatten(2).transpose(1, 2)
        if (self.norm is not None):
            x = self.norm(x)
        return (x, out_size)
class PatchMerging(BaseModule):
    """Merge patch feature map.

    This layer groups feature map by kernel_size, and applies norm and linear
    layers to the grouped feature map (used in Swin Transformer).
    Our implementation uses ``nn.Unfold`` to merge patches, which is about
    25% faster than the original implementation. However, we need to modify
    pretrained models for compatibility.

    Args:
        in_channels (int): The num of input channels.
        out_channels (int): The num of output channels.
        kernel_size (int | tuple, optional): the kernel size in the unfold
            layer. Defaults to 2.
        stride (int | tuple, optional): the stride of the sliding blocks in
            the unfold layer. Default: None. (Would be set as ``kernel_size``)
        padding (int | tuple | string): The padding length of embedding conv.
            When it is a string, it means the mode of adaptive padding,
            support "same" and "corner" now. Default: "corner".
        dilation (int | tuple, optional): dilation parameter in the unfold
            layer. Default: 1.
        bias (bool, optional): Whether to add bias in linear layer or not.
            Defaults: False.
        norm_cfg (dict, optional): Config dict for normalization layer.
            Default: dict(type='LN').
        init_cfg (dict, optional): The extra config for initialization.
            Default: None.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=2,
                 stride=None,
                 padding='corner',
                 dilation=1,
                 bias=False,
                 norm_cfg=dict(type='LN'),
                 init_cfg=None):
        super().__init__(init_cfg=init_cfg)
        self.in_channels = in_channels
        self.out_channels = out_channels
        if stride:
            stride = stride
        else:
            # default: non-overlapping windows
            stride = kernel_size
        kernel_size = to_2tuple(kernel_size)
        stride = to_2tuple(stride)
        dilation = to_2tuple(dilation)
        if isinstance(padding, str):
            # string padding selects adaptive padding; the unfold itself then
            # runs with zero padding
            self.adaptive_padding = AdaptivePadding(kernel_size=kernel_size,
                                                    stride=stride,
                                                    dilation=dilation,
                                                    padding=padding)
            padding = 0
        else:
            self.adaptive_padding = None
        padding = to_2tuple(padding)
        self.sampler = nn.Unfold(kernel_size=kernel_size,
                                 dilation=dilation,
                                 padding=padding,
                                 stride=stride)
        # each unfolded column concatenates kernel_h*kernel_w patches of C
        sample_dim = ((kernel_size[0] * kernel_size[1]) * in_channels)
        if (norm_cfg is not None):
            self.norm = build_norm_layer(norm_cfg, sample_dim)[1]
        else:
            self.norm = None
        self.reduction = nn.Linear(sample_dim, out_channels, bias=bias)

    def forward(self, x, input_size):
        """
        Args:
            x (Tensor): Has shape (B, H*W, C_in).
            input_size (tuple[int]): The spatial shape of x, arrange as
                (H, W). Default: None.

        Returns:
            tuple: Contains merged results and its spatial shape.

            - x (Tensor): Has shape (B, Merged_H * Merged_W, C_out)
            - out_size (tuple[int]): Spatial shape of x, arrange as
              (Merged_H, Merged_W).
        """
        (B, L, C) = x.shape
        assert isinstance(input_size, Sequence), f'Expect input_size is `Sequence` but get {input_size}'
        (H, W) = input_size
        assert (L == (H * W)), 'input feature has wrong size'
        # (B, H*W, C) -> (B, C, H, W) for unfold
        x = x.view(B, H, W, C).permute([0, 3, 1, 2])
        if self.adaptive_padding:
            x = self.adaptive_padding(x)
            # spatial size may have grown after adaptive padding
            (H, W) = x.shape[(- 2):]
        x = self.sampler(x)
        # standard conv/unfold output-size formula
        out_h = (((((H + (2 * self.sampler.padding[0])) -
                    (self.sampler.dilation[0] *
                     (self.sampler.kernel_size[0] - 1))) - 1) //
                  self.sampler.stride[0]) + 1)
        out_w = (((((W + (2 * self.sampler.padding[1])) -
                    (self.sampler.dilation[1] *
                     (self.sampler.kernel_size[1] - 1))) - 1) //
                  self.sampler.stride[1]) + 1)
        output_size = (out_h, out_w)
        # (B, sample_dim, out_h*out_w) -> (B, out_h*out_w, sample_dim)
        x = x.transpose(1, 2)
        x = (self.norm(x) if self.norm else x)
        x = self.reduction(x)
        return (x, output_size)
@ATTENTION.register_module()
class MultiheadAttention(BaseModule):
    """A wrapper for ``torch.nn.MultiheadAttention``.

    This module implements MultiheadAttention with identity connection,
    and positional encoding is also passed as input.

    Args:
        embed_dims (int): The embedding dimension.
        num_heads (int): Parallel attention heads.
        attn_drop (float): A Dropout layer on attn_output_weights.
            Default: 0.0.
        proj_drop (float): A Dropout layer after `nn.MultiheadAttention`.
            Default: 0.0.
        dropout_layer (obj:`ConfigDict`): The dropout_layer used
            when adding the shortcut.
        init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
            Default: None.
        batch_first (bool): When it is True, Key, Query and Value are shape
            of (batch, n, embed_dim), otherwise (n, batch, embed_dim).
            Default to False.
    """

    def __init__(self,
                 embed_dims,
                 num_heads,
                 attn_drop=0.0,
                 proj_drop=0.0,
                 dropout_layer=dict(type='Dropout', drop_prob=0.0),
                 init_cfg=None,
                 batch_first=False,
                 **kwargs):
        super(MultiheadAttention, self).__init__(init_cfg)
        if ('dropout' in kwargs):
            warnings.warn('The arguments `dropout` in MultiheadAttention has been deprecated, now you can separately set `attn_drop`(float), proj_drop(float), and `dropout_layer`(dict) ', DeprecationWarning)
            # redirect the deprecated `dropout` to both the attention dropout
            # and the shortcut dropout layer
            attn_drop = kwargs['dropout']
            dropout_layer['drop_prob'] = kwargs.pop('dropout')
        self.embed_dims = embed_dims
        self.num_heads = num_heads
        self.batch_first = batch_first
        self.attn = nn.MultiheadAttention(embed_dims, num_heads, attn_drop, **kwargs)
        self.proj_drop = nn.Dropout(proj_drop)
        self.dropout_layer = (build_dropout(dropout_layer) if dropout_layer else nn.Identity())

    @deprecated_api_warning({'residual': 'identity'}, cls_name='MultiheadAttention')
    def forward(self,
                query,
                key=None,
                value=None,
                identity=None,
                query_pos=None,
                key_pos=None,
                attn_mask=None,
                key_padding_mask=None,
                **kwargs):
        """Forward function for `MultiheadAttention`.

        **kwargs allow passing a more general data flow when combining
        with other operations in `transformerlayer`.

        Args:
            query (Tensor): Input query of shape [num_queries, bs,
                embed_dims] if self.batch_first is False, else
                [bs, num_queries, embed_dims].
            key (Tensor): Key tensor; same layout convention as `query`.
                If None, the ``query`` will be used. Defaults to None.
            value (Tensor): The value tensor with same shape as `key`.
                If None, the `key` will be used. Defaults to None.
            identity (Tensor): Tensor used for the identity link; `query`
                is used when None. Defaults to None.
            query_pos (Tensor): Positional encoding added to `query`.
                Defaults to None.
            key_pos (Tensor): Positional encoding added to `key`. If None
                and `query_pos` has the same shape as `key`, `query_pos`
                is reused for `key`. Defaults to None.
            attn_mask (Tensor): ByteTensor mask with shape [num_queries,
                num_keys]. Same in `nn.MultiheadAttention.forward`.
                Defaults to None.
            key_padding_mask (Tensor): ByteTensor with shape [bs, num_keys].
                Defaults to None.

        Returns:
            Tensor: forwarded results, same layout as `query`.
        """
        if (key is None):
            key = query
        if (value is None):
            value = key
        if (identity is None):
            identity = query
        if (key_pos is None):
            if (query_pos is not None):
                # reuse query_pos for key only when the shapes agree
                if (query_pos.shape == key.shape):
                    key_pos = query_pos
                else:
                    # NOTE(review): "ismissing" is a pre-existing typo in the
                    # runtime message; kept byte-identical on purpose
                    warnings.warn(f'position encoding of key ismissing in {self.__class__.__name__}.')
        if (query_pos is not None):
            query = (query + query_pos)
        if (key_pos is not None):
            key = (key + key_pos)
        if self.batch_first:
            # nn.MultiheadAttention (pre-batch_first API) expects (n, b, d)
            query = query.transpose(0, 1)
            key = key.transpose(0, 1)
            value = value.transpose(0, 1)
        out = self.attn(query=query, key=key, value=value, attn_mask=attn_mask, key_padding_mask=key_padding_mask)[0]
        if self.batch_first:
            out = out.transpose(0, 1)
        return (identity + self.dropout_layer(self.proj_drop(out)))
@FEEDFORWARD_NETWORK.register_module()
class FFN(BaseModule):
    """Implements feed-forward networks (FFNs) with identity connection.

    Args:
        embed_dims (int): The feature dimension. Same as
            `MultiheadAttention`. Defaults: 256.
        feedforward_channels (int): The hidden dimension of FFNs.
            Defaults: 1024.
        num_fcs (int, optional): The number of fully-connected layers in
            FFNs. Default: 2.
        act_cfg (dict, optional): The activation config for FFNs.
            Default: dict(type='ReLU')
        ffn_drop (float, optional): Probability of an element to be
            zeroed in FFN. Default 0.0.
        add_identity (bool, optional): Whether to add the
            identity connection. Default: `True`.
        dropout_layer (obj:`ConfigDict`): The dropout_layer used
            when adding the shortcut.
        init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
            Default: None.
    """

    @deprecated_api_warning({'dropout': 'ffn_drop', 'add_residual': 'add_identity'}, cls_name='FFN')
    def __init__(self,
                 embed_dims=256,
                 feedforward_channels=1024,
                 num_fcs=2,
                 act_cfg=dict(type='ReLU', inplace=True),
                 ffn_drop=0.0,
                 dropout_layer=None,
                 add_identity=True,
                 init_cfg=None,
                 **kwargs):
        super(FFN, self).__init__(init_cfg)
        assert (num_fcs >= 2), f'num_fcs should be no less than 2. got {num_fcs}.'
        self.embed_dims = embed_dims
        self.feedforward_channels = feedforward_channels
        self.num_fcs = num_fcs
        self.act_cfg = act_cfg
        self.activate = build_activation_layer(act_cfg)
        layers = []
        in_channels = embed_dims
        # first (num_fcs - 1) layers expand to feedforward_channels with
        # activation + dropout; the last layer projects back to embed_dims
        for _ in range((num_fcs - 1)):
            layers.append(Sequential(Linear(in_channels, feedforward_channels), self.activate, nn.Dropout(ffn_drop)))
            in_channels = feedforward_channels
        layers.append(Linear(feedforward_channels, embed_dims))
        layers.append(nn.Dropout(ffn_drop))
        self.layers = Sequential(*layers)
        self.dropout_layer = (build_dropout(dropout_layer) if dropout_layer else torch.nn.Identity())
        self.add_identity = add_identity

    @deprecated_api_warning({'residual': 'identity'}, cls_name='FFN')
    def forward(self, x, identity=None):
        """Forward function for `FFN`.

        The function would add x to the output tensor if residue is None.
        """
        out = self.layers(x)
        if (not self.add_identity):
            return self.dropout_layer(out)
        if (identity is None):
            identity = x
        return (identity + self.dropout_layer(out))
@TRANSFORMER_LAYER.register_module()
class BaseTransformerLayer(BaseModule):
    """Base `TransformerLayer` for vision transformer.

    It can be built from `mmcv.ConfigDict` and support more flexible
    customization, for example, using any number of `FFN or LN ` and
    use different kinds of `attention` by specifying a list of `ConfigDict`
    named `attn_cfgs`. It is worth mentioning that it supports `prenorm`
    when you specifying `norm` as the first element of `operation_order`.
    More details about the `prenorm`: `On Layer Normalization in the
    Transformer Architecture <https://arxiv.org/abs/2002.04745>`_ .

    Args:
        attn_cfgs (list[`mmcv.ConfigDict`] | obj:`mmcv.ConfigDict` | None )):
            Configs for `self_attention` or `cross_attention` modules,
            The order of the configs in the list should be consistent with
            corresponding attentions in operation_order.
            If it is a dict, all of the attention modules in operation_order
            will be built with this config. Default: None.
        ffn_cfgs (list[`mmcv.ConfigDict`] | obj:`mmcv.ConfigDict` | None )):
            Configs for FFN, The order of the configs in the list should be
            consistent with corresponding ffn in operation_order.
            If it is a dict, all of the attention modules in operation_order
            will be built with this config.
        operation_order (tuple[str]): The execution order of operation
            in transformer. Such as ('self_attn', 'norm', 'ffn', 'norm').
            Support `prenorm` when you specifying first element as `norm`.
            Default:None.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='LN').
        init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
            Default: None.
        batch_first (bool): Key, Query and Value are shape
            of (batch, n, embed_dim)
            or (n, batch, embed_dim). Default to False.
    """

    def __init__(self,
                 attn_cfgs=None,
                 ffn_cfgs=dict(type='FFN', embed_dims=256, feedforward_channels=1024, num_fcs=2, ffn_drop=0.0, act_cfg=dict(type='ReLU', inplace=True)),
                 operation_order=None,
                 norm_cfg=dict(type='LN'),
                 init_cfg=None,
                 batch_first=False,
                 **kwargs):
        # map deprecated flat kwargs into the ffn_cfgs dict
        deprecated_args = dict(feedforward_channels='feedforward_channels', ffn_dropout='ffn_drop', ffn_num_fcs='num_fcs')
        for (ori_name, new_name) in deprecated_args.items():
            if (ori_name in kwargs):
                warnings.warn(f'The arguments `{ori_name}` in BaseTransformerLayer has been deprecated, now you should set `{new_name}` and other FFN related arguments to a dict named `ffn_cfgs`. ', DeprecationWarning)
                ffn_cfgs[new_name] = kwargs[ori_name]
        super(BaseTransformerLayer, self).__init__(init_cfg)
        self.batch_first = batch_first
        assert ((set(operation_order) & set(['self_attn', 'norm', 'ffn', 'cross_attn'])) == set(operation_order)), f"The operation_order of {self.__class__.__name__} should contains all four operation type {['self_attn', 'norm', 'ffn', 'cross_attn']}"
        num_attn = (operation_order.count('self_attn') + operation_order.count('cross_attn'))
        if isinstance(attn_cfgs, dict):
            # one shared config: replicate it per attention
            attn_cfgs = [copy.deepcopy(attn_cfgs) for _ in range(num_attn)]
        else:
            assert (num_attn == len(attn_cfgs)), f'The length of attn_cfg {num_attn} is not consistent with the number of attentionin operation_order {operation_order}.'
        self.num_attn = num_attn
        self.operation_order = operation_order
        self.norm_cfg = norm_cfg
        # prenorm iff the very first op is a norm
        self.pre_norm = (operation_order[0] == 'norm')
        self.attentions = ModuleList()
        index = 0
        for operation_name in operation_order:
            if (operation_name in ['self_attn', 'cross_attn']):
                if ('batch_first' in attn_cfgs[index]):
                    assert (self.batch_first == attn_cfgs[index]['batch_first'])
                else:
                    attn_cfgs[index]['batch_first'] = self.batch_first
                attention = build_attention(attn_cfgs[index])
                # tag the module so it knows which role it plays
                attention.operation_name = operation_name
                self.attentions.append(attention)
                index += 1
        self.embed_dims = self.attentions[0].embed_dims
        self.ffns = ModuleList()
        num_ffns = operation_order.count('ffn')
        if isinstance(ffn_cfgs, dict):
            ffn_cfgs = ConfigDict(ffn_cfgs)
        if isinstance(ffn_cfgs, dict):
            ffn_cfgs = [copy.deepcopy(ffn_cfgs) for _ in range(num_ffns)]
        assert (len(ffn_cfgs) == num_ffns)
        for ffn_index in range(num_ffns):
            if ('embed_dims' not in ffn_cfgs[ffn_index]):
                ffn_cfgs[ffn_index]['embed_dims'] = self.embed_dims
            else:
                assert (ffn_cfgs[ffn_index]['embed_dims'] == self.embed_dims)
            self.ffns.append(build_feedforward_network(ffn_cfgs[ffn_index], dict(type='FFN')))
        self.norms = ModuleList()
        num_norms = operation_order.count('norm')
        for _ in range(num_norms):
            self.norms.append(build_norm_layer(norm_cfg, self.embed_dims)[1])

    def forward(self,
                query,
                key=None,
                value=None,
                query_pos=None,
                key_pos=None,
                attn_masks=None,
                query_key_padding_mask=None,
                key_padding_mask=None,
                **kwargs):
        """Forward function for `TransformerDecoderLayer`.

        **kwargs contains some specific arguments of attentions.

        Args:
            query (Tensor): The input query with shape
                [num_queries, bs, embed_dims] if
                self.batch_first is False, else
                [bs, num_queries embed_dims].
            key (Tensor): The key tensor with shape [num_keys, bs,
                embed_dims] if self.batch_first is False, else
                [bs, num_keys, embed_dims] .
            value (Tensor): The value tensor with same shape as `key`.
            query_pos (Tensor): The positional encoding for `query`.
                Default: None.
            key_pos (Tensor): The positional encoding for `key`.
                Default: None.
            attn_masks (List[Tensor] | None): 2D Tensor used in
                calculation of corresponding attention. The length of
                it should equal to the number of `attention` in
                `operation_order`. Default: None.
            query_key_padding_mask (Tensor): ByteTensor for `query`, with
                shape [bs, num_queries]. Only used in `self_attn` layer.
                Defaults to None.
            key_padding_mask (Tensor): ByteTensor for `query`, with
                shape [bs, num_keys]. Default: None.

        Returns:
            Tensor: forwarded results with shape [num_queries, bs, embed_dims].
        """
        norm_index = 0
        attn_index = 0
        ffn_index = 0
        identity = query
        if (attn_masks is None):
            attn_masks = [None for _ in range(self.num_attn)]
        elif isinstance(attn_masks, torch.Tensor):
            # broadcast one mask to every attention
            attn_masks = [copy.deepcopy(attn_masks) for _ in range(self.num_attn)]
            warnings.warn(f'Use same attn_mask in all attentions in {self.__class__.__name__} ')
        else:
            assert (len(attn_masks) == self.num_attn), f'The length of attn_masks {len(attn_masks)} must be equal to the number of attention in operation_order {self.num_attn}'
        for layer in self.operation_order:
            if (layer == 'self_attn'):
                temp_key = temp_value = query
                # in prenorm, the identity is the pre-norm tensor saved above
                query = self.attentions[attn_index](query, temp_key, temp_value, (identity if self.pre_norm else None), query_pos=query_pos, key_pos=query_pos, attn_mask=attn_masks[attn_index], key_padding_mask=query_key_padding_mask, **kwargs)
                attn_index += 1
                identity = query
            elif (layer == 'norm'):
                query = self.norms[norm_index](query)
                norm_index += 1
            elif (layer == 'cross_attn'):
                query = self.attentions[attn_index](query, key, value, (identity if self.pre_norm else None), query_pos=query_pos, key_pos=key_pos, attn_mask=attn_masks[attn_index], key_padding_mask=key_padding_mask, **kwargs)
                attn_index += 1
                identity = query
            elif (layer == 'ffn'):
                query = self.ffns[ffn_index](query, (identity if self.pre_norm else None))
                ffn_index += 1
        return query
@TRANSFORMER_LAYER_SEQUENCE.register_module()
class TransformerLayerSequence(BaseModule):
    """Base class for TransformerEncoder and TransformerDecoder in vision
    transformer.

    As base-class of Encoder and Decoder in vision transformer.
    Support customization such as specifying different kind
    of `transformer_layer` in `transformer_coder`.

    Args:
        transformerlayers (list[obj:`mmcv.ConfigDict`] |
            obj:`mmcv.ConfigDict`): Config of transformerlayer
            in TransformerCoder. If it is obj:`mmcv.ConfigDict`,
            it would be repeated `num_layers` times to a
            list[`mmcv.ConfigDict`]. Default: None.
        num_layers (int): The number of `TransformerLayer`. Default: None.
        init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
            Default: None.
    """

    def __init__(self, transformerlayers=None, num_layers=None, init_cfg=None):
        super(TransformerLayerSequence, self).__init__(init_cfg)
        if isinstance(transformerlayers, dict):
            # one shared layer config: replicate it num_layers times
            transformerlayers = [copy.deepcopy(transformerlayers) for _ in range(num_layers)]
        else:
            assert (isinstance(transformerlayers, list) and (len(transformerlayers) == num_layers))
        self.num_layers = num_layers
        self.layers = ModuleList()
        for i in range(num_layers):
            self.layers.append(build_transformer_layer(transformerlayers[i]))
        # expose properties of the first layer for convenience
        self.embed_dims = self.layers[0].embed_dims
        self.pre_norm = self.layers[0].pre_norm

    def forward(self,
                query,
                key,
                value,
                query_pos=None,
                key_pos=None,
                attn_masks=None,
                query_key_padding_mask=None,
                key_padding_mask=None,
                **kwargs):
        """Forward function for `TransformerCoder`.

        Args:
            query (Tensor): Input query with shape
                `(num_queries, bs, embed_dims)`.
            key (Tensor): The key tensor with shape
                `(num_keys, bs, embed_dims)`.
            value (Tensor): The value tensor with shape
                `(num_keys, bs, embed_dims)`.
            query_pos (Tensor): The positional encoding for `query`.
                Default: None.
            key_pos (Tensor): The positional encoding for `key`.
                Default: None.
            attn_masks (List[Tensor], optional): Each element is 2D Tensor
                which is used in calculation of corresponding attention in
                operation_order. Default: None.
            query_key_padding_mask (Tensor): ByteTensor for `query`, with
                shape [bs, num_queries]. Only used in self-attention
                Default: None.
            key_padding_mask (Tensor): ByteTensor for `query`, with
                shape [bs, num_keys]. Default: None.

        Returns:
            Tensor: results with shape [num_queries, bs, embed_dims].
        """
        for layer in self.layers:
            query = layer(query, key, value, query_pos=query_pos, key_pos=key_pos, attn_masks=attn_masks, query_key_padding_mask=query_key_padding_mask, key_padding_mask=key_padding_mask, **kwargs)
        return query
@UPSAMPLE_LAYERS.register_module(name='pixel_shuffle')
class PixelShufflePack(nn.Module):
    """Pixel Shuffle upsample layer.

    Packs a ``nn.Conv2d`` that expands channels by ``scale_factor ** 2``
    together with ``F.pixel_shuffle`` to achieve a simple learned upsampling.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        scale_factor (int): Upsample ratio.
        upsample_kernel (int): Kernel size of the conv layer to expand the
            channels.
    """

    def __init__(self, in_channels, out_channels, scale_factor,
                 upsample_kernel):
        super(PixelShufflePack, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.scale_factor = scale_factor
        self.upsample_kernel = upsample_kernel
        # pixel_shuffle consumes scale_factor**2 channels per output channel
        expanded_channels = self.out_channels * scale_factor * scale_factor
        self.upsample_conv = nn.Conv2d(
            self.in_channels,
            expanded_channels,
            self.upsample_kernel,
            padding=(self.upsample_kernel - 1) // 2)
        self.init_weights()

    def init_weights(self):
        """Xavier-initialize the channel-expanding conv."""
        xavier_init(self.upsample_conv, distribution='uniform')

    def forward(self, x):
        expanded = self.upsample_conv(x)
        return F.pixel_shuffle(expanded, self.scale_factor)
def build_upsample_layer(cfg, *args, **kwargs):
    """Build upsample layer.

    Args:
        cfg (dict): The upsample layer config, which should contain:

            - type (str): Layer type.
            - scale_factor (int): Upsample ratio, which is not applicable to
              deconv.
            - layer args: Args needed to instantiate a upsample layer.
        args (argument list): Arguments passed to the ``__init__``
            method of the corresponding conv layer.
        kwargs (keyword arguments): Keyword arguments passed to the
            ``__init__`` method of the corresponding conv layer.

    Returns:
        nn.Module: Created upsample layer.
    """
    # validate the config with guard clauses
    if not isinstance(cfg, dict):
        raise TypeError(f'cfg must be a dict, but got {type(cfg)}')
    if 'type' not in cfg:
        raise KeyError(
            f'the cfg dict must contain the key "type", but got {cfg}')
    cfg_ = cfg.copy()
    layer_type = cfg_.pop('type')
    if layer_type not in UPSAMPLE_LAYERS:
        raise KeyError(f'Unrecognized upsample type {layer_type}')
    upsample = UPSAMPLE_LAYERS.get(layer_type)
    if upsample is nn.Upsample:
        # nn.Upsample dispatches on ``mode``; reuse the registry key for it
        cfg_['mode'] = layer_type
    return upsample(*args, **kwargs, **cfg_)
def obsolete_torch_version(torch_version, version_threshold):
    """Return True for parrots or any torch version <= ``version_threshold``.

    Args:
        torch_version (str | tuple[int]): Either the literal ``'parrots'`` or
            a version tuple such as ``(1, 4)``.
        version_threshold (tuple[int]): Version tuple to compare against.

    Returns:
        bool: Whether the running torch is considered obsolete.
    """
    if torch_version == 'parrots':
        return True
    return torch_version <= version_threshold
class NewEmptyTensorOp(torch.autograd.Function):
    """Autograd op that returns an empty tensor of a requested shape.

    Backward produces an empty tensor with the original input's shape, so
    gradients can flow through zero-size forward passes.
    """

    @staticmethod
    def forward(ctx, x, new_shape):
        # remember the input shape so backward can mirror it
        ctx.shape = x.shape
        return x.new_empty(new_shape)

    @staticmethod
    def backward(ctx, grad):
        return NewEmptyTensorOp.apply(grad, ctx.shape), None
@CONV_LAYERS.register_module('Conv', force=True)
class Conv2d(nn.Conv2d):
    """Conv2d that tolerates zero-size inputs on obsolete torch (<= 1.4).

    Newer torch handles empty batches natively; for old versions the output
    shape is computed analytically and an empty tensor is returned.
    """

    def forward(self, x):
        if x.numel() != 0 or not obsolete_torch_version(TORCH_VERSION, (1, 4)):
            return super().forward(x)
        # analytic output spatial size for the empty input
        out_shape = [x.shape[0], self.out_channels]
        for i, k, p, s, d in zip(x.shape[-2:], self.kernel_size,
                                 self.padding, self.stride, self.dilation):
            out_shape.append((i + 2 * p - (d * (k - 1) + 1)) // s + 1)
        empty = NewEmptyTensorOp.apply(x, out_shape)
        if not self.training:
            return empty
        # keep parameters in the autograd graph with a zero contribution
        dummy = sum(w.view(-1)[0] for w in self.parameters()) * 0.0
        return empty + dummy
@CONV_LAYERS.register_module('Conv3d', force=True)
class Conv3d(nn.Conv3d):
    """Conv3d that tolerates zero-size inputs on obsolete torch (<= 1.4).

    Newer torch handles empty batches natively; for old versions the output
    shape is computed analytically and an empty tensor is returned.
    """

    def forward(self, x):
        if x.numel() != 0 or not obsolete_torch_version(TORCH_VERSION, (1, 4)):
            return super().forward(x)
        # analytic output spatial size for the empty input
        out_shape = [x.shape[0], self.out_channels]
        for i, k, p, s, d in zip(x.shape[-3:], self.kernel_size,
                                 self.padding, self.stride, self.dilation):
            out_shape.append((i + 2 * p - (d * (k - 1) + 1)) // s + 1)
        empty = NewEmptyTensorOp.apply(x, out_shape)
        if not self.training:
            return empty
        # keep parameters in the autograd graph with a zero contribution
        dummy = sum(w.view(-1)[0] for w in self.parameters()) * 0.0
        return empty + dummy
@CONV_LAYERS.register_module()
@CONV_LAYERS.register_module('deconv')
@UPSAMPLE_LAYERS.register_module('deconv', force=True)
class ConvTranspose2d(nn.ConvTranspose2d):
    """ConvTranspose2d that tolerates zero-size inputs on old torch (<= 1.4).

    Newer torch handles empty batches natively; for old versions the output
    shape is computed analytically and an empty tensor is returned.
    """

    def forward(self, x):
        if x.numel() != 0 or not obsolete_torch_version(TORCH_VERSION, (1, 4)):
            return super().forward(x)
        # analytic transposed-conv output size for the empty input
        out_shape = [x.shape[0], self.out_channels]
        for i, k, p, s, d, op in zip(x.shape[-2:], self.kernel_size,
                                     self.padding, self.stride,
                                     self.dilation, self.output_padding):
            out_shape.append((i - 1) * s - 2 * p + (d * (k - 1) + 1) + op)
        empty = NewEmptyTensorOp.apply(x, out_shape)
        if not self.training:
            return empty
        # keep parameters in the autograd graph with a zero contribution
        dummy = sum(w.view(-1)[0] for w in self.parameters()) * 0.0
        return empty + dummy
@CONV_LAYERS.register_module()
@CONV_LAYERS.register_module('deconv3d')
@UPSAMPLE_LAYERS.register_module('deconv3d', force=True)
class ConvTranspose3d(nn.ConvTranspose3d):
    """ConvTranspose3d that tolerates zero-size inputs on old torch (<= 1.4).

    Newer torch handles empty batches natively; for old versions the output
    shape is computed analytically and an empty tensor is returned.
    """

    def forward(self, x):
        if x.numel() != 0 or not obsolete_torch_version(TORCH_VERSION, (1, 4)):
            return super().forward(x)
        # analytic transposed-conv output size for the empty input
        out_shape = [x.shape[0], self.out_channels]
        for i, k, p, s, d, op in zip(x.shape[-3:], self.kernel_size,
                                     self.padding, self.stride,
                                     self.dilation, self.output_padding):
            out_shape.append((i - 1) * s - 2 * p + (d * (k - 1) + 1) + op)
        empty = NewEmptyTensorOp.apply(x, out_shape)
        if not self.training:
            return empty
        # keep parameters in the autograd graph with a zero contribution
        dummy = sum(w.view(-1)[0] for w in self.parameters()) * 0.0
        return empty + dummy
class MaxPool2d(nn.MaxPool2d):
    """MaxPool2d that tolerates zero-size inputs on obsolete torch (<= 1.9).

    For old versions the output spatial size is computed analytically
    (honoring ``ceil_mode``) and an empty tensor is returned.
    """

    def forward(self, x):
        if x.numel() != 0 or not obsolete_torch_version(TORCH_VERSION, (1, 9)):
            return super().forward(x)
        out_shape = list(x.shape[:2])
        rounding = math.ceil if self.ceil_mode else math.floor
        for i, k, p, s, d in zip(x.shape[-2:], _pair(self.kernel_size),
                                 _pair(self.padding), _pair(self.stride),
                                 _pair(self.dilation)):
            size = (i + 2 * p - (d * (k - 1) + 1)) / s + 1
            out_shape.append(rounding(size))
        return NewEmptyTensorOp.apply(x, out_shape)
class MaxPool3d(nn.MaxPool3d):
    """``nn.MaxPool3d`` that tolerates zero-element inputs on PyTorch < 1.9.

    For empty inputs the output shape is computed analytically (no dummy
    gradient term is needed since pooling has no parameters).
    """

    def forward(self, x):
        if ((x.numel() == 0) and obsolete_torch_version(TORCH_VERSION, (1, 9))):
            out_shape = list(x.shape[:2])
            # Pooling output-size formula over D/H/W; ceil_mode selects
            # ceil vs floor. _triple normalizes int-or-tuple arguments.
            for (i, k, p, s, d) in zip(x.shape[(- 3):], _triple(self.kernel_size), _triple(self.padding), _triple(self.stride), _triple(self.dilation)):
                o = ((((i + (2 * p)) - ((d * (k - 1)) + 1)) / s) + 1)
                o = (math.ceil(o) if self.ceil_mode else math.floor(o))
                out_shape.append(o)
            empty = NewEmptyTensorOp.apply(x, out_shape)
            return empty
        return super().forward(x)
class Linear(torch.nn.Linear):
    """``nn.Linear`` that tolerates zero-element inputs on PyTorch < 1.5."""

    def forward(self, x):
        if ((x.numel() == 0) and obsolete_torch_version(TORCH_VERSION, (1, 5))):
            # Empty batch: output is (N, out_features) with N == 0.
            out_shape = [x.shape[0], self.out_features]
            empty = NewEmptyTensorOp.apply(x, out_shape)
            if self.training:
                # Keep parameters in the autograd graph for empty batches.
                dummy = (sum((x.view((- 1))[0] for x in self.parameters())) * 0.0)
                return (empty + dummy)
            else:
                return empty
        return super().forward(x)
def build_model_from_cfg(cfg, registry, default_args=None):
    """Build a PyTorch model from one config dict or a list of them.

    Unlike ``build_from_cfg``, passing a list of configs builds every
    module and wraps them in a ``Sequential``.

    Args:
        cfg (dict | list[dict]): Module config(s).
        registry (:obj:`Registry`): Registry the module(s) belong to.
        default_args (dict, optional): Default arguments used to build the
            module. Defaults to None.

    Returns:
        nn.Module: The built module (or ``Sequential`` of modules).
    """
    if not isinstance(cfg, list):
        return build_from_cfg(cfg, registry, default_args)
    built = [build_from_cfg(item, registry, default_args) for item in cfg]
    return Sequential(*built)
def conv3x3(in_planes, out_planes, stride=1, dilation=1):
    """Return a bias-free 3x3 convolution whose padding equals its dilation
    (so spatial size is preserved when stride is 1)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        dilation=dilation,
        bias=False)
class BasicBlock(nn.Module):
    """Basic residual block (two 3x3 convs) for shallow ResNets (18/34).

    ``style`` is accepted for interface parity with ``Bottleneck`` but does
    not change the layout; gradient checkpointing (``with_cp``) is not
    supported by this block.
    """

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None,
                 style='pytorch', with_cp=False):
        super(BasicBlock, self).__init__()
        assert style in ['pytorch', 'caffe']
        self.conv1 = conv3x3(inplanes, planes, stride, dilation)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        assert not with_cp

    def forward(self, x):
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # The shortcut is projected only when resolution/channels change.
        shortcut = x if self.downsample is None else self.downsample(x)
        out += shortcut
        return self.relu(out)
class Bottleneck(nn.Module):
    """Bottleneck residual block (1x1 -> 3x3 -> 1x1) for deep ResNets.

    With ``style='pytorch'`` the stride-two layer is the 3x3 conv; with
    ``'caffe'`` it is the first 1x1 conv. ``with_cp`` checkpoints the
    residual branch, trading recomputation for activation memory.
    """

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None,
                 style='pytorch', with_cp=False):
        super(Bottleneck, self).__init__()
        assert style in ['pytorch', 'caffe']
        if style == 'pytorch':
            stride_1x1, stride_3x3 = 1, stride
        else:
            stride_1x1, stride_3x3 = stride, 1
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1,
                               stride=stride_1x1, bias=False)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=stride_3x3, padding=dilation,
                               dilation=dilation, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion,
                               kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        self.with_cp = with_cp

    def forward(self, x):

        def _residual_branch(x):
            out = self.relu(self.bn1(self.conv1(x)))
            out = self.relu(self.bn2(self.conv2(out)))
            out = self.bn3(self.conv3(out))
            shortcut = x if self.downsample is None else self.downsample(x)
            return out + shortcut

        # Checkpointing only helps (and only works) when grad is required.
        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_residual_branch, x)
        else:
            out = _residual_branch(x)
        return self.relu(out)
def make_res_layer(block, inplanes, planes, blocks, stride=1, dilation=1,
                   style='pytorch', with_cp=False):
    """Stack ``blocks`` residual blocks of type ``block`` into one stage.

    A 1x1-conv + BN projection shortcut is attached to the first block
    whenever the stage changes resolution (``stride != 1``) or channel
    count; subsequent blocks always use identity shortcuts.

    Returns:
        nn.Sequential: The assembled stage.
    """
    expansion = block.expansion
    downsample = None
    if stride != 1 or inplanes != planes * expansion:
        downsample = nn.Sequential(
            nn.Conv2d(inplanes, planes * expansion, kernel_size=1,
                      stride=stride, bias=False),
            nn.BatchNorm2d(planes * expansion))
    layers = [block(inplanes, planes, stride, dilation, downsample,
                    style=style, with_cp=with_cp)]
    layers.extend(
        block(planes * expansion, planes, 1, dilation,
              style=style, with_cp=with_cp)
        for _ in range(blocks - 1))
    return nn.Sequential(*layers)
class ResNet(nn.Module):
    """ResNet backbone.

    Args:
        depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
        num_stages (int): Resnet stages, normally 4.
        strides (Sequence[int]): Strides of the first block of each stage.
        dilations (Sequence[int]): Dilation of each stage.
        out_indices (Sequence[int]): Output from which stages.
        style (str): `pytorch` or `caffe`. If set to "pytorch", the
            stride-two layer is the 3x3 conv layer, otherwise the
            stride-two layer is the first 1x1 conv layer.
        frozen_stages (int): Stages to be frozen (all param fixed). -1 means
            not freezing any parameters.
        bn_eval (bool): Whether to set BN layers as eval mode, namely,
            freeze running stats (mean and var).
        bn_frozen (bool): Whether to freeze weight and bias of BN layers.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save
            some memory while slowing down the training speed.
    """

    # depth -> (block type, number of blocks per stage)
    arch_settings = {18: (BasicBlock, (2, 2, 2, 2)), 34: (BasicBlock, (3, 4, 6, 3)), 50: (Bottleneck, (3, 4, 6, 3)), 101: (Bottleneck, (3, 4, 23, 3)), 152: (Bottleneck, (3, 8, 36, 3))}

    def __init__(self, depth, num_stages=4, strides=(1, 2, 2, 2), dilations=(1, 1, 1, 1), out_indices=(0, 1, 2, 3), style='pytorch', frozen_stages=(- 1), bn_eval=True, bn_frozen=False, with_cp=False):
        super(ResNet, self).__init__()
        if (depth not in self.arch_settings):
            raise KeyError(f'invalid depth {depth} for resnet')
        assert ((num_stages >= 1) and (num_stages <= 4))
        (block, stage_blocks) = self.arch_settings[depth]
        stage_blocks = stage_blocks[:num_stages]
        assert (len(strides) == len(dilations) == num_stages)
        assert (max(out_indices) < num_stages)
        self.out_indices = out_indices
        self.style = style
        self.frozen_stages = frozen_stages
        self.bn_eval = bn_eval
        self.bn_frozen = bn_frozen
        self.with_cp = with_cp
        self.inplanes = 64
        # Stem: 7x7 stride-2 conv + stride-2 max pool (4x downsampling).
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Names of the stage modules, in order ('layer1'..'layerN').
        self.res_layers = []
        for (i, num_blocks) in enumerate(stage_blocks):
            stride = strides[i]
            dilation = dilations[i]
            # Channel width doubles each stage: 64, 128, 256, 512.
            planes = (64 * (2 ** i))
            res_layer = make_res_layer(block, self.inplanes, planes, num_blocks, stride=stride, dilation=dilation, style=self.style, with_cp=with_cp)
            self.inplanes = (planes * block.expansion)
            layer_name = f'layer{(i + 1)}'
            self.add_module(layer_name, res_layer)
            self.res_layers.append(layer_name)
        # Channel count of the final stage's output feature map.
        self.feat_dim = ((block.expansion * 64) * (2 ** (len(stage_blocks) - 1)))

    def init_weights(self, pretrained=None):
        # Either load a checkpoint (str path/URL) or apply default init.
        if isinstance(pretrained, str):
            logger = logging.getLogger()
            from ..runner import load_checkpoint
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif (pretrained is None):
            # He init for convs, BN scale=1 / shift=0.
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, nn.BatchNorm2d):
                    constant_init(m, 1)
        else:
            raise TypeError('pretrained must be a str or None')

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        outs = []
        for (i, layer_name) in enumerate(self.res_layers):
            res_layer = getattr(self, layer_name)
            x = res_layer(x)
            if (i in self.out_indices):
                outs.append(x)
        # Single requested stage -> bare tensor; otherwise a tuple.
        if (len(outs) == 1):
            return outs[0]
        else:
            return tuple(outs)

    def train(self, mode=True):
        super(ResNet, self).train(mode)
        if self.bn_eval:
            # Keep BN in eval mode so running stats stay frozen in training.
            for m in self.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eval()
                    if self.bn_frozen:
                        for params in m.parameters():
                            params.requires_grad = False
        if (mode and (self.frozen_stages >= 0)):
            # Freeze the stem...
            for param in self.conv1.parameters():
                param.requires_grad = False
            for param in self.bn1.parameters():
                param.requires_grad = False
            self.bn1.eval()
            # (redundant with the loop just above; kept as-is)
            self.bn1.weight.requires_grad = False
            self.bn1.bias.requires_grad = False
            # ...and the first `frozen_stages` residual stages.
            for i in range(1, (self.frozen_stages + 1)):
                mod = getattr(self, f'layer{i}')
                mod.eval()
                for param in mod.parameters():
                    param.requires_grad = False
def get_model_complexity_info(model, input_shape, print_per_layer_stat=True, as_strings=True, input_constructor=None, flush=False, ost=sys.stdout):
    """Get FLOPs and parameter counts of a model for a given input shape.

    Optionally prints a per-layer complexity breakdown. Supported layer
    types are the keys of :func:`get_modules_mapping` (convolutions,
    activations, poolings, norms, Linear, ConvTranspose2d, Upsample).

    Args:
        model (nn.Module): The model for complexity calculation.
        input_shape (tuple): Input shape (without batch dim) used for
            calculation.
        print_per_layer_stat (bool): Whether to print complexity information
            for each layer in the model. Default: True.
        as_strings (bool): Output FLOPs and params counts as strings.
            Default: True.
        input_constructor (None | callable): If specified, it takes a
            callable that generates the input (returned dict is passed as
            keyword arguments); otherwise a random-content tensor of
            ``(1, *input_shape)`` is used. Default: None.
        flush (bool): same as that in :func:`print`. Default: False.
        ost (stream): same as ``file`` param in :func:`print`.
            Default: sys.stdout.

    Returns:
        tuple[float | str]: (flops, params), as strings if ``as_strings``
        else as floats.
    """
    assert (type(input_shape) is tuple)
    assert (len(input_shape) >= 1)
    assert isinstance(model, nn.Module)
    # Attach counting hooks, run one dummy forward pass in eval mode.
    flops_model = add_flops_counting_methods(model)
    flops_model.eval()
    flops_model.start_flops_count()
    if input_constructor:
        input = input_constructor(input_shape)
        _ = flops_model(**input)
    else:
        try:
            # Match the model's parameter dtype/device for the dummy batch.
            batch = torch.ones(()).new_empty((1, *input_shape), dtype=next(flops_model.parameters()).dtype, device=next(flops_model.parameters()).device)
        except StopIteration:
            # Parameter-free model: fall back to default dtype/device.
            batch = torch.ones(()).new_empty((1, *input_shape))
        _ = flops_model(batch)
    (flops_count, params_count) = flops_model.compute_average_flops_cost()
    if print_per_layer_stat:
        print_model_with_flops(flops_model, flops_count, params_count, ost=ost, flush=flush)
    # Always detach the hooks again before returning.
    flops_model.stop_flops_count()
    if as_strings:
        return (flops_to_string(flops_count), params_to_string(params_count))
    return (flops_count, params_count)
def flops_to_string(flops, units='GFLOPs', precision=2):
    """Convert a FLOPs number into a human-readable string.

    Note that a multiply-add is counted as one FLOP here.

    Args:
        flops (float): FLOPs number to be converted.
        units (str | None): Target units: 'GFLOPs', 'MFLOPs', 'KFLOPs' or
            'FLOPs'. ``None`` picks the largest unit that yields a value
            of at least 1. Default: 'GFLOPs'.
        precision (int): Digits after the decimal point. Default: 2.

    Returns:
        str: The converted FLOPs number with units.

    Examples:
        >>> flops_to_string(1e9)
        '1.0 GFLOPs'
        >>> flops_to_string(2e5, 'MFLOPs')
        '0.2 MFLOPs'
        >>> flops_to_string(3e-9, None)
        '3e-09 FLOPs'
    """
    scales = (('GFLOPs', 10 ** 9), ('MFLOPs', 10 ** 6), ('KFLOPs', 10 ** 3))
    if units is None:
        # Auto-select the largest unit whose integer quotient is positive.
        for suffix, base in scales:
            if (flops // base) > 0:
                return str(round(flops / float(base), precision)) + ' ' + suffix
        return str(flops) + ' FLOPs'
    for suffix, base in scales:
        if units == suffix:
            return str(round(flops / float(base), precision)) + ' ' + units
    # Unknown unit string: fall back to raw FLOPs.
    return str(flops) + ' FLOPs'
def params_to_string(num_params, units=None, precision=2):
    """Convert a parameter count into a human-readable string.

    Args:
        num_params (float): Parameter number to be converted.
        units (str | None): Target units: 'M', 'K' or ''. ``None`` picks
            the most suitable unit automatically. Default: None.
        precision (int): Digits after the decimal point. Default: 2.

    Returns:
        str: The converted parameter number with units.

    Examples:
        >>> params_to_string(1e9)
        '1000.0 M'
        >>> params_to_string(2e5)
        '200.0 k'
        >>> params_to_string(3e-9)
        '3e-09'
    """
    if units is None:
        # Auto mode uses lowercase 'k' (kept for backward compatibility).
        if (num_params // (10 ** 6)) > 0:
            return str(round(num_params / (10 ** 6), precision)) + ' M'
        if num_params // (10 ** 3):
            return str(round(num_params / (10 ** 3), precision)) + ' k'
        return str(num_params)
    if units == 'M':
        return str(round(num_params / (10.0 ** 6), precision)) + ' ' + units
    if units == 'K':
        return str(round(num_params / (10.0 ** 3), precision)) + ' ' + units
    return str(num_params)
def print_model_with_flops(model, total_flops, total_params, units='GFLOPs', precision=3, ost=sys.stdout, flush=False):
    """Print a model with per-layer FLOPs and parameter shares.

    Temporarily monkey-patches every submodule's ``extra_repr`` so that the
    regular ``print(model)`` output carries absolute and relative FLOPs /
    parameter figures, then restores the original reprs.

    Args:
        model (nn.Module): The model to be printed. Must already carry
            ``__flops__``/``__params__``/``__batch_counter__`` produced by
            the flops-counting hooks.
        total_flops (float): Total FLOPs of the model.
        total_params (float): Total parameter count of the model.
        units (str | None): FLOPs units for display. Default: 'GFLOPs'.
        precision (int): Digits after the decimal point. Default: 3.
        ost (stream): same as ``file`` param in :func:`print`.
            Default: sys.stdout.
        flush (bool): same as that in :func:`print`. Default: False.
    """

    def accumulate_params(self):
        # Leaf (supported) modules carry their own count; containers sum
        # their children. NOTE: local name `sum` shadows the builtin here.
        if is_supported_instance(self):
            return self.__params__
        else:
            sum = 0
            for m in self.children():
                sum += m.accumulate_params()
            return sum

    def accumulate_flops(self):
        # Per-image flops: divide the accumulated total by the number of
        # forwarded batches recorded on the root model.
        if is_supported_instance(self):
            return (self.__flops__ / model.__batch_counter__)
        else:
            sum = 0
            for m in self.children():
                sum += m.accumulate_flops()
            return sum

    def flops_repr(self):
        # Augmented extra_repr: stats followed by the original repr text.
        accumulated_num_params = self.accumulate_params()
        accumulated_flops_cost = self.accumulate_flops()
        return ', '.join([params_to_string(accumulated_num_params, units='M', precision=precision), '{:.3%} Params'.format((accumulated_num_params / total_params)), flops_to_string(accumulated_flops_cost, units=units, precision=precision), '{:.3%} FLOPs'.format((accumulated_flops_cost / total_flops)), self.original_extra_repr()])

    def add_extra_repr(m):
        # Bind the helpers as instance methods and swap in flops_repr,
        # keeping the original extra_repr for later restoration.
        m.accumulate_flops = accumulate_flops.__get__(m)
        m.accumulate_params = accumulate_params.__get__(m)
        flops_extra_repr = flops_repr.__get__(m)
        if (m.extra_repr != flops_extra_repr):
            m.original_extra_repr = m.extra_repr
            m.extra_repr = flops_extra_repr
            assert (m.extra_repr != m.original_extra_repr)

    def del_extra_repr(m):
        # Undo everything add_extra_repr installed.
        if hasattr(m, 'original_extra_repr'):
            m.extra_repr = m.original_extra_repr
            del m.original_extra_repr
        if hasattr(m, 'accumulate_flops'):
            del m.accumulate_flops

    model.apply(add_extra_repr)
    print(model, file=ost, flush=flush)
    model.apply(del_extra_repr)
def get_model_parameters_number(model):
    """Count a model's trainable parameters.

    Args:
        model (nn.Module): The model whose parameters are counted.

    Returns:
        int: Total number of elements across parameters with
        ``requires_grad=True``.
    """
    return sum(p.numel() for p in model.parameters() if p.requires_grad)
def add_flops_counting_methods(net_main_module):
    """Attach the flops-counting API to ``net_main_module`` in place.

    Binds ``start_flops_count``, ``stop_flops_count``, ``reset_flops_count``
    and ``compute_average_flops_cost`` as instance methods, then resets the
    counters.

    Returns:
        nn.Module: The same module, augmented.
    """
    for fn in (start_flops_count, stop_flops_count, reset_flops_count,
               compute_average_flops_cost):
        # __get__ binds the plain function as a method of this instance.
        setattr(net_main_module, fn.__name__, fn.__get__(net_main_module))
    net_main_module.reset_flops_count()
    return net_main_module
def compute_average_flops_cost(self):
    """Compute the mean FLOPs cost per image.

    Available after ``add_flops_counting_methods()`` has been called on the
    net object and at least one forward pass has run.

    Returns:
        tuple: (mean flops per forwarded image, trainable parameter count).
    """
    total_flops = sum(m.__flops__ for m in self.modules()
                      if is_supported_instance(m))
    return (total_flops / self.__batch_counter__,
            get_model_parameters_number(self))
def start_flops_count(self):
    """Activate per-image flops accounting.

    Available after ``add_flops_counting_methods()``; call it before
    running the network. Registers a batch-counting forward hook on the
    root module and a flops-counting forward hook on every supported leaf
    module (at most once per module).
    """
    add_batch_counter_hook_function(self)

    def add_flops_counter_hook_function(module):
        # Skip unsupported modules and ones that already carry a hook.
        if is_supported_instance(module) and not hasattr(module, '__flops_handle__'):
            handle = module.register_forward_hook(
                get_modules_mapping()[type(module)])
            module.__flops_handle__ = handle

    # The previous version wrapped the function in a no-argument
    # ``partial``, which is a no-op; apply it directly.
    self.apply(add_flops_counter_hook_function)
def stop_flops_count(self):
    """Deactivate flops accounting by detaching all counting hooks.

    Available after ``add_flops_counting_methods()``; may be called at any
    point to pause the computation.
    """
    remove_batch_counter_hook_function(self)
    self.apply(remove_flops_counter_hook_function)
def reset_flops_count(self):
    """Zero out all flops/params/batch statistics gathered so far.

    Available after ``add_flops_counting_methods()`` has been called on the
    net object.
    """
    add_batch_counter_variables_or_reset(self)
    self.apply(add_flops_counter_variable_or_reset)
def empty_flops_counter_hook(module, input, output):
    """Forward hook for layers that are counted as contributing zero FLOPs."""
    # Keep the accumulator unchanged while still requiring it to exist.
    module.__flops__ += 0
def upsample_flops_counter_hook(module, input, output):
    # Counts one FLOP per produced element.
    # NOTE(review): when ``output`` is a single tensor, ``output[0]`` is the
    # first sample, so the count below is per-sample rather than per-batch
    # (its shape[0] is then the channel dim). This mirrors the upstream
    # ptflops implementation — confirm against how ``__batch_counter__``
    # normalizes before changing it.
    output_size = output[0]
    batch_size = output_size.shape[0]
    output_elements_count = batch_size
    for val in output_size.shape[1:]:
        output_elements_count *= val
    module.__flops__ += int(output_elements_count)
def relu_flops_counter_hook(module, input, output):
    """Forward hook: one FLOP per output element for activation layers."""
    module.__flops__ += int(output.numel())
def linear_flops_counter_hook(module, input, output):
    """Forward hook: one multiply-add per input element per output feature."""
    # input is the hook's positional-args tuple; the tensor is its first item.
    tensor = input[0]
    module.__flops__ += int(np.prod(tensor.shape) * output.shape[-1])
def pool_flops_counter_hook(module, input, output):
    """Forward hook: one FLOP per input element for pooling layers."""
    module.__flops__ += int(np.prod(input[0].shape))
def norm_flops_counter_hook(module, input, output):
    """Forward hook: one FLOP per input element for normalization layers,
    doubled when the layer applies a learnable affine transform."""
    tensor = input[0]
    elements = np.prod(tensor.shape)
    # BatchNorm/GroupNorm expose `affine`; LayerNorm uses `elementwise_affine`.
    has_affine = (getattr(module, 'affine', False)
                  or getattr(module, 'elementwise_affine', False))
    module.__flops__ += int(elements * (2 if has_affine else 1))
def deconv_flops_counter_hook(conv_module, input, output):
    """Forward hook: accumulate FLOPs of a 2d transposed convolution.

    Counts one multiply-add per kernel tap per *input* position, plus one
    add per output element when a bias is present.
    """
    tensor = input[0]
    batch_size = tensor.shape[0]
    in_h, in_w = tensor.shape[2:]
    k_h, k_w = conv_module.kernel_size
    filters_per_channel = conv_module.out_channels // conv_module.groups
    per_position_flops = (k_h * k_w * conv_module.in_channels
                          * filters_per_channel)
    total = per_position_flops * batch_size * in_h * in_w
    if conv_module.bias is not None:
        out_h, out_w = output.shape[2:]
        total += conv_module.out_channels * batch_size * out_h * out_w
    conv_module.__flops__ += int(total)
def conv_flops_counter_hook(conv_module, input, output):
    """Forward hook: accumulate FLOPs of a (possibly grouped) N-d convolution.

    Counts one multiply-add per kernel tap per output position, plus one
    add per output element when a bias is present.
    """
    tensor = input[0]
    batch_size = tensor.shape[0]
    spatial_out = int(np.prod(list(output.shape[2:])))
    kernel_positions = int(np.prod(list(conv_module.kernel_size)))
    filters_per_channel = conv_module.out_channels // conv_module.groups
    per_position_flops = (kernel_positions * conv_module.in_channels
                          * filters_per_channel)
    active_elements = batch_size * spatial_out
    total = per_position_flops * active_elements
    if conv_module.bias is not None:
        total += conv_module.out_channels * active_elements
    conv_module.__flops__ += int(total)
def batch_counter_hook(module, input, output):
    """Forward hook: add this call's batch size to the module's counter."""
    if len(input) > 0:
        # Batch size is the length of the first positional input.
        batch_size = len(input[0])
    else:
        warnings.warn('No positional inputs found for a module, assuming batch size is 1.')
        batch_size = 1
    module.__batch_counter__ += batch_size
def add_batch_counter_variables_or_reset(module):
    """Create (or reset to zero) the forward-batch counter on ``module``."""
    module.__batch_counter__ = 0
def add_batch_counter_hook_function(module):
    """Register the batch-counting forward hook on ``module`` (idempotent)."""
    if hasattr(module, '__batch_counter_handle__'):
        # A hook is already attached; do not register a second one.
        return
    module.__batch_counter_handle__ = module.register_forward_hook(batch_counter_hook)
def remove_batch_counter_hook_function(module):
    """Detach the batch-counting forward hook from ``module``, if present."""
    handle = getattr(module, '__batch_counter_handle__', None)
    if handle is not None:
        handle.remove()
        del module.__batch_counter_handle__
def add_flops_counter_variable_or_reset(module):
    """Create (or reset) ``__flops__`` and ``__params__`` on a supported module.

    Warns when the attributes already exist, since the counters would then
    clobber user-defined state.
    """
    if is_supported_instance(module):
        if hasattr(module, '__flops__') or hasattr(module, '__params__'):
            # Fixed message: it previously concatenated the module name with
            # no spacing ("...for the moduleConv2d ptflops...").
            warnings.warn(
                'variables __flops__ or __params__ are already defined for '
                f'the module {type(module).__name__}; '
                'ptflops can affect your code!')
        module.__flops__ = 0
        module.__params__ = get_model_parameters_number(module)
def is_supported_instance(module):
    """Return True if a flops hook is registered for this exact module type.

    Exact-type lookup: subclasses of supported types are NOT matched.
    """
    return type(module) in get_modules_mapping()
def remove_flops_counter_hook_function(module):
    """Detach the flops-counting forward hook from ``module``, if attached."""
    if is_supported_instance(module) and hasattr(module, '__flops_handle__'):
        module.__flops_handle__.remove()
        del module.__flops_handle__
def get_modules_mapping():
    """Map supported module types to their flops-counting forward hooks.

    Lookup is by exact type (see ``is_supported_instance``), so subclasses
    of these types are not matched. The mmcv empty-tensor wrappers are
    registered alongside their ``torch.nn`` counterparts.
    """
    return {
        # convolutions
        nn.Conv1d: conv_flops_counter_hook,
        nn.Conv2d: conv_flops_counter_hook,
        mmcv.cnn.bricks.Conv2d: conv_flops_counter_hook,
        nn.Conv3d: conv_flops_counter_hook,
        mmcv.cnn.bricks.Conv3d: conv_flops_counter_hook,
        # activations
        nn.ReLU: relu_flops_counter_hook,
        nn.PReLU: relu_flops_counter_hook,
        nn.ELU: relu_flops_counter_hook,
        nn.LeakyReLU: relu_flops_counter_hook,
        nn.ReLU6: relu_flops_counter_hook,
        # poolings
        nn.MaxPool1d: pool_flops_counter_hook,
        nn.AvgPool1d: pool_flops_counter_hook,
        nn.AvgPool2d: pool_flops_counter_hook,
        nn.MaxPool2d: pool_flops_counter_hook,
        mmcv.cnn.bricks.MaxPool2d: pool_flops_counter_hook,
        nn.MaxPool3d: pool_flops_counter_hook,
        mmcv.cnn.bricks.MaxPool3d: pool_flops_counter_hook,
        nn.AvgPool3d: pool_flops_counter_hook,
        nn.AdaptiveMaxPool1d: pool_flops_counter_hook,
        nn.AdaptiveAvgPool1d: pool_flops_counter_hook,
        nn.AdaptiveMaxPool2d: pool_flops_counter_hook,
        nn.AdaptiveAvgPool2d: pool_flops_counter_hook,
        nn.AdaptiveMaxPool3d: pool_flops_counter_hook,
        nn.AdaptiveAvgPool3d: pool_flops_counter_hook,
        # normalizations
        nn.BatchNorm1d: norm_flops_counter_hook,
        nn.BatchNorm2d: norm_flops_counter_hook,
        nn.BatchNorm3d: norm_flops_counter_hook,
        nn.GroupNorm: norm_flops_counter_hook,
        nn.InstanceNorm1d: norm_flops_counter_hook,
        nn.InstanceNorm2d: norm_flops_counter_hook,
        nn.InstanceNorm3d: norm_flops_counter_hook,
        nn.LayerNorm: norm_flops_counter_hook,
        # linear
        nn.Linear: linear_flops_counter_hook,
        mmcv.cnn.bricks.Linear: linear_flops_counter_hook,
        # upsample / deconvolution
        nn.Upsample: upsample_flops_counter_hook,
        nn.ConvTranspose2d: deconv_flops_counter_hook,
        mmcv.cnn.bricks.ConvTranspose2d: deconv_flops_counter_hook}
def _fuse_conv_bn(conv, bn): 'Fuse conv and bn into one module.\n\n Args:\n conv (nn.Module): Conv to be fused.\n bn (nn.Module): BN to be fused.\n\n Returns:\n nn.Module: Fused module.\n ' conv_w = conv.weight conv_b = (conv.bias if (conv.bias is not None) else torch.zeros_like(bn.running_mean)) factor = (bn.weight / torch.sqrt((bn.running_var + bn.eps))) conv.weight = nn.Parameter((conv_w * factor.reshape([conv.out_channels, 1, 1, 1]))) conv.bias = nn.Parameter((((conv_b - bn.running_mean) * factor) + bn.bias)) return conv
def fuse_conv_bn(module):
    """Recursively fold BatchNorm layers into their preceding conv layers.

    During inference a BN layer applies a fixed per-channel affine
    transform based on its running statistics, so it can be merged into
    the conv right before it; the BN is then replaced by ``nn.Identity``.

    Args:
        module (nn.Module): Module tree to be fused in place.

    Returns:
        nn.Module: The same module, fused.
    """
    pending_conv = None
    pending_name = None
    for name, child in module.named_children():
        is_bn = isinstance(
            child, (nn.modules.batchnorm._BatchNorm, nn.SyncBatchNorm))
        if is_bn:
            if pending_conv is None:
                # BN with no immediately-preceding conv: leave untouched.
                continue
            module._modules[pending_name] = _fuse_conv_bn(pending_conv, child)
            module._modules[name] = nn.Identity()
            pending_conv = None
        elif isinstance(child, nn.Conv2d):
            pending_conv = child
            pending_name = name
        else:
            fuse_conv_bn(child)
    return module
class _BatchNormXd(torch.nn.modules.batchnorm._BatchNorm): "A general BatchNorm layer without input dimension check.\n\n Reproduced from @kapily's work:\n (https://github.com/pytorch/pytorch/issues/41081#issuecomment-783961547)\n The only difference between BatchNorm1d, BatchNorm2d, BatchNorm3d, etc\n is `_check_input_dim` that is designed for tensor sanity checks.\n The check has been bypassed in this class for the convenience of converting\n SyncBatchNorm.\n " def _check_input_dim(self, input): return
def revert_sync_batchnorm(module):
    """Convert all `SyncBatchNorm` (SyncBN) and
    `mmcv.ops.sync_bn.SyncBatchNorm` (MMSyncBN) layers in the model to
    `_BatchNormXd` layers.

    Adapted from @kapily's work:
    (https://github.com/pytorch/pytorch/issues/41081#issuecomment-783961547)

    Args:
        module (nn.Module): The module containing `SyncBatchNorm` layers.

    Returns:
        module_output: The converted module with `_BatchNormXd` layers.
    """
    module_output = module
    module_checklist = [torch.nn.modules.batchnorm.SyncBatchNorm]
    # mmcv.ops is optional; only check for MMSyncBN when it is available.
    if hasattr(mmcv, 'ops'):
        module_checklist.append(mmcv.ops.SyncBatchNorm)
    if isinstance(module, tuple(module_checklist)):
        module_output = _BatchNormXd(module.num_features, module.eps, module.momentum, module.affine, module.track_running_stats)
        if module.affine:
            # no_grad: plain Parameter reassignment, not a traced copy.
            with torch.no_grad():
                module_output.weight = module.weight
                module_output.bias = module.bias
        # Buffers and training flag are carried over verbatim.
        module_output.running_mean = module.running_mean
        module_output.running_var = module.running_var
        module_output.num_batches_tracked = module.num_batches_tracked
        module_output.training = module.training
        if hasattr(module, 'qconfig'):
            # Preserve quantization configuration when present.
            module_output.qconfig = module.qconfig
    for (name, child) in module.named_children():
        module_output.add_module(name, revert_sync_batchnorm(child))
    del module
    return module_output
def update_init_info(module, init_info):
    """Update the `_params_init_info` in the module if the value of
    parameters has changed.

    ``module._params_init_info`` maps each parameter to a dict holding a
    human-readable description of the initialization that produced it and
    the parameter's last-known mean value; the mean serves as a cheap
    change detector.

    Args:
        module (obj:`nn.Module`): The module of PyTorch with a user-defined
            attribute `_params_init_info` which records the initialization
            information.
        init_info (str): The string that describes the initialization.
    """
    assert hasattr(module, '_params_init_info'), f'Can not find `_params_init_info` in {module}'
    for (name, param) in module.named_parameters():
        assert (param in module._params_init_info), f'Find a new :obj:`Parameter` named `{name}` during executing the `init_weights` of `{module.__class__.__name__}`. Please do not add or replace parameters during executing the `init_weights`. '
        # A changed mean is taken as evidence the parameter was re-initialized.
        mean_value = param.data.mean()
        if (module._params_init_info[param]['tmp_mean_value'] != mean_value):
            module._params_init_info[param]['init_info'] = init_info
            module._params_init_info[param]['tmp_mean_value'] = mean_value
def constant_init(module, val, bias=0):
    """Fill ``module.weight`` with ``val`` and ``module.bias`` with ``bias``.

    Missing or ``None`` attributes are silently skipped.
    """
    weight = getattr(module, 'weight', None)
    if weight is not None:
        nn.init.constant_(weight, val)
    bias_param = getattr(module, 'bias', None)
    if bias_param is not None:
        nn.init.constant_(bias_param, bias)
def xavier_init(module, gain=1, bias=0, distribution='normal'):
    """Xavier-initialize ``module.weight`` and constant-fill ``module.bias``.

    Missing or ``None`` attributes are silently skipped.
    """
    assert distribution in ['uniform', 'normal']
    weight = getattr(module, 'weight', None)
    if weight is not None:
        init_fn = (nn.init.xavier_uniform_ if distribution == 'uniform'
                   else nn.init.xavier_normal_)
        init_fn(weight, gain=gain)
    bias_param = getattr(module, 'bias', None)
    if bias_param is not None:
        nn.init.constant_(bias_param, bias)
def normal_init(module, mean=0, std=1, bias=0):
    """Normal-initialize ``module.weight`` and constant-fill ``module.bias``.

    Missing or ``None`` attributes are silently skipped.
    """
    weight = getattr(module, 'weight', None)
    if weight is not None:
        nn.init.normal_(weight, mean, std)
    bias_param = getattr(module, 'bias', None)
    if bias_param is not None:
        nn.init.constant_(bias_param, bias)
def trunc_normal_init(module: nn.Module, mean: float = 0, std: float = 1,
                      a: float = (- 2), b: float = 2, bias: float = 0) -> None:
    """Truncated-normal-initialize ``module.weight`` (values resampled into
    ``[a, b]``) and constant-fill ``module.bias``.

    Missing or ``None`` attributes are silently skipped.
    """
    weight = getattr(module, 'weight', None)
    if weight is not None:
        trunc_normal_(weight, mean, std, a, b)
    bias_param = getattr(module, 'bias', None)
    if bias_param is not None:
        nn.init.constant_(bias_param, bias)
def uniform_init(module, a=0, b=1, bias=0):
    """Uniform-initialize ``module.weight`` on ``[a, b]`` and constant-fill
    ``module.bias``.

    Missing or ``None`` attributes are silently skipped.
    """
    weight = getattr(module, 'weight', None)
    if weight is not None:
        nn.init.uniform_(weight, a, b)
    bias_param = getattr(module, 'bias', None)
    if bias_param is not None:
        nn.init.constant_(bias_param, bias)
def kaiming_init(module, a=0, mode='fan_out', nonlinearity='relu', bias=0,
                 distribution='normal'):
    """Kaiming-initialize ``module.weight`` and constant-fill ``module.bias``.

    Missing or ``None`` attributes are silently skipped.
    """
    assert distribution in ['uniform', 'normal']
    weight = getattr(module, 'weight', None)
    if weight is not None:
        init_fn = (nn.init.kaiming_uniform_ if distribution == 'uniform'
                   else nn.init.kaiming_normal_)
        init_fn(weight, a=a, mode=mode, nonlinearity=nonlinearity)
    bias_param = getattr(module, 'bias', None)
    if bias_param is not None:
        nn.init.constant_(bias_param, bias)
def caffe2_xavier_init(module, bias=0):
    """Initialize like Caffe2's XavierFill.

    Caffe2's "Xavier" corresponds to PyTorch's Kaiming-uniform with
    ``a=1``, fan-in mode and leaky_relu nonlinearity.
    """
    kaiming_init(
        module,
        a=1,
        mode='fan_in',
        nonlinearity='leaky_relu',
        bias=bias,
        distribution='uniform')
def bias_init_with_prob(prior_prob):
    """Return the conv/fc bias value whose sigmoid equals ``prior_prob``.

    Solves ``sigmoid(b) = p`` for ``b``, i.e. ``b = -log((1 - p) / p)``.
    """
    return float(-np.log((1 - prior_prob) / prior_prob))
def _get_bases_name(m): return [b.__name__ for b in m.__class__.__bases__]
class BaseInit(object):
    """Common machinery shared by the weight initializers.

    Validates and stores ``bias``, ``bias_prob`` and ``layer``.  The
    ``wholemodule`` flag is toggled externally when an initializer is used
    as an ``override`` target and should skip the layer filter.
    """

    def __init__(self, *, bias=0, bias_prob=None, layer=None):
        self.wholemodule = False
        if not isinstance(bias, (int, float)):
            raise TypeError(f'bias must be a number, but got a {type(bias)}')
        if bias_prob is not None and not isinstance(bias_prob, float):
            raise TypeError(
                f'bias_prob type must be float, but got {type(bias_prob)}')
        if layer is None:
            layer = []
        elif not isinstance(layer, (str, list)):
            raise TypeError(
                f'layer must be a str or a list of str, but got a {type(layer)}')
        # A bias probability, when given, overrides the plain bias value.
        self.bias = (
            bias_init_with_prob(bias_prob) if bias_prob is not None else bias)
        self.layer = [layer] if isinstance(layer, str) else layer

    def _get_init_info(self):
        """Return a short human-readable description of this initializer."""
        return f'{self.__class__.__name__}, bias={self.bias}'
@INITIALIZERS.register_module(name='Constant')
class ConstantInit(BaseInit):
    """Initialize module parameters with a constant value.

    Args:
        val (int | float): value to fill the weights with.
        bias (int | float): value to fill the bias. Defaults to 0.
        bias_prob (float, optional): probability used to derive the bias.
            Defaults to None.
        layer (str | list[str], optional): layer types to initialize.
            Defaults to None.
    """

    def __init__(self, val, **kwargs):
        super().__init__(**kwargs)
        self.val = val

    def __call__(self, module):

        def init(m):
            # Apply unconditionally for an override target, otherwise only
            # when the module's class/base name matches the layer filter.
            if self.wholemodule or (
                    set(self.layer)
                    & set([m.__class__.__name__] + _get_bases_name(m))):
                constant_init(m, self.val, self.bias)

        module.apply(init)
        if hasattr(module, '_params_init_info'):
            update_init_info(module, init_info=self._get_init_info())

    def _get_init_info(self):
        return f'{self.__class__.__name__}: val={self.val}, bias={self.bias}'
@INITIALIZERS.register_module(name='Xavier')
class XavierInit(BaseInit):
    """Xavier (Glorot) initialization, see `Understanding the difficulty of
    training deep feedforward neural networks - Glorot, X. & Bengio, Y. (2010)
    <http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf>`_.

    Args:
        gain (int | float): optional scaling factor. Defaults to 1.
        bias (int | float): value to fill the bias. Defaults to 0.
        bias_prob (float, optional): probability used to derive the bias.
            Defaults to None.
        distribution (str): either ``'normal'`` or ``'uniform'``.
            Defaults to ``'normal'``.
        layer (str | list[str], optional): layer types to initialize.
            Defaults to None.
    """

    def __init__(self, gain=1, distribution='normal', **kwargs):
        super().__init__(**kwargs)
        self.gain = gain
        self.distribution = distribution

    def __call__(self, module):

        def init(m):
            # Apply unconditionally for an override target, otherwise only
            # when the module's class/base name matches the layer filter.
            if self.wholemodule or (
                    set(self.layer)
                    & set([m.__class__.__name__] + _get_bases_name(m))):
                xavier_init(m, self.gain, self.bias, self.distribution)

        module.apply(init)
        if hasattr(module, '_params_init_info'):
            update_init_info(module, init_info=self._get_init_info())

    def _get_init_info(self):
        return (f'{self.__class__.__name__}: gain={self.gain}, '
                f'distribution={self.distribution}, bias={self.bias}')
@INITIALIZERS.register_module(name='Normal')
class NormalInit(BaseInit):
    """Initialize parameters from the normal distribution N(mean, std^2).

    Args:
        mean (int | float): mean of the normal distribution. Defaults to 0.
        std (int | float): standard deviation of the normal distribution.
            Defaults to 1.
        bias (int | float): value to fill the bias. Defaults to 0.
        bias_prob (float, optional): probability used to derive the bias.
            Defaults to None.
        layer (str | list[str], optional): layer types to initialize.
            Defaults to None.
    """

    def __init__(self, mean=0, std=1, **kwargs):
        super().__init__(**kwargs)
        self.mean = mean
        self.std = std

    def __call__(self, module):

        def init(m):
            # Apply unconditionally for an override target, otherwise only
            # when the module's class/base name matches the layer filter.
            if self.wholemodule or (
                    set(self.layer)
                    & set([m.__class__.__name__] + _get_bases_name(m))):
                normal_init(m, self.mean, self.std, self.bias)

        module.apply(init)
        if hasattr(module, '_params_init_info'):
            update_init_info(module, init_info=self._get_init_info())

    def _get_init_info(self):
        return (f'{self.__class__.__name__}: mean={self.mean}, '
                f'std={self.std}, bias={self.bias}')
@INITIALIZERS.register_module(name='TruncNormal')
class TruncNormalInit(BaseInit):
    """Initialize parameters from N(mean, std^2) truncated to [a, b].

    Args:
        mean (float): mean of the normal distribution. Defaults to 0.
        std (float): standard deviation of the normal distribution.
            Defaults to 1.
        a (float): the minimum cutoff value.
        b (float): the maximum cutoff value.
        bias (float): value to fill the bias. Defaults to 0.
        bias_prob (float, optional): probability used to derive the bias.
            Defaults to None.
        layer (str | list[str], optional): layer types to initialize.
            Defaults to None.
    """

    def __init__(self,
                 mean: float = 0,
                 std: float = 1,
                 a: float = -2,
                 b: float = 2,
                 **kwargs) -> None:
        super().__init__(**kwargs)
        self.mean = mean
        self.std = std
        self.a = a
        self.b = b

    def __call__(self, module: nn.Module) -> None:

        def init(m):
            # Apply unconditionally for an override target, otherwise only
            # when the module's class/base name matches the layer filter.
            if self.wholemodule or (
                    set(self.layer)
                    & set([m.__class__.__name__] + _get_bases_name(m))):
                trunc_normal_init(m, self.mean, self.std, self.a, self.b,
                                  self.bias)

        module.apply(init)
        if hasattr(module, '_params_init_info'):
            update_init_info(module, init_info=self._get_init_info())

    def _get_init_info(self):
        return (f'{self.__class__.__name__}: a={self.a}, b={self.b}, '
                f'mean={self.mean}, std={self.std}, bias={self.bias}')
@INITIALIZERS.register_module(name='Uniform')
class UniformInit(BaseInit):
    """Initialize parameters from the uniform distribution U(a, b).

    Args:
        a (int | float): lower bound of the uniform distribution.
            Defaults to 0.
        b (int | float): upper bound of the uniform distribution.
            Defaults to 1.
        bias (int | float): value to fill the bias. Defaults to 0.
        bias_prob (float, optional): probability used to derive the bias.
            Defaults to None.
        layer (str | list[str], optional): layer types to initialize.
            Defaults to None.
    """

    def __init__(self, a=0, b=1, **kwargs):
        super().__init__(**kwargs)
        self.a = a
        self.b = b

    def __call__(self, module):

        def init(m):
            # Apply unconditionally for an override target, otherwise only
            # when the module's class/base name matches the layer filter.
            if self.wholemodule or (
                    set(self.layer)
                    & set([m.__class__.__name__] + _get_bases_name(m))):
                uniform_init(m, self.a, self.b, self.bias)

        module.apply(init)
        if hasattr(module, '_params_init_info'):
            update_init_info(module, init_info=self._get_init_info())

    def _get_init_info(self):
        return (f'{self.__class__.__name__}: a={self.a}, b={self.b}, '
                f'bias={self.bias}')
@INITIALIZERS.register_module(name='Kaiming')
class KaimingInit(BaseInit):
    """Kaiming (He) initialization, see `Delving deep into rectifiers:
    Surpassing human-level performance on ImageNet classification -
    He, K. et al. (2015).
    <https://www.cv-foundation.org/openaccess/content_iccv_2015/
    papers/He_Delving_Deep_into_ICCV_2015_paper.pdf>`_

    Args:
        a (int | float): negative slope of the rectifier used after this
            layer (only used with ``'leaky_relu'``). Defaults to 0.
        mode (str): either ``'fan_in'`` or ``'fan_out'``. ``'fan_in'``
            preserves the magnitude of the variance of the weights in the
            forward pass; ``'fan_out'`` preserves it in the backward pass.
            Defaults to ``'fan_out'``.
        nonlinearity (str): the non-linear function (`nn.functional` name),
            recommended to use only with ``'relu'`` or ``'leaky_relu'``.
            Defaults to 'relu'.
        bias (int | float): value to fill the bias. Defaults to 0.
        bias_prob (float, optional): probability used to derive the bias.
            Defaults to None.
        distribution (str): either ``'normal'`` or ``'uniform'``.
            Defaults to ``'normal'``.
        layer (str | list[str], optional): layer types to initialize.
            Defaults to None.
    """

    def __init__(self,
                 a=0,
                 mode='fan_out',
                 nonlinearity='relu',
                 distribution='normal',
                 **kwargs):
        super().__init__(**kwargs)
        self.a = a
        self.mode = mode
        self.nonlinearity = nonlinearity
        self.distribution = distribution

    def __call__(self, module):

        def init(m):
            if self.wholemodule:
                kaiming_init(m, self.a, self.mode, self.nonlinearity,
                             self.bias, self.distribution)
            else:
                layername = m.__class__.__name__
                basesname = _get_bases_name(m)
                if len(set(self.layer) & set([layername] + basesname)):
                    kaiming_init(m, self.a, self.mode, self.nonlinearity,
                                 self.bias, self.distribution)

        module.apply(init)
        if hasattr(module, '_params_init_info'):
            update_init_info(module, init_info=self._get_init_info())

    def _get_init_info(self):
        # Fixed stray space ('distribution =') so the info string matches
        # the format used by every other initializer in this file.
        info = (f'{self.__class__.__name__}: a={self.a}, mode={self.mode}, '
                f'nonlinearity={self.nonlinearity}, '
                f'distribution={self.distribution}, bias={self.bias}')
        return info
@INITIALIZERS.register_module(name='Caffe2Xavier')
class Caffe2XavierInit(KaimingInit):
    """Caffe2-style Xavier initialization.

    ``XavierFill`` in Caffe2 corresponds to Kaiming-uniform initialization
    with ``fan_in`` mode and ``a=1`` (leaky_relu), so this class simply pins
    those arguments on :class:`KaimingInit`.

    Note: the redundant ``__call__`` override (which only delegated to
    ``super().__call__``) has been removed; the inherited behavior is
    identical.
    """

    def __init__(self, **kwargs):
        super().__init__(
            a=1,
            mode='fan_in',
            nonlinearity='leaky_relu',
            distribution='uniform',
            **kwargs)
@INITIALIZERS.register_module(name='Pretrained')
class PretrainedInit(object):
    """Initialize a module by loading a pretrained checkpoint.

    Args:
        checkpoint (str): the checkpoint file of the pretrained model.
        prefix (str, optional): prefix of a sub-module in the pretrained
            model; when given, only that part of the checkpoint is loaded
            (e.g. ``prefix='backbone.'`` to load only the backbone of a
            detector). Defaults to None.
        map_location (str): where to map the loaded tensors.
    """

    def __init__(self, checkpoint, prefix=None, map_location=None):
        self.checkpoint = checkpoint
        self.prefix = prefix
        self.map_location = map_location

    def __call__(self, module):
        # Imported lazily to avoid a circular import with mmcv.runner.
        from mmcv.runner import (_load_checkpoint_with_prefix, load_checkpoint,
                                 load_state_dict)
        logger = get_logger('mmcv')
        if self.prefix is None:
            print_log(f'load model from: {self.checkpoint}', logger=logger)
            load_checkpoint(
                module,
                self.checkpoint,
                map_location=self.map_location,
                strict=False,
                logger=logger)
        else:
            print_log(
                f'load {self.prefix} in model from: {self.checkpoint}',
                logger=logger)
            state_dict = _load_checkpoint_with_prefix(
                self.prefix, self.checkpoint, map_location=self.map_location)
            load_state_dict(module, state_dict, strict=False, logger=logger)
        if hasattr(module, '_params_init_info'):
            update_init_info(module, init_info=self._get_init_info())

    def _get_init_info(self):
        return f'{self.__class__.__name__}: load from {self.checkpoint}'
def _initialize(module, cfg, wholemodule=False):
    """Build an initializer from ``cfg`` and apply it to ``module``.

    ``wholemodule`` bypasses the initializer's layer filter; it is set for
    ``override`` targets so the named sub-module is always initialized.
    """
    initializer = build_from_cfg(cfg, INITIALIZERS)
    initializer.wholemodule = wholemodule
    initializer(module)
def _initialize_override(module, override, cfg): if (not isinstance(override, (dict, list))): raise TypeError(f'override must be a dict or a list of dict, but got {type(override)}') override = ([override] if isinstance(override, dict) else override) for override_ in override: cp_override = copy.deepcopy(override_) name = cp_override.pop('name', None) if (name is None): raise ValueError(f'`override` must contain the key "name",but got {cp_override}') if (not cp_override): cp_override.update(cfg) elif ('type' not in cp_override.keys()): raise ValueError(f'`override` need "type" key, but got {cp_override}') if hasattr(module, name): _initialize(getattr(module, name), cp_override, wholemodule=True) else: raise RuntimeError(f'module did not have attribute {name}, but init_cfg is {cp_override}.')
def initialize(module, init_cfg):
    """Initialize a module (and optionally named sub-modules) from config.

    Args:
        module (``torch.nn.Module``): the module to be initialized.
        init_cfg (dict | list[dict]): initialization config(s). Each dict
            selects a registered initializer via its ``type`` key
            (``Constant``, ``Xavier``, ``Normal``, ``TruncNormal``,
            ``Uniform``, ``Kaiming``, ``Caffe2Xavier`` or ``Pretrained``)
            and may carry an ``override`` entry targeting specific
            sub-modules by attribute name.

    Raises:
        TypeError: if ``init_cfg`` is neither a dict nor a list of dicts.

    Example:
        >>> module = nn.Linear(2, 3, bias=True)
        >>> init_cfg = dict(type='Constant', layer='Linear', val=1, bias=2)
        >>> initialize(module, init_cfg)

        >>> # initialize a specific part from a pretrained model by prefix
        >>> init_cfg = dict(type='Pretrained',
        ...                 checkpoint='torchvision://resnet50',
        ...                 prefix='backbone.')
    """
    if not isinstance(init_cfg, (dict, list)):
        raise TypeError(
            f'init_cfg must be a dict or a list of dict, but got {type(init_cfg)}')
    if isinstance(init_cfg, dict):
        init_cfg = [init_cfg]
    for cfg in init_cfg:
        # Pop `override` first: the main cfg initializes the whole module,
        # then the override re-initializes the named sub-modules on top.
        # (A dead `else: pass` branch was removed here.)
        cp_cfg = copy.deepcopy(cfg)
        override = cp_cfg.pop('override', None)
        _initialize(module, cp_cfg)
        if override is not None:
            # `layer` must not leak into the override config.
            cp_cfg.pop('layer', None)
            _initialize_override(module, override, cp_cfg)
def _no_grad_trunc_normal_(tensor: Tensor, mean: float, std: float, a: float, b: float) -> Tensor: def norm_cdf(x): return ((1.0 + math.erf((x / math.sqrt(2.0)))) / 2.0) if ((mean < (a - (2 * std))) or (mean > (b + (2 * std)))): warnings.warn('mean is more than 2 std from [a, b] in nn.init.trunc_normal_. The distribution of values may be incorrect.', stacklevel=2) with torch.no_grad(): lower = norm_cdf(((a - mean) / std)) upper = norm_cdf(((b - mean) / std)) tensor.uniform_(((2 * lower) - 1), ((2 * upper) - 1)) tensor.erfinv_() tensor.mul_((std * math.sqrt(2.0))) tensor.add_(mean) tensor.clamp_(min=a, max=b) return tensor
def trunc_normal_(tensor: Tensor,
                  mean: float = 0.0,
                  std: float = 1.0,
                  a: float = (- 2.0),
                  b: float = 2.0) -> Tensor:
    """Fill ``tensor`` in-place with values from a truncated normal.

    Values are effectively drawn from N(mean, std^2) with samples outside
    ``[a, b]`` redrawn until they fall within the bounds. Works best when
    ``a <= mean <= b``.

    Modified from
    https://github.com/pytorch/pytorch/blob/master/torch/nn/init.py

    Args:
        tensor (``torch.Tensor``): an n-dimensional `torch.Tensor`.
        mean (float): the mean of the normal distribution.
        std (float): the standard deviation of the normal distribution.
        a (float): the minimum cutoff value.
        b (float): the maximum cutoff value.

    Returns:
        Tensor: ``tensor`` itself, filled in-place.
    """
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
def conv3x3(in_planes, out_planes, dilation=1):
    """Build a 3x3 convolution whose padding equals its dilation.

    With ``padding == dilation`` the spatial size is preserved for a
    stride-1 3x3 kernel.
    """
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        padding=dilation,
        dilation=dilation)
def make_vgg_layer(inplanes,
                   planes,
                   num_blocks,
                   dilation=1,
                   with_bn=False,
                   ceil_mode=False):
    """Build one VGG stage as a flat list of layers.

    The stage consists of ``num_blocks`` conv(-BN)-ReLU units followed by a
    single 2x2 max-pool.
    """
    layers = []
    in_channels = inplanes
    for _ in range(num_blocks):
        layers.append(conv3x3(in_channels, planes, dilation))
        if with_bn:
            layers.append(nn.BatchNorm2d(planes))
        layers.append(nn.ReLU(inplace=True))
        # After the first conv the channel count stays at ``planes``.
        in_channels = planes
    layers.append(nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=ceil_mode))
    return layers
class VGG(nn.Module):
    """VGG backbone.

    Builds the convolutional stages as one flat ``nn.Sequential`` named
    ``features`` and tracks, per stage, the [start, end) sub-module index
    range in ``range_sub_modules`` so that forward/freeze logic can address
    individual stages.

    Args:
        depth (int): Depth of vgg, from {11, 13, 16, 19}.
        with_bn (bool): Use BatchNorm or not.
        num_classes (int): number of classes for classification; a
            classifier head is built only when > 0.
        num_stages (int): VGG stages, normally 5.
        dilations (Sequence[int]): Dilation of each stage.
        out_indices (Sequence[int]): Output from which stages.
        frozen_stages (int): Stages to be frozen (all param fixed). -1 means
            not freezing any parameters.
        bn_eval (bool): Whether to set BN layers as eval mode, namely, freeze
            running stats (mean and var).
        bn_frozen (bool): Whether to freeze weight and bias of BN layers.
        ceil_mode (bool): ``ceil_mode`` of the max-pool layers.
        with_last_pool (bool): Whether to keep the pool after the last stage.
    """

    # number of conv blocks per stage for each supported depth
    arch_settings = {
        11: (1, 1, 2, 2, 2),
        13: (2, 2, 2, 2, 2),
        16: (2, 2, 3, 3, 3),
        19: (2, 2, 4, 4, 4)
    }

    def __init__(self,
                 depth,
                 with_bn=False,
                 num_classes=(- 1),
                 num_stages=5,
                 dilations=(1, 1, 1, 1, 1),
                 out_indices=(0, 1, 2, 3, 4),
                 frozen_stages=(- 1),
                 bn_eval=True,
                 bn_frozen=False,
                 ceil_mode=False,
                 with_last_pool=True):
        super(VGG, self).__init__()
        if (depth not in self.arch_settings):
            raise KeyError(f'invalid depth {depth} for vgg')
        assert ((num_stages >= 1) and (num_stages <= 5))
        stage_blocks = self.arch_settings[depth]
        self.stage_blocks = stage_blocks[:num_stages]
        assert (len(dilations) == num_stages)
        assert (max(out_indices) <= num_stages)
        self.num_classes = num_classes
        self.out_indices = out_indices
        self.frozen_stages = frozen_stages
        self.bn_eval = bn_eval
        self.bn_frozen = bn_frozen
        self.inplanes = 3  # RGB input
        start_idx = 0
        vgg_layers = []
        self.range_sub_modules = []
        for (i, num_blocks) in enumerate(self.stage_blocks):
            # Each block contributes conv+ReLU (2) or conv+BN+ReLU (3)
            # sub-modules, plus one pool per stage.
            num_modules = ((num_blocks * (2 + with_bn)) + 1)
            end_idx = (start_idx + num_modules)
            dilation = dilations[i]
            # Channels double per stage (64, 128, 256, 512) and cap at 512.
            planes = ((64 * (2 ** i)) if (i < 4) else 512)
            vgg_layer = make_vgg_layer(self.inplanes, planes, num_blocks, dilation=dilation, with_bn=with_bn, ceil_mode=ceil_mode)
            vgg_layers.extend(vgg_layer)
            self.inplanes = planes
            self.range_sub_modules.append([start_idx, end_idx])
            start_idx = end_idx
        if (not with_last_pool):
            # Drop the final max-pool and shrink the last stage's range.
            vgg_layers.pop((- 1))
            self.range_sub_modules[(- 1)][1] -= 1
        self.module_name = 'features'
        self.add_module(self.module_name, nn.Sequential(*vgg_layers))
        if (self.num_classes > 0):
            # Classic VGG head; assumes a 7x7x512 feature map at its input.
            self.classifier = nn.Sequential(nn.Linear(((512 * 7) * 7), 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, num_classes))

    def init_weights(self, pretrained=None):
        """Load a checkpoint when ``pretrained`` is a path, otherwise apply
        Kaiming/constant/normal defaults per layer type."""
        if isinstance(pretrained, str):
            logger = logging.getLogger()
            from ..runner import load_checkpoint
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif (pretrained is None):
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, nn.BatchNorm2d):
                    constant_init(m, 1)
                elif isinstance(m, nn.Linear):
                    normal_init(m, std=0.01)
        else:
            raise TypeError('pretrained must be a str or None')

    def forward(self, x):
        """Run the stages, collecting outputs at ``out_indices`` (plus the
        classifier output when a head exists)."""
        outs = []
        vgg_layers = getattr(self, self.module_name)
        for i in range(len(self.stage_blocks)):
            for j in range(*self.range_sub_modules[i]):
                vgg_layer = vgg_layers[j]
                x = vgg_layer(x)
            if (i in self.out_indices):
                outs.append(x)
        if (self.num_classes > 0):
            x = x.view(x.size(0), (- 1))
            x = self.classifier(x)
            outs.append(x)
        if (len(outs) == 1):
            return outs[0]
        else:
            return tuple(outs)

    def train(self, mode=True):
        """Switch train/eval mode while honoring ``bn_eval``, ``bn_frozen``
        and ``frozen_stages``."""
        super(VGG, self).train(mode)
        if self.bn_eval:
            # Keep BN running stats frozen even in train mode.
            for m in self.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eval()
                    if self.bn_frozen:
                        for params in m.parameters():
                            params.requires_grad = False
        vgg_layers = getattr(self, self.module_name)
        if (mode and (self.frozen_stages >= 0)):
            # Freeze the first `frozen_stages` stages entirely.
            for i in range(self.frozen_stages):
                for j in range(*self.range_sub_modules[i]):
                    mod = vgg_layers[j]
                    mod.eval()
                    for param in mod.parameters():
                        param.requires_grad = False
def single_gpu_test(model, data_loader):
    """Test model with a single gpu.

    Runs the model in eval mode over ``data_loader`` and displays a test
    progress bar, one tick per sample.

    Args:
        model (nn.Module): Model to be tested.
        data_loader (nn.Dataloader): Pytorch data loader.

    Returns:
        list: The prediction results.
    """
    model.eval()
    results = []
    prog_bar = mmcv.ProgressBar(len(data_loader.dataset))
    for data in data_loader:
        with torch.no_grad():
            batch_results = model(return_loss=False, **data)
        results.extend(batch_results)
        # Advance the bar once per sample in this batch.
        for _ in range(len(batch_results)):
            prog_bar.update()
    return results
def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):
    """Test model with multiple gpus.

    Collects the results under two modes: with ``gpu_collect=True`` results
    are encoded to gpu tensors and gathered over gpu communication;
    otherwise each rank saves its results under ``tmpdir`` and rank 0
    collects them from disk.

    Args:
        model (nn.Module): Model to be tested.
        data_loader (nn.Dataloader): Pytorch data loader.
        tmpdir (str): Path of directory to save the temporary results from
            different gpus under cpu mode.
        gpu_collect (bool): Option to use either gpu or cpu to collect
            results.

    Returns:
        list: The prediction results (merged on rank 0; see the collect
        helpers for what other ranks receive).
    """
    model.eval()
    results = []
    dataset = data_loader.dataset
    (rank, world_size) = get_dist_info()
    if (rank == 0):
        # Only rank 0 renders the progress bar.
        prog_bar = mmcv.ProgressBar(len(dataset))
    time.sleep(2)  # NOTE(review): presumably guards against start-up deadlock/garbled output across ranks — confirm
    for (i, data) in enumerate(data_loader):
        with torch.no_grad():
            result = model(return_loss=False, **data)
        results.extend(result)
        if (rank == 0):
            # Each rank processes ~batch_size samples per step, so advance
            # the bar by batch_size * world_size, clamped to the dataset end.
            batch_size = len(result)
            batch_size_all = (batch_size * world_size)
            if ((batch_size_all + prog_bar.completed) > len(dataset)):
                batch_size_all = (len(dataset) - prog_bar.completed)
            for _ in range(batch_size_all):
                prog_bar.update()
    if gpu_collect:
        results = collect_results_gpu(results, len(dataset))
    else:
        results = collect_results_cpu(results, len(dataset), tmpdir)
    return results
def collect_results_cpu(result_part, size, tmpdir=None):
    """Collect results from all ranks via a shared temp directory (cpu mode).

    Each rank dumps its partial results to ``tmpdir`` as a pickle; rank 0
    loads every part, interleaves them back into order, truncates to
    ``size`` and returns the merged list. Non-zero ranks return ``None``.

    Args:
        result_part (list): Result list of this rank.
        size (int): Size of the full results, commonly the dataset length.
        tmpdir (str | None): temporal directory for collected results to
            store. If set to None, a random temporal directory is created
            by rank 0 and broadcast to the other ranks.

    Returns:
        list: The collected results on rank 0, ``None`` elsewhere.
    """
    (rank, world_size) = get_dist_info()
    if (tmpdir is None):
        MAX_LEN = 512
        # 32 is the ASCII code of the whitespace used as padding, so the
        # broadcast buffer has a fixed length on every rank and can be
        # stripped back off with rstrip().
        dir_tensor = torch.full((MAX_LEN,), 32, dtype=torch.uint8, device='cuda')
        if (rank == 0):
            mmcv.mkdir_or_exist('.dist_test')
            tmpdir = tempfile.mkdtemp(dir='.dist_test')
            tmpdir = torch.tensor(bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
            dir_tensor[:len(tmpdir)] = tmpdir
        # Rank 0 chose the directory; every other rank receives it here.
        dist.broadcast(dir_tensor, 0)
        tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
    else:
        mmcv.mkdir_or_exist(tmpdir)
    mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl'))
    # Make sure every part file exists before rank 0 starts reading.
    dist.barrier()
    if (rank != 0):
        return None
    else:
        part_list = []
        for i in range(world_size):
            part_file = osp.join(tmpdir, f'part_{i}.pkl')
            part_result = mmcv.load(part_file)
            # Some ranks may hold no samples; skip empty parts so the
            # zip() below is not cut short.
            if part_result:
                part_list.append(part_result)
        ordered_results = []
        # Interleave the parts; assumes samples were sharded round-robin
        # over ranks (standard distributed sampler) — zip restores order.
        for res in zip(*part_list):
            ordered_results.extend(list(res))
        # Drop samples that were duplicated to make batches divisible.
        ordered_results = ordered_results[:size]
        shutil.rmtree(tmpdir)
        return ordered_results
def collect_results_gpu(result_part, size):
    """Collect results from all ranks with gpu communication (gpu mode).

    Each rank pickles its partial results into a uint8 cuda tensor, pads it
    to the maximum length across ranks (``all_gather`` requires equal
    shapes) and all-gathers. Rank 0 unpickles every part, interleaves them
    back into order, truncates to ``size`` and returns the merged list;
    other ranks implicitly return ``None``.

    Args:
        result_part (list): Result list of this rank.
        size (int): Size of the full results, commonly the dataset length.

    Returns:
        list: The collected results on rank 0, ``None`` elsewhere.
    """
    (rank, world_size) = get_dist_info()
    # Serialize this rank's results into a byte tensor on the gpu.
    part_tensor = torch.tensor(bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda')
    # Gather every rank's byte length so all can pad to a common size.
    shape_tensor = torch.tensor(part_tensor.shape, device='cuda')
    shape_list = [shape_tensor.clone() for _ in range(world_size)]
    dist.all_gather(shape_list, shape_tensor)
    shape_max = torch.tensor(shape_list).max()
    part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')
    part_send[:shape_tensor[0]] = part_tensor
    part_recv_list = [part_tensor.new_zeros(shape_max) for _ in range(world_size)]
    dist.all_gather(part_recv_list, part_send)
    if (rank == 0):
        part_list = []
        for (recv, shape) in zip(part_recv_list, shape_list):
            # Trim the zero padding before unpickling.
            part_result = pickle.loads(recv[:shape[0]].cpu().numpy().tobytes())
            # Skip empty parts so zip() below is not cut short.
            if part_result:
                part_list.append(part_result)
        ordered_results = []
        # Interleave the parts; assumes round-robin sharding over ranks.
        for res in zip(*part_list):
            ordered_results.extend(list(res))
        # Drop samples duplicated to make batches divisible.
        ordered_results = ordered_results[:size]
        return ordered_results
class BaseStorageBackend(metaclass=ABCMeta):
    """Abstract base class of storage backends.

    Every backend must implement two APIs: ``get()``, which reads the file
    as a byte stream, and ``get_text()``, which reads it as text.
    """

    # Whether this backend follows symlinks; subclasses may override.
    _allow_symlink = False

    @property
    def name(self):
        """str: class name of the concrete backend."""
        return type(self).__name__

    @property
    def allow_symlink(self):
        """bool: whether symlinks are allowed by this backend."""
        return self._allow_symlink

    @abstractmethod
    def get(self, filepath):
        """Read ``filepath`` and return its content as bytes."""

    @abstractmethod
    def get_text(self, filepath):
        """Read ``filepath`` and return its content as text."""
class CephBackend(BaseStorageBackend):
    """Ceph storage backend (for internal use).

    Args:
        path_mapping (dict|None): mapping from local path prefixes to Petrel
            path prefixes; every key found in a filepath is replaced by its
            value. Default: None.

    .. warning::
        :class:`mmcv.fileio.file_client.CephBackend` will be deprecated,
        please use :class:`mmcv.fileio.file_client.PetrelBackend` instead.
    """

    def __init__(self, path_mapping=None):
        try:
            import ceph
        except ImportError:
            raise ImportError('Please install ceph to enable CephBackend.')
        warnings.warn(
            'CephBackend will be deprecated, please use PetrelBackend instead',
            DeprecationWarning)
        self._client = ceph.S3Client()
        assert path_mapping is None or isinstance(path_mapping, dict)
        self.path_mapping = path_mapping

    def get(self, filepath):
        filepath = str(filepath)
        if self.path_mapping is not None:
            # Rewrite mapped prefixes before hitting the S3 client.
            for src, dst in self.path_mapping.items():
                filepath = filepath.replace(src, dst)
        return memoryview(self._client.Get(filepath))

    def get_text(self, filepath, encoding=None):
        raise NotImplementedError
class PetrelBackend(BaseStorageBackend):
    """Petrel storage backend (for internal use).

    Supports reading and writing data to multiple clusters: when the file
    path contains a cluster name, that cluster is used, otherwise the
    default cluster is accessed.

    Args:
        path_mapping (dict, optional): mapping from local path prefixes to
            Petrel path prefixes; every key found in a filepath is replaced
            by its value. Default: None.
        enable_mc (bool, optional): Whether to enable memcached support.
            Default: True.

    Examples:
        >>> filepath1 = 's3://path/of/file'
        >>> filepath2 = 'cluster-name:s3://path/of/file'
        >>> client = PetrelBackend()
        >>> client.get(filepath1)  # get data from default cluster
        >>> client.get(filepath2)  # get data from 'cluster-name' cluster
    """

    def __init__(self,
                 path_mapping: Optional[dict] = None,
                 enable_mc: bool = True):
        try:
            from petrel_client import client
        except ImportError:
            raise ImportError('Please install petrel_client to enable PetrelBackend.')
        self._client = client.Client(enable_mc=enable_mc)
        assert (isinstance(path_mapping, dict) or (path_mapping is None))
        self.path_mapping = path_mapping

    def _map_path(self, filepath: Union[str, Path]) -> str:
        """Replace prefixes of ``filepath`` according to ``path_mapping``."""
        filepath = str(filepath)
        if (self.path_mapping is not None):
            for (k, v) in self.path_mapping.items():
                filepath = filepath.replace(k, v)
        return filepath

    def _format_path(self, filepath: str) -> str:
        """Normalize backslashes (e.g. produced by ``os.path.join`` on
        Windows) to '/' so the path matches petrel oss format."""
        return re.sub('\\\\+', '/', filepath)

    def get(self, filepath: Union[str, Path]) -> memoryview:
        """Read ``filepath`` with 'rb' semantics.

        Returns:
            memoryview: a memory view of the bytes object to avoid copying;
            convert with ``value_buf.tobytes()`` when real bytes are needed.
        """
        filepath = self._map_path(filepath)
        filepath = self._format_path(filepath)
        value = self._client.Get(filepath)
        value_buf = memoryview(value)
        return value_buf

    def get_text(self,
                 filepath: Union[str, Path],
                 encoding: str = 'utf-8') -> str:
        """Read ``filepath`` and decode it to ``str`` using ``encoding``."""
        return str(self.get(filepath), encoding=encoding)

    def put(self, obj: bytes, filepath: Union[str, Path]) -> None:
        """Write the bytes ``obj`` to ``filepath``."""
        filepath = self._map_path(filepath)
        filepath = self._format_path(filepath)
        self._client.put(filepath, obj)

    def put_text(self,
                 obj: str,
                 filepath: Union[str, Path],
                 encoding: str = 'utf-8') -> None:
        """Encode ``obj`` with ``encoding`` and write it to ``filepath``."""
        self.put(bytes(obj, encoding=encoding), filepath)

    def remove(self, filepath: Union[str, Path]) -> None:
        """Remove the file at ``filepath``.

        Raises:
            NotImplementedError: if the installed SDK lacks ``delete``.
        """
        if (not has_method(self._client, 'delete')):
            raise NotImplementedError('Current version of Petrel Python SDK has not supported the `delete` method, please use a higher version or dev branch instead.')
        filepath = self._map_path(filepath)
        filepath = self._format_path(filepath)
        self._client.delete(filepath)

    def exists(self, filepath: Union[str, Path]) -> bool:
        """Return ``True`` if ``filepath`` exists as a file or a directory."""
        if (not (has_method(self._client, 'contains') and has_method(self._client, 'isdir'))):
            raise NotImplementedError('Current version of Petrel Python SDK has not supported the `contains` and `isdir` methods, please use a higherversion or dev branch instead.')
        filepath = self._map_path(filepath)
        filepath = self._format_path(filepath)
        return (self._client.contains(filepath) or self._client.isdir(filepath))

    def isdir(self, filepath: Union[str, Path]) -> bool:
        """Return ``True`` if ``filepath`` points to a (simulated) directory."""
        if (not has_method(self._client, 'isdir')):
            raise NotImplementedError('Current version of Petrel Python SDK has not supported the `isdir` method, please use a higher version or dev branch instead.')
        filepath = self._map_path(filepath)
        filepath = self._format_path(filepath)
        return self._client.isdir(filepath)

    def isfile(self, filepath: Union[str, Path]) -> bool:
        """Return ``True`` if ``filepath`` points to a file."""
        if (not has_method(self._client, 'contains')):
            raise NotImplementedError('Current version of Petrel Python SDK has not supported the `contains` method, please use a higher version or dev branch instead.')
        filepath = self._map_path(filepath)
        filepath = self._format_path(filepath)
        return self._client.contains(filepath)

    def join_path(self, filepath: Union[str, Path],
                  *filepaths: Union[str, Path]) -> str:
        """Concatenate all given paths with '/' after mapping/formatting.

        Note: only the first path has a trailing '/' stripped; the remaining
        components are joined exactly as formatted.
        """
        filepath = self._format_path(self._map_path(filepath))
        if filepath.endswith('/'):
            filepath = filepath[:(- 1)]
        formatted_paths = [filepath]
        for path in filepaths:
            formatted_paths.append(self._format_path(self._map_path(path)))
        return '/'.join(formatted_paths)

    @contextmanager
    def get_local_path(self, filepath: Union[str, Path]) -> Iterable[str]:
        """Download ``filepath`` to a local temporary file and yield its path.

        Decorated by :meth:`contextlib.contextmanager`, so it must be used
        in a ``with`` statement; the temporary file is removed when the
        ``with`` block exits.

        Examples:
            >>> client = PetrelBackend()
            >>> with client.get_local_path('s3://path/of/your/file') as path:
            ...     # do something here

        Yields:
            Iterable[str]: Only yield one temporary path.
        """
        filepath = self._map_path(filepath)
        filepath = self._format_path(filepath)
        assert self.isfile(filepath)
        try:
            f = tempfile.NamedTemporaryFile(delete=False)
            f.write(self.get(filepath))
            f.close()
            (yield f.name)
        finally:
            os.remove(f.name)

    def list_dir_or_file(self,
                         dir_path: Union[str, Path],
                         list_dir: bool = True,
                         list_file: bool = True,
                         suffix: Optional[Union[str, Tuple[str]]] = None,
                         recursive: bool = False) -> Iterator[str]:
        """Scan ``dir_path`` and yield paths relative to it (arbitrary order).

        Petrel has no real directories; it simulates a hierarchy through
        public prefixes. Entries whose raw listing ends with '/' are treated
        as logical directories; the trailing '/' is stripped from yielded
        directory paths for consistency with other backends.

        Args:
            dir_path (str | Path): Path of the directory.
            list_dir (bool): List the directories. Default: True.
            list_file (bool): List the path of files. Default: True.
            suffix (str or tuple[str], optional): File suffix that we are
                interested in. Default: None.
            recursive (bool): If set to True, recursively scan the
                directory. Default: False.

        Yields:
            Iterable[str]: A relative path to ``dir_path``.
        """
        if (not has_method(self._client, 'list')):
            raise NotImplementedError('Current version of Petrel Python SDK has not supported the `list` method, please use a higher version or dev branch instead.')
        dir_path = self._map_path(dir_path)
        dir_path = self._format_path(dir_path)
        if (list_dir and (suffix is not None)):
            raise TypeError('`list_dir` should be False when `suffix` is not None')
        if ((suffix is not None) and (not isinstance(suffix, (str, tuple)))):
            raise TypeError('`suffix` must be a string or tuple of strings')
        if (not dir_path.endswith('/')):
            dir_path += '/'
        root = dir_path

        def _list_dir_or_file(dir_path, list_dir, list_file, suffix, recursive):
            for path in self._client.list(dir_path):
                if path.endswith('/'):
                    # A public prefix, i.e. a logical directory.
                    next_dir_path = self.join_path(dir_path, path)
                    if list_dir:
                        # Strip ``root`` and the trailing '/'.
                        rel_dir = next_dir_path[len(root):(- 1)]
                        (yield rel_dir)
                    if recursive:
                        (yield from _list_dir_or_file(next_dir_path, list_dir, list_file, suffix, recursive))
                else:
                    absolute_path = self.join_path(dir_path, path)
                    rel_path = absolute_path[len(root):]
                    if (((suffix is None) or rel_path.endswith(suffix)) and list_file):
                        (yield rel_path)

        return _list_dir_or_file(dir_path, list_dir, list_file, suffix, recursive)
class MemcachedBackend(BaseStorageBackend):
    """Storage backend that fetches values from a memcached cluster.

    Args:
        server_list_cfg (str): Config file for the memcached server list.
        client_cfg (str): Config file for the memcached client.
        sys_path (str | None): Extra path appended to ``sys.path`` so the
            ``mc`` package can be located. Default: None.
    """

    def __init__(self, server_list_cfg, client_cfg, sys_path=None):
        # Optionally extend sys.path so that ``import mc`` can succeed.
        if sys_path is not None:
            import sys
            sys.path.append(sys_path)
        try:
            import mc
        except ImportError:
            raise ImportError(
                'Please install memcached to enable MemcachedBackend.')

        self.server_list_cfg = server_list_cfg
        self.client_cfg = client_cfg
        # One shared client instance plus a reusable output buffer.
        self._client = mc.MemcachedClient.GetInstance(self.server_list_cfg,
                                                      self.client_cfg)
        self._mc_buffer = mc.pyvector()

    def get(self, filepath):
        import mc
        key = str(filepath)
        # ``Get`` fills the preallocated buffer in place.
        self._client.Get(key, self._mc_buffer)
        return mc.ConvertBuffer(self._mc_buffer)

    def get_text(self, filepath, encoding=None):
        # Text reading is not supported by the memcached backend.
        raise NotImplementedError
class LmdbBackend(BaseStorageBackend):
    """Storage backend that reads values from an lmdb database.

    Args:
        db_path (str): Lmdb database path.
        readonly (bool, optional): Lmdb environment parameter. If True,
            disallow any write operations. Default: True.
        lock (bool, optional): Lmdb environment parameter. If False, do not
            lock the database on concurrent access. Default: False.
        readahead (bool, optional): Lmdb environment parameter. If False,
            disable the OS filesystem readahead mechanism, which may improve
            random read performance when the database is larger than RAM.
            Default: False.

    Attributes:
        db_path (str): Lmdb database path.
    """

    def __init__(self,
                 db_path,
                 readonly=True,
                 lock=False,
                 readahead=False,
                 **kwargs):
        try:
            import lmdb
        except ImportError:
            raise ImportError('Please install lmdb to enable LmdbBackend.')

        self.db_path = str(db_path)
        # The environment is opened once and reused for every ``get``.
        self._client = lmdb.open(
            self.db_path,
            readonly=readonly,
            lock=lock,
            readahead=readahead,
            **kwargs)

    def get(self, filepath):
        """Return the value stored under the lmdb key ``filepath``.

        Args:
            filepath (str | obj:`Path`): Here, filepath is the lmdb key.
        """
        key = str(filepath)
        with self._client.begin(write=False) as txn:
            buf = txn.get(key.encode('ascii'))
        return buf

    def get_text(self, filepath, encoding=None):
        # Text reading is not supported by the lmdb backend.
        raise NotImplementedError
class HardDiskBackend(BaseStorageBackend):
    """Raw hard disks storage backend."""

    _allow_symlink = True

    def get(self, filepath: Union[str, Path]) -> bytes:
        """Read bytes from ``filepath``.

        Args:
            filepath (str or Path): Path to read data.

        Returns:
            bytes: Expected bytes object.
        """
        with open(filepath, 'rb') as fh:
            return fh.read()

    def get_text(self,
                 filepath: Union[str, Path],
                 encoding: str = 'utf-8') -> str:
        """Read text from ``filepath``.

        Args:
            filepath (str or Path): Path to read data.
            encoding (str): The encoding format used to open the
                ``filepath``. Default: 'utf-8'.

        Returns:
            str: Expected text reading from ``filepath``.
        """
        with open(filepath, 'r', encoding=encoding) as fh:
            return fh.read()

    def put(self, obj: bytes, filepath: Union[str, Path]) -> None:
        """Write bytes to ``filepath``.

        Note:
            ``put`` will create a directory if the directory of ``filepath``
            does not exist.

        Args:
            obj (bytes): Data to be written.
            filepath (str or Path): Path to write data.
        """
        mmcv.mkdir_or_exist(osp.dirname(filepath))
        with open(filepath, 'wb') as fh:
            fh.write(obj)

    def put_text(self,
                 obj: str,
                 filepath: Union[str, Path],
                 encoding: str = 'utf-8') -> None:
        """Write text to ``filepath``.

        Note:
            ``put_text`` will create a directory if the directory of
            ``filepath`` does not exist.

        Args:
            obj (str): Data to be written.
            filepath (str or Path): Path to write data.
            encoding (str): The encoding format used to open the
                ``filepath``. Default: 'utf-8'.
        """
        mmcv.mkdir_or_exist(osp.dirname(filepath))
        with open(filepath, 'w', encoding=encoding) as fh:
            fh.write(obj)

    def remove(self, filepath: Union[str, Path]) -> None:
        """Remove the file at ``filepath``."""
        os.remove(filepath)

    def exists(self, filepath: Union[str, Path]) -> bool:
        """Return ``True`` if ``filepath`` exists, ``False`` otherwise."""
        return osp.exists(filepath)

    def isdir(self, filepath: Union[str, Path]) -> bool:
        """Return ``True`` if ``filepath`` points to a directory."""
        return osp.isdir(filepath)

    def isfile(self, filepath: Union[str, Path]) -> bool:
        """Return ``True`` if ``filepath`` points to a file."""
        return osp.isfile(filepath)

    def join_path(self, filepath: Union[str, Path],
                  *filepaths: Union[str, Path]) -> str:
        """Join one or more filepath components intelligently.

        Args:
            filepath (str or Path): Path to be concatenated.

        Returns:
            str: The result of concatenation.
        """
        return osp.join(filepath, *filepaths)

    @contextmanager
    def get_local_path(
            self,
            filepath: Union[str, Path]) -> Iterable[Union[str, Path]]:
        """Only for unified API and do nothing."""
        yield filepath

    def list_dir_or_file(self,
                         dir_path: Union[str, Path],
                         list_dir: bool = True,
                         list_file: bool = True,
                         suffix: Optional[Union[str, Tuple[str]]] = None,
                         recursive: bool = False) -> Iterator[str]:
        """Scan a directory to find the interested directories or files in
        arbitrary order.

        Note:
            :meth:`list_dir_or_file` returns the path relative to
            ``dir_path``.

        Args:
            dir_path (str | Path): Path of the directory.
            list_dir (bool): List the directories. Default: True.
            list_file (bool): List the path of files. Default: True.
            suffix (str or tuple[str], optional): File suffix that we are
                interested in. Default: None.
            recursive (bool): If set to True, recursively scan the
                directory. Default: False.

        Yields:
            Iterable[str]: A relative path to ``dir_path``.
        """
        if list_dir and suffix is not None:
            raise TypeError('`suffix` should be None when `list_dir` is True')
        if suffix is not None and not isinstance(suffix, (str, tuple)):
            raise TypeError('`suffix` must be a string or tuple of strings')

        root = dir_path

        def _scan(cur_dir):
            # Dot-prefixed files are skipped, but dot-prefixed directories
            # are still descended into — matching the original behavior.
            for entry in os.scandir(cur_dir):
                if not entry.name.startswith('.') and entry.is_file():
                    rel_path = osp.relpath(entry.path, root)
                    if (suffix is None
                            or rel_path.endswith(suffix)) and list_file:
                        yield rel_path
                elif osp.isdir(entry.path):
                    if list_dir:
                        yield osp.relpath(entry.path, root)
                    if recursive:
                        yield from _scan(entry.path)

        return _scan(dir_path)
class HTTPBackend(BaseStorageBackend):
    """HTTP and HTTPS storage backend."""

    def get(self, filepath):
        """Fetch ``filepath`` over HTTP(S) and return the raw bytes."""
        return urlopen(filepath).read()

    def get_text(self, filepath, encoding='utf-8'):
        """Fetch ``filepath`` over HTTP(S) and decode it as text.

        Args:
            filepath (str): URL to read data from.
            encoding (str): The encoding used to decode the response body.
                Default: 'utf-8'.
        """
        return urlopen(filepath).read().decode(encoding)

    @contextmanager
    def get_local_path(self, filepath: str) -> Iterable[str]:
        """Download ``filepath`` to a temporary local file and yield its
        path.

        ``get_local_path`` is decorated by :meth:`contxtlib.contextmanager`.
        It can be called with a ``with`` statement; when the ``with`` block
        exits, the temporary path is removed.

        Args:
            filepath (str): Download a file from ``filepath``.

        Examples:
            >>> client = HTTPBackend()
            >>> with client.get_local_path('http://path/of/your/file') as p:
            ...     # do something here

        Yields:
            Iterable[str]: Only yield one temporary path.
        """
        try:
            tmp = tempfile.NamedTemporaryFile(delete=False)
            tmp.write(self.get(filepath))
            tmp.close()
            yield tmp.name
        finally:
            os.remove(tmp.name)
class FileClient:
    """A general file client to access files in different backends.

    The client loads a file or text in a specified backend from its path and
    returns it as a binary or text file. A backend can be chosen either by
    name (``backend``) or by the prefix of a path (``prefix``); ``backend``
    has the higher priority when both are given, and the disk backend is
    used when neither is. Other backend classes can be registered with a
    name, prefixes and a backend class. A singleton pattern is used: calling
    the constructor with the same arguments returns the same object.

    Args:
        backend (str, optional): The storage backend type. Options are
            "disk", "ceph", "memcached", "lmdb", "http" and "petrel".
            Default: None.
        prefix (str, optional): The prefix of the registered storage
            backend. Options are "s3", "http", "https". Default: None.

    Examples:
        >>> # only set backend
        >>> file_client = FileClient(backend='petrel')
        >>> # only set prefix
        >>> file_client = FileClient(prefix='s3')
        >>> # set both backend and prefix but use backend to choose client
        >>> file_client = FileClient(backend='petrel', prefix='s3')
        >>> # if the arguments are the same, the same object is returned
        >>> file_client1 = FileClient(backend='petrel')
        >>> file_client1 is file_client
        True

    Attributes:
        client (:obj:`BaseStorageBackend`): The backend object.
    """

    # Registered backend classes, keyed by backend name.
    _backends = {
        'disk': HardDiskBackend,
        'ceph': CephBackend,
        'memcached': MemcachedBackend,
        'lmdb': LmdbBackend,
        'petrel': PetrelBackend,
        'http': HTTPBackend,
    }
    # Names/prefixes re-registered with ``force=True``; cached instances for
    # these keys are considered stale and rebuilt on the next construction.
    _overridden_backends = set()
    _prefix_to_backends = {
        's3': PetrelBackend,
        'http': HTTPBackend,
        'https': HTTPBackend,
    }
    _overridden_prefixes = set()
    # Singleton cache: 'backend:prefix[:key:value...]' -> FileClient.
    _instances = {}

    def __new__(cls, backend=None, prefix=None, **kwargs):
        if backend is None and prefix is None:
            backend = 'disk'
        if backend is not None and backend not in cls._backends:
            raise ValueError(
                f'Backend {backend} is not supported. Currently supported '
                f'ones are {list(cls._backends.keys())}')
        if prefix is not None and prefix not in cls._prefix_to_backends:
            raise ValueError(
                f'prefix {prefix} is not supported. Currently supported '
                f'ones are {list(cls._prefix_to_backends.keys())}')

        # Build a cache key out of all constructor arguments.
        arg_key = f'{backend}:{prefix}'
        for key, value in kwargs.items():
            arg_key += f':{key}:{value}'

        # Reuse the cached instance unless the backend/prefix registration
        # has been overridden since it was created.
        if (arg_key in cls._instances
                and backend not in cls._overridden_backends
                and prefix not in cls._overridden_prefixes):
            _instance = cls._instances[arg_key]
        else:
            _instance = super().__new__(cls)
            if backend is not None:
                _instance.client = cls._backends[backend](**kwargs)
            else:
                _instance.client = cls._prefix_to_backends[prefix](**kwargs)
            cls._instances[arg_key] = _instance
        return _instance

    @property
    def name(self):
        """str: Name of the underlying backend."""
        return self.client.name

    @property
    def allow_symlink(self):
        """bool: Whether the underlying backend allows symlinks."""
        return self.client.allow_symlink

    @staticmethod
    def parse_uri_prefix(uri: Union[str, Path]) -> Optional[str]:
        """Parse the prefix of a uri.

        Args:
            uri (str | Path): Uri to be parsed that contains the file
                prefix.

        Examples:
            >>> FileClient.parse_uri_prefix('s3://path/of/your/file')
            's3'

        Returns:
            str | None: Return the prefix of uri if the uri contains '://'
            else ``None``.
        """
        assert is_filepath(uri)
        uri = str(uri)
        if '://' not in uri:
            return None
        prefix, _ = uri.split('://')
        # e.g. 'petrel:s3://...' -> keep only the part after the colon.
        if ':' in prefix:
            _, prefix = prefix.split(':')
        return prefix

    @classmethod
    def infer_client(cls,
                     file_client_args: Optional[dict] = None,
                     uri: Optional[Union[str, Path]] = None) -> 'FileClient':
        """Infer a suitable file client based on the URI and arguments.

        Args:
            file_client_args (dict, optional): Arguments to instantiate a
                FileClient. Default: None.
            uri (str | Path, optional): Uri to be parsed that contains the
                file prefix. Default: None.

        Examples:
            >>> uri = 's3://path/of/your/file'
            >>> file_client = FileClient.infer_client(uri=uri)
            >>> file_client_args = {'backend': 'petrel'}
            >>> file_client = FileClient.infer_client(file_client_args)

        Returns:
            FileClient: Instantiated FileClient object.
        """
        assert file_client_args is not None or uri is not None
        if file_client_args is None:
            # Explicit args take priority; otherwise infer from the uri.
            file_prefix = cls.parse_uri_prefix(uri)
            return cls(prefix=file_prefix)
        return cls(**file_client_args)

    @classmethod
    def _register_backend(cls, name, backend, force=False, prefixes=None):
        # Validate the registration arguments before mutating class state.
        if not isinstance(name, str):
            raise TypeError('the backend name should be a string, '
                            f'but got {type(name)}')
        if not inspect.isclass(backend):
            raise TypeError(f'backend should be a class but got '
                            f'{type(backend)}')
        if not issubclass(backend, BaseStorageBackend):
            raise TypeError(
                f'backend {backend} is not a subclass of BaseStorageBackend')
        if not force and name in cls._backends:
            raise KeyError(
                f'{name} is already registered as a storage backend, '
                'add "force=True" if you want to override it')

        if name in cls._backends and force:
            # Remember the override so stale cached instances are rebuilt.
            cls._overridden_backends.add(name)
        cls._backends[name] = backend

        if prefixes is not None:
            if isinstance(prefixes, str):
                prefixes = [prefixes]
            else:
                assert isinstance(prefixes, (list, tuple))
            for prefix in prefixes:
                if prefix not in cls._prefix_to_backends:
                    cls._prefix_to_backends[prefix] = backend
                elif prefix in cls._prefix_to_backends and force:
                    cls._overridden_prefixes.add(prefix)
                    cls._prefix_to_backends[prefix] = backend
                else:
                    raise KeyError(
                        f'{prefix} is already registered as a storage '
                        'backend, add "force=True" if you want to override '
                        'it')

    @classmethod
    def register_backend(cls, name, backend=None, force=False, prefixes=None):
        """Register a backend to FileClient.

        This method can be used as a normal class method or a decorator.

        .. code-block:: python

            class NewBackend(BaseStorageBackend):

                def get(self, filepath):
                    return filepath

                def get_text(self, filepath):
                    return filepath

            FileClient.register_backend('new', NewBackend)

        or

        .. code-block:: python

            @FileClient.register_backend('new')
            class NewBackend(BaseStorageBackend):

                def get(self, filepath):
                    return filepath

                def get_text(self, filepath):
                    return filepath

        Args:
            name (str): The name of the registered backend.
            backend (class, optional): The backend class to be registered,
                which must be a subclass of :class:`BaseStorageBackend`.
                When this method is used as a decorator, backend is None.
                Defaults to None.
            force (bool, optional): Whether to override the backend if the
                name has already been registered. Defaults to False.
            prefixes (str or list[str] or tuple[str], optional): The
                prefixes of the registered storage backend. Default: None.
                `New in version 1.3.15.`
        """
        if backend is not None:
            cls._register_backend(
                name, backend, force=force, prefixes=prefixes)
            return

        def _register(backend_cls):
            cls._register_backend(
                name, backend_cls, force=force, prefixes=prefixes)
            return backend_cls

        return _register

    def get(self, filepath: Union[str, Path]) -> Union[bytes, memoryview]:
        """Read data from a given ``filepath`` with 'rb' mode.

        Note:
            There are two types of return values for ``get``: ``bytes`` or
            ``memoryview``. ``memoryview`` avoids copying; convert it with
            ``.tobytes()`` if ``bytes`` are needed.

        Args:
            filepath (str or Path): Path to read data.

        Returns:
            bytes | memoryview: Expected bytes object or a memory view of
            the bytes object.
        """
        return self.client.get(filepath)

    def get_text(self, filepath: Union[str, Path], encoding='utf-8') -> str:
        """Read data from a given ``filepath`` with 'r' mode.

        Args:
            filepath (str or Path): Path to read data.
            encoding (str): The encoding format used to open the
                ``filepath``. Default: 'utf-8'.

        Returns:
            str: Expected text reading from ``filepath``.
        """
        return self.client.get_text(filepath, encoding)

    def put(self, obj: bytes, filepath: Union[str, Path]) -> None:
        """Write data to a given ``filepath`` with 'wb' mode.

        Note:
            ``put`` should create a directory if the directory of
            ``filepath`` does not exist.

        Args:
            obj (bytes): Data to be written.
            filepath (str or Path): Path to write data.
        """
        self.client.put(obj, filepath)

    def put_text(self,
                 obj: str,
                 filepath: Union[str, Path],
                 encoding: str = 'utf-8') -> None:
        """Write data to a given ``filepath`` with 'w' mode.

        Note:
            ``put_text`` should create a directory if the directory of
            ``filepath`` does not exist.

        Args:
            obj (str): Data to be written.
            filepath (str or Path): Path to write data.
            encoding (str): The encoding format used to encode the ``obj``.
                Default: 'utf-8'.
        """
        # Fix: the docstring documented an ``encoding`` argument that the
        # signature did not accept; it is now forwarded to the backend.
        self.client.put_text(obj, filepath, encoding)

    def remove(self, filepath: Union[str, Path]) -> None:
        """Remove a file.

        Args:
            filepath (str, Path): Path to be removed.
        """
        self.client.remove(filepath)

    def exists(self, filepath: Union[str, Path]) -> bool:
        """Check whether a file path exists.

        Args:
            filepath (str or Path): Path to be checked whether exists.

        Returns:
            bool: Return ``True`` if ``filepath`` exists, ``False``
            otherwise.
        """
        return self.client.exists(filepath)

    def isdir(self, filepath: Union[str, Path]) -> bool:
        """Check whether a file path is a directory.

        Args:
            filepath (str or Path): Path to be checked whether it is a
                directory.

        Returns:
            bool: Return ``True`` if ``filepath`` points to a directory,
            ``False`` otherwise.
        """
        return self.client.isdir(filepath)

    def isfile(self, filepath: Union[str, Path]) -> bool:
        """Check whether a file path is a file.

        Args:
            filepath (str or Path): Path to be checked whether it is a file.

        Returns:
            bool: Return ``True`` if ``filepath`` points to a file,
            ``False`` otherwise.
        """
        return self.client.isfile(filepath)

    def join_path(self, filepath: Union[str, Path],
                  *filepaths: Union[str, Path]) -> str:
        """Concatenate all file paths.

        Join one or more filepath components intelligently. The return
        value is the concatenation of filepath and any members of
        *filepaths.

        Args:
            filepath (str or Path): Path to be concatenated.

        Returns:
            str: The result of concatenation.
        """
        return self.client.join_path(filepath, *filepaths)

    @contextmanager
    def get_local_path(self, filepath: Union[str, Path]) -> Iterable[str]:
        """Download data from ``filepath`` and write the data to local path.

        ``get_local_path`` is decorated by :meth:`contxtlib.contextmanager`.
        It can be called with a ``with`` statement; when the ``with`` block
        exits, the temporary path is released.

        Note:
            If the ``filepath`` is a local path, just return itself.

        .. warning::
            ``get_local_path`` is an experimental interface that may change
            in the future.

        Args:
            filepath (str or Path): Path to be read data.

        Examples:
            >>> file_client = FileClient(prefix='s3')
            >>> with file_client.get_local_path('s3://bucket/abc.jpg') as p:
            ...     # do something here

        Yields:
            Iterable[str]: Only yield one path.
        """
        with self.client.get_local_path(str(filepath)) as local_path:
            yield local_path

    def list_dir_or_file(self,
                         dir_path: Union[str, Path],
                         list_dir: bool = True,
                         list_file: bool = True,
                         suffix: Optional[Union[str, Tuple[str]]] = None,
                         recursive: bool = False) -> Iterator[str]:
        """Scan a directory to find the interested directories or files in
        arbitrary order.

        Note:
            :meth:`list_dir_or_file` returns the path relative to
            ``dir_path``.

        Args:
            dir_path (str | Path): Path of the directory.
            list_dir (bool): List the directories. Default: True.
            list_file (bool): List the path of files. Default: True.
            suffix (str or tuple[str], optional): File suffix that we are
                interested in. Default: None.
            recursive (bool): If set to True, recursively scan the
                directory. Default: False.

        Yields:
            Iterable[str]: A relative path to ``dir_path``.
        """
        yield from self.client.list_dir_or_file(dir_path, list_dir,
                                                list_file, suffix, recursive)
class BaseFileHandler(metaclass=ABCMeta):
    """Abstract base class for file handlers.

    Subclasses implement the three abstract hooks; path-based loading and
    dumping are provided on top of them.
    """

    # NOTE(review): appears to flag text-mode ('r'/'w') handlers; it is not
    # consumed within this class — confirm usage at call sites.
    str_like = True

    @abstractmethod
    def load_from_fileobj(self, file, **kwargs):
        """Deserialize an object from an open file object."""

    @abstractmethod
    def dump_to_fileobj(self, obj, file, **kwargs):
        """Serialize ``obj`` into an open file object."""

    @abstractmethod
    def dump_to_str(self, obj, **kwargs):
        """Serialize ``obj`` to a string and return it."""

    def load_from_path(self, filepath, mode='r', **kwargs):
        """Open ``filepath`` with ``mode`` and deserialize its content."""
        with open(filepath, mode) as handle:
            return self.load_from_fileobj(handle, **kwargs)

    def dump_to_path(self, obj, filepath, mode='w', **kwargs):
        """Serialize ``obj`` and write it to ``filepath``."""
        with open(filepath, mode) as handle:
            self.dump_to_fileobj(obj, handle, **kwargs)