code
stringlengths
17
6.64M
class MishJitAutoFn(torch.autograd.Function):
    """Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681
    A memory efficient, jit scripted variant of Mish
    """

    @staticmethod
    def forward(ctx, x):
        # Save the raw input so backward can recompute the derivative cheaply.
        ctx.save_for_backward(x)
        return mish_jit_fwd(x)

    @staticmethod
    def backward(ctx, grad_output):
        saved_input = ctx.saved_tensors[0]
        return mish_jit_bwd(saved_input, grad_output)
def mish_me(x, inplace=False):
    """Apply memory-efficient Mish; `inplace` is accepted for API compat but ignored."""
    return MishJitAutoFn.apply(x)
class MishMe(nn.Module):
    """Module wrapper around the memory-efficient Mish autograd function."""

    def __init__(self, inplace: bool = False):
        # `inplace` is accepted for interface compatibility; the op is never in-place.
        super(MishMe, self).__init__()

    def forward(self, x):
        return MishJitAutoFn.apply(x)
@torch.jit.script
def hard_sigmoid_jit_fwd(x, inplace: bool = False):
    """Forward for hard-sigmoid: relu6(x + 3) / 6. `inplace` is ignored."""
    return (x + 3).clamp(min=0, max=6).div(6.0)
@torch.jit.script
def hard_sigmoid_jit_bwd(x, grad_output):
    """Backward for hard-sigmoid: gradient is 1/6 inside [-3, 3], else 0."""
    mask = torch.ones_like(x) * ((x >= -3.0) & (x <= 3.0))
    return grad_output * (mask / 6.0)
class HardSigmoidJitAutoFn(torch.autograd.Function):
    """Memory-efficient hard-sigmoid using the jit-scripted fwd/bwd helpers."""

    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)
        return hard_sigmoid_jit_fwd(x)

    @staticmethod
    def backward(ctx, grad_output):
        saved_input = ctx.saved_tensors[0]
        return hard_sigmoid_jit_bwd(saved_input, grad_output)
def hard_sigmoid_me(x, inplace: bool = False):
    """Apply memory-efficient hard-sigmoid; `inplace` is ignored."""
    return HardSigmoidJitAutoFn.apply(x)
class HardSigmoidMe(nn.Module):
    """Module wrapper around the memory-efficient hard-sigmoid autograd function."""

    def __init__(self, inplace: bool = False):
        # `inplace` accepted for interface compatibility only.
        super(HardSigmoidMe, self).__init__()

    def forward(self, x):
        return HardSigmoidJitAutoFn.apply(x)
@torch.jit.script
def hard_swish_jit_fwd(x):
    """Forward for hard-swish: x * relu6(x + 3) / 6."""
    return x * (x + 3).clamp(min=0, max=6).div(6.0)
@torch.jit.script
def hard_swish_jit_bwd(x, grad_output):
    """Backward for hard-swish: grad is 0 below -3, x/3 + 0.5 in [-3, 3], 1 above 3."""
    grad = torch.ones_like(x) * (x >= 3.0)
    in_band = (x >= -3.0) & (x <= 3.0)
    grad = torch.where(in_band, x / 3.0 + 0.5, grad)
    return grad_output * grad
class HardSwishJitAutoFn(torch.autograd.Function):
    """A memory efficient, jit-scripted HardSwish activation"""

    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)
        return hard_swish_jit_fwd(x)

    @staticmethod
    def backward(ctx, grad_output):
        saved_input = ctx.saved_tensors[0]
        return hard_swish_jit_bwd(saved_input, grad_output)
def hard_swish_me(x, inplace=False):
    """Apply memory-efficient hard-swish; `inplace` is ignored."""
    return HardSwishJitAutoFn.apply(x)
class HardSwishMe(nn.Module):
    """Module wrapper around the memory-efficient hard-swish autograd function."""

    def __init__(self, inplace: bool = False):
        # `inplace` accepted for interface compatibility only.
        super(HardSwishMe, self).__init__()

    def forward(self, x):
        return HardSwishJitAutoFn.apply(x)
def is_no_jit():
    """Return the current global `_NO_JIT` flag."""
    return _NO_JIT
class set_no_jit:
    """Set the global `_NO_JIT` flag; as a context manager, restores the prior value on exit.

    Note the flag is changed at construction time, not on `__enter__`.
    """

    def __init__(self, mode: bool) -> None:
        global _NO_JIT
        self.prev = _NO_JIT
        _NO_JIT = mode

    def __enter__(self) -> None:
        pass

    def __exit__(self, *args: Any) -> bool:
        global _NO_JIT
        _NO_JIT = self.prev
        return False  # never suppress exceptions
def is_exportable():
    """Return the current global `_EXPORTABLE` flag."""
    return _EXPORTABLE
class set_exportable:
    """Set the global `_EXPORTABLE` flag; as a context manager, restores the prior value on exit.

    Note the flag is changed at construction time, not on `__enter__`.
    """

    def __init__(self, mode: bool) -> None:
        global _EXPORTABLE
        self.prev = _EXPORTABLE
        _EXPORTABLE = mode

    def __enter__(self) -> None:
        pass

    def __exit__(self, *args: Any) -> bool:
        global _EXPORTABLE
        _EXPORTABLE = self.prev
        return False  # never suppress exceptions
def is_scriptable():
    """Return the current global `_SCRIPTABLE` flag."""
    return _SCRIPTABLE
class set_scriptable:
    """Set the global `_SCRIPTABLE` flag; as a context manager, restores the prior value on exit.

    Note the flag is changed at construction time, not on `__enter__`.
    """

    def __init__(self, mode: bool) -> None:
        global _SCRIPTABLE
        self.prev = _SCRIPTABLE
        _SCRIPTABLE = mode

    def __enter__(self) -> None:
        pass

    def __exit__(self, *args: Any) -> bool:
        global _SCRIPTABLE
        _SCRIPTABLE = self.prev
        return False  # never suppress exceptions
class set_layer_config:
    """ Layer config context manager that allows setting all layer config flags at once.
    If a flag arg is None, it will not change the current value.
    """

    def __init__(
            self,
            scriptable: Optional[bool] = None,
            exportable: Optional[bool] = None,
            no_jit: Optional[bool] = None,
            no_activation_jit: Optional[bool] = None):
        global _SCRIPTABLE, _EXPORTABLE, _NO_JIT, _NO_ACTIVATION_JIT
        # snapshot all four flags so __exit__ can restore them together
        self.prev = (_SCRIPTABLE, _EXPORTABLE, _NO_JIT, _NO_ACTIVATION_JIT)
        if scriptable is not None:
            _SCRIPTABLE = scriptable
        if exportable is not None:
            _EXPORTABLE = exportable
        if no_jit is not None:
            _NO_JIT = no_jit
        if no_activation_jit is not None:
            _NO_ACTIVATION_JIT = no_activation_jit

    def __enter__(self) -> None:
        pass

    def __exit__(self, *args: Any) -> bool:
        global _SCRIPTABLE, _EXPORTABLE, _NO_JIT, _NO_ACTIVATION_JIT
        _SCRIPTABLE, _EXPORTABLE, _NO_JIT, _NO_ACTIVATION_JIT = self.prev
        return False  # never suppress exceptions
def layer_config_kwargs(kwargs):
    """ Consume config kwargs and return contextmgr obj """
    # NOTE(review): 'no_activation_jit' is not popped/forwarded here — confirm intended.
    return set_layer_config(
        scriptable=kwargs.pop('scriptable', None),
        exportable=kwargs.pop('exportable', None),
        no_jit=kwargs.pop('no_jit', None))
def _ntuple(n): def parse(x): if isinstance(x, collections.abc.Iterable): return x return tuple(repeat(x, n)) return parse
def _is_static_pad(kernel_size, stride=1, dilation=1, **_): return ((stride == 1) and (((dilation * (kernel_size - 1)) % 2) == 0))
def _get_padding(kernel_size, stride=1, dilation=1, **_): padding = (((stride - 1) + (dilation * (kernel_size - 1))) // 2) return padding
def _calc_same_pad(i: int, k: int, s: int, d: int): return max(((((((- (i // (- s))) - 1) * s) + ((k - 1) * d)) + 1) - i), 0)
def _same_pad_arg(input_size, kernel_size, stride, dilation):
    """Build the F.pad-style [left, right, top, bottom] list for 'SAME' padding."""
    ih, iw = input_size
    kh, kw = kernel_size
    pad_h = _calc_same_pad(ih, kh, stride[0], dilation[0])
    pad_w = _calc_same_pad(iw, kw, stride[1], dilation[1])
    # split each total pad asymmetrically: extra pixel goes on the right/bottom
    return [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2]
def _split_channels(num_chan, num_groups): split = [(num_chan // num_groups) for _ in range(num_groups)] split[0] += (num_chan - sum(split)) return split
def conv2d_same(x, weight: torch.Tensor, bias: Optional[torch.Tensor] = None,
                stride: Tuple[int, int] = (1, 1), padding: Tuple[int, int] = (0, 0),
                dilation: Tuple[int, int] = (1, 1), groups: int = 1):
    """F.conv2d with TF 'SAME'-style dynamic padding applied first.

    The `padding` arg is accepted for signature compatibility but the conv itself
    runs with zero padding; all padding is done explicitly via F.pad.
    """
    ih, iw = x.size()[-2:]
    kh, kw = weight.size()[-2:]
    pad_h = _calc_same_pad(ih, kh, stride[0], dilation[0])
    pad_w = _calc_same_pad(iw, kw, stride[1], dilation[1])
    x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2])
    return F.conv2d(x, weight, bias, stride, (0, 0), dilation, groups)
class Conv2dSame(nn.Conv2d):
    """ Tensorflow like 'SAME' convolution wrapper for 2D convolutions
    """

    def __init__(self, in_channels, out_channels, kernel_size,
                 stride=1, padding=0, dilation=1, groups=1, bias=True):
        # padding is forced to 0 here; 'SAME' padding is computed per-input in forward
        super(Conv2dSame, self).__init__(
            in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias)

    def forward(self, x):
        return conv2d_same(
            x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
class Conv2dSameExport(nn.Conv2d):
    """ ONNX export friendly Tensorflow like 'SAME' convolution wrapper for 2D convolutions

    NOTE: This does not currently work with torch.jit.script
    """

    def __init__(self, in_channels, out_channels, kernel_size,
                 stride=1, padding=0, dilation=1, groups=1, bias=True):
        super(Conv2dSameExport, self).__init__(
            in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias)
        self.pad = None  # ZeroPad2d created lazily on first forward
        self.pad_input_size = (0, 0)

    def forward(self, x):
        input_size = x.size()[-2:]
        if self.pad is None:
            # Padding is fixed on the first forward pass so it exports as a static op.
            # NOTE(review): later inputs of a different size reuse this pad — confirm intended.
            pad_arg = _same_pad_arg(input_size, self.weight.size()[-2:], self.stride, self.dilation)
            self.pad = nn.ZeroPad2d(pad_arg)
            self.pad_input_size = input_size
        x = self.pad(x)
        return F.conv2d(
            x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
def get_padding_value(padding, kernel_size, **kwargs):
    """Resolve a padding spec ('same'/'valid'/number) to (value, is_dynamic)."""
    dynamic = False
    if isinstance(padding, str):
        padding = padding.lower()
        if padding == 'same':
            if _is_static_pad(kernel_size, **kwargs):
                # static symmetric padding reproduces TF SAME exactly
                padding = _get_padding(kernel_size, **kwargs)
            else:
                # must be computed at runtime from the input size
                padding = 0
                dynamic = True
        elif padding == 'valid':
            padding = 0
        else:
            # any other string: fall back to PyTorch-style symmetric padding
            padding = _get_padding(kernel_size, **kwargs)
    return padding, dynamic
def create_conv2d_pad(in_chs, out_chs, kernel_size, **kwargs):
    """Create a Conv2d variant with the padding class matching the requested scheme."""
    padding = kwargs.pop('padding', '')
    kwargs.setdefault('bias', False)
    padding, is_dynamic = get_padding_value(padding, kernel_size, **kwargs)
    if is_dynamic:
        if is_exportable():
            # export-friendly variant cannot be scripted
            assert not is_scriptable()
            return Conv2dSameExport(in_chs, out_chs, kernel_size, **kwargs)
        return Conv2dSame(in_chs, out_chs, kernel_size, **kwargs)
    return nn.Conv2d(in_chs, out_chs, kernel_size, padding=padding, **kwargs)
class MixedConv2d(nn.ModuleDict):
    """ Mixed Grouped Convolution
    Based on MDConv and GroupedConv in MixNet impl:
    https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mixnet/custom_layers.py
    """

    def __init__(self, in_channels, out_channels, kernel_size=3,
                 stride=1, padding='', dilation=1, depthwise=False, **kwargs):
        super(MixedConv2d, self).__init__()
        # normalize scalar kernel size to a single-group list
        kernel_size = kernel_size if isinstance(kernel_size, list) else [kernel_size]
        num_groups = len(kernel_size)
        in_splits = _split_channels(in_channels, num_groups)
        out_splits = _split_channels(out_channels, num_groups)
        self.in_channels = sum(in_splits)
        self.out_channels = sum(out_splits)
        for idx, (k, in_ch, out_ch) in enumerate(zip(kernel_size, in_splits, out_splits)):
            conv_groups = out_ch if depthwise else 1
            self.add_module(
                str(idx),
                create_conv2d_pad(
                    in_ch, out_ch, k, stride=stride,
                    padding=padding, dilation=dilation, groups=conv_groups, **kwargs))
        self.splits = in_splits

    def forward(self, x):
        # run each channel slice through its own kernel-size conv, then re-concat
        x_split = torch.split(x, self.splits, 1)
        x_out = [conv(x_split[i]) for i, conv in enumerate(self.values())]
        return torch.cat(x_out, 1)
def get_condconv_initializer(initializer, num_experts, expert_shape):
    """Wrap `initializer` so it applies per-expert to a flattened CondConv parameter."""
    def condconv_initializer(weight):
        """CondConv initializer function."""
        num_params = np.prod(expert_shape)
        if len(weight.shape) != 2 or weight.shape[0] != num_experts or weight.shape[1] != num_params:
            raise ValueError('CondConv variables must have shape [num_experts, num_params]')
        # view each expert row back into its conv-kernel shape before initializing
        for i in range(num_experts):
            initializer(weight[i].view(expert_shape))
    return condconv_initializer
class CondConv2d(nn.Module):
    """ Conditional Convolution
    Inspired by: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/condconv/condconv_layers.py

    Grouped convolution hackery for parallel execution of the per-sample kernel filters inspired by this discussion:
    https://github.com/pytorch/pytorch/issues/17983
    """
    __constants__ = ['bias', 'in_channels', 'out_channels', 'dynamic_padding']

    def __init__(self, in_channels, out_channels, kernel_size=3,
                 stride=1, padding='', dilation=1, groups=1, bias=False, num_experts=4):
        super(CondConv2d, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = _pair(kernel_size)
        self.stride = _pair(stride)
        padding_val, is_padding_dynamic = get_padding_value(
            padding, kernel_size, stride=stride, dilation=dilation)
        self.dynamic_padding = is_padding_dynamic
        self.padding = _pair(padding_val)
        self.dilation = _pair(dilation)
        self.groups = groups
        self.num_experts = num_experts

        # Experts are stored flattened: one row of num_params per expert.
        self.weight_shape = (self.out_channels, self.in_channels // self.groups) + self.kernel_size
        weight_num_param = 1
        for wd in self.weight_shape:
            weight_num_param *= wd
        self.weight = torch.nn.Parameter(torch.Tensor(self.num_experts, weight_num_param))

        if bias:
            self.bias_shape = (self.out_channels,)
            self.bias = torch.nn.Parameter(torch.Tensor(self.num_experts, self.out_channels))
        else:
            self.register_parameter('bias', None)

        self.reset_parameters()

    def reset_parameters(self):
        init_weight = get_condconv_initializer(
            partial(nn.init.kaiming_uniform_, a=math.sqrt(5)),
            self.num_experts, self.weight_shape)
        init_weight(self.weight)
        if self.bias is not None:
            fan_in = np.prod(self.weight_shape[1:])
            bound = 1 / math.sqrt(fan_in)
            init_bias = get_condconv_initializer(
                partial(nn.init.uniform_, a=-bound, b=bound),
                self.num_experts, self.bias_shape)
            init_bias(self.bias)

    def forward(self, x, routing_weights):
        B, C, H, W = x.shape
        # Mix expert kernels per-sample, then fold the batch into the group dim so
        # a single grouped conv applies each sample's own mixed kernel.
        weight = torch.matmul(routing_weights, self.weight)
        new_weight_shape = (B * self.out_channels, self.in_channels // self.groups) + self.kernel_size
        weight = weight.view(new_weight_shape)
        bias = None
        if self.bias is not None:
            bias = torch.matmul(routing_weights, self.bias)
            bias = bias.view(B * self.out_channels)
        x = x.view(1, B * C, H, W)
        if self.dynamic_padding:
            out = conv2d_same(
                x, weight, bias, stride=self.stride, padding=self.padding,
                dilation=self.dilation, groups=self.groups * B)
        else:
            out = F.conv2d(
                x, weight, bias, stride=self.stride, padding=self.padding,
                dilation=self.dilation, groups=self.groups * B)
        # unfold the batch back out of the channel dim
        out = out.permute([1, 0, 2, 3]).view(B, self.out_channels, out.shape[-2], out.shape[-1])
        return out
def select_conv2d(in_chs, out_chs, kernel_size, **kwargs):
    """Factory selecting MixedConv2d / CondConv2d / padded Conv2d based on kwargs."""
    assert 'groups' not in kwargs  # group count is derived from the 'depthwise' bool
    if isinstance(kernel_size, list):
        # a list of kernel sizes selects mixed grouped convolution
        assert 'num_experts' not in kwargs  # MixedConv + CondConv combo not supported
        m = MixedConv2d(in_chs, out_chs, kernel_size, **kwargs)
    else:
        depthwise = kwargs.pop('depthwise', False)
        groups = out_chs if depthwise else 1
        if 'num_experts' in kwargs and kwargs['num_experts'] > 0:
            m = CondConv2d(in_chs, out_chs, kernel_size, groups=groups, **kwargs)
        else:
            m = create_conv2d_pad(in_chs, out_chs, kernel_size, groups=groups, **kwargs)
    return m
def get_bn_args_tf():
    """Return a fresh copy of the TF-default BatchNorm args (`_BN_ARGS_TF`)."""
    return _BN_ARGS_TF.copy()
def resolve_bn_args(kwargs):
    """Pop BN-related entries ('bn_tf', 'bn_momentum', 'bn_eps') from `kwargs`
    and assemble the BatchNorm constructor arg dict."""
    bn_args = get_bn_args_tf() if kwargs.pop('bn_tf', False) else {}
    momentum = kwargs.pop('bn_momentum', None)
    if momentum is not None:
        bn_args['momentum'] = momentum
    eps = kwargs.pop('bn_eps', None)
    if eps is not None:
        bn_args['eps'] = eps
    return bn_args
def resolve_se_args(kwargs, in_chs, act_layer=None):
    """Merge user SE kwargs with `_SE_ARGS_DEFAULT`; resolve reduction base and act layer."""
    se_kwargs = kwargs.copy() if kwargs is not None else {}
    for k, v in _SE_ARGS_DEFAULT.items():
        se_kwargs.setdefault(k, v)
    if not se_kwargs.pop('reduce_mid'):
        # reduction ratio applies to the block input chs rather than the mid chs
        se_kwargs['reduced_base_chs'] = in_chs
    if se_kwargs['act_layer'] is None:
        assert act_layer is not None
        se_kwargs['act_layer'] = act_layer
    return se_kwargs
def resolve_act_layer(kwargs, default='relu'):
    """Pop 'act_layer' from `kwargs`; resolve string names via get_act_layer."""
    act_layer = kwargs.pop('act_layer', default)
    if isinstance(act_layer, str):
        act_layer = get_act_layer(act_layer)
    return act_layer
def make_divisible(v: int, divisor: int = 8, min_value: Optional[int] = None):
    """Round `v` to the nearest multiple of `divisor`.

    The result never drops below `min_value` (defaults to `divisor`), and never
    more than 10% below `v`.

    Args:
        v: requested value (e.g. channel count).
        divisor: required divisor of the result.
        min_value: lower bound for the result; defaults to `divisor`.
            (FIX: annotation was `int = None`, an invalid implicit Optional.)
    """
    min_value = min_value or divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    # Guard: rounding down must not remove more than 10% of the original value.
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v
def round_channels(channels, multiplier=1.0, divisor=8, channel_min=None):
    """Round number of filters based on depth multiplier."""
    if not multiplier:
        # zero/None multiplier means "leave unchanged"
        return channels
    scaled = channels * multiplier
    return make_divisible(scaled, divisor, channel_min)
def drop_connect(inputs, training: bool = False, drop_connect_rate: float = 0.0):
    """Apply drop connect (per-sample stochastic depth); identity when not training."""
    if not training:
        return inputs
    keep_prob = 1 - drop_connect_rate
    # One Bernoulli draw per sample, broadcast across C/H/W; floor() turns
    # keep_prob + U[0,1) into a 0/1 mask with P(keep) = keep_prob.
    batch_mask = keep_prob + torch.rand(
        (inputs.size()[0], 1, 1, 1), dtype=inputs.dtype, device=inputs.device)
    batch_mask.floor_()
    return inputs.div(keep_prob) * batch_mask
class SqueezeExcite(nn.Module):
    """Squeeze-and-Excitation channel attention module."""

    def __init__(self, in_chs, se_ratio=0.25, reduced_base_chs=None,
                 act_layer=nn.ReLU, gate_fn=sigmoid, divisor=1):
        super(SqueezeExcite, self).__init__()
        # bottleneck width is derived from reduced_base_chs when given, else in_chs
        reduced_chs = make_divisible((reduced_base_chs or in_chs) * se_ratio, divisor)
        self.conv_reduce = nn.Conv2d(in_chs, reduced_chs, 1, bias=True)
        self.act1 = act_layer(inplace=True)
        self.conv_expand = nn.Conv2d(reduced_chs, in_chs, 1, bias=True)
        self.gate_fn = gate_fn

    def forward(self, x):
        # squeeze: global average pool; excite: 1x1 bottleneck MLP + gate
        x_se = x.mean((2, 3), keepdim=True)
        x_se = self.conv_reduce(x_se)
        x_se = self.act1(x_se)
        x_se = self.conv_expand(x_se)
        return x * self.gate_fn(x_se)
class ConvBnAct(nn.Module):
    """Conv + norm + activation: the plain trunk building block."""

    def __init__(self, in_chs, out_chs, kernel_size, stride=1, pad_type='',
                 act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, norm_kwargs=None):
        super(ConvBnAct, self).__init__()
        assert stride in [1, 2]
        norm_kwargs = norm_kwargs or {}
        self.conv = select_conv2d(in_chs, out_chs, kernel_size, stride=stride, padding=pad_type)
        self.bn1 = norm_layer(out_chs, **norm_kwargs)
        self.act1 = act_layer(inplace=True)

    def forward(self, x):
        x = self.conv(x)
        x = self.bn1(x)
        x = self.act1(x)
        return x
class DepthwiseSeparableConv(nn.Module):
    """ DepthwiseSeparable block
    Used for DS convs in MobileNet-V1 and in the place of IR blocks with an expansion
    factor of 1.0. This is an alternative to having a IR with optional first pw conv.
    """

    def __init__(self, in_chs, out_chs, dw_kernel_size=3, stride=1, pad_type='',
                 act_layer=nn.ReLU, noskip=False, pw_kernel_size=1, pw_act=False,
                 se_ratio=0.0, se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None,
                 drop_connect_rate=0.0):
        super(DepthwiseSeparableConv, self).__init__()
        assert stride in [1, 2]
        norm_kwargs = norm_kwargs or {}
        self.has_residual = (stride == 1 and in_chs == out_chs) and not noskip
        self.drop_connect_rate = drop_connect_rate

        # depth-wise convolution
        self.conv_dw = select_conv2d(
            in_chs, in_chs, dw_kernel_size, stride=stride, padding=pad_type, depthwise=True)
        self.bn1 = norm_layer(in_chs, **norm_kwargs)
        self.act1 = act_layer(inplace=True)

        # optional squeeze-and-excitation
        if se_ratio is not None and se_ratio > 0.0:
            se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer)
            self.se = SqueezeExcite(in_chs, se_ratio=se_ratio, **se_kwargs)
        else:
            self.se = nn.Identity()

        # point-wise convolution; its activation is optional (pw_act)
        self.conv_pw = select_conv2d(in_chs, out_chs, pw_kernel_size, padding=pad_type)
        self.bn2 = norm_layer(out_chs, **norm_kwargs)
        self.act2 = act_layer(inplace=True) if pw_act else nn.Identity()

    def forward(self, x):
        residual = x
        x = self.conv_dw(x)
        x = self.bn1(x)
        x = self.act1(x)
        x = self.se(x)
        x = self.conv_pw(x)
        x = self.bn2(x)
        x = self.act2(x)
        if self.has_residual:
            if self.drop_connect_rate > 0.0:
                x = drop_connect(x, self.training, self.drop_connect_rate)
            x += residual
        return x
class InvertedResidual(nn.Module):
    """ Inverted residual block w/ optional SE"""

    def __init__(self, in_chs, out_chs, dw_kernel_size=3, stride=1, pad_type='',
                 act_layer=nn.ReLU, noskip=False, exp_ratio=1.0, exp_kernel_size=1,
                 pw_kernel_size=1, se_ratio=0.0, se_kwargs=None, norm_layer=nn.BatchNorm2d,
                 norm_kwargs=None, conv_kwargs=None, drop_connect_rate=0.0):
        super(InvertedResidual, self).__init__()
        norm_kwargs = norm_kwargs or {}
        conv_kwargs = conv_kwargs or {}
        mid_chs: int = make_divisible(in_chs * exp_ratio)
        self.has_residual = (in_chs == out_chs and stride == 1) and not noskip
        self.drop_connect_rate = drop_connect_rate

        # point-wise expansion
        self.conv_pw = select_conv2d(in_chs, mid_chs, exp_kernel_size, padding=pad_type, **conv_kwargs)
        self.bn1 = norm_layer(mid_chs, **norm_kwargs)
        self.act1 = act_layer(inplace=True)

        # depth-wise convolution
        self.conv_dw = select_conv2d(
            mid_chs, mid_chs, dw_kernel_size, stride=stride, padding=pad_type,
            depthwise=True, **conv_kwargs)
        self.bn2 = norm_layer(mid_chs, **norm_kwargs)
        self.act2 = act_layer(inplace=True)

        # optional squeeze-and-excitation
        if se_ratio is not None and se_ratio > 0.0:
            se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer)
            self.se = SqueezeExcite(mid_chs, se_ratio=se_ratio, **se_kwargs)
        else:
            self.se = nn.Identity()

        # point-wise linear projection (no activation)
        self.conv_pwl = select_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type, **conv_kwargs)
        self.bn3 = norm_layer(out_chs, **norm_kwargs)

    def forward(self, x):
        residual = x
        x = self.conv_pw(x)
        x = self.bn1(x)
        x = self.act1(x)
        x = self.conv_dw(x)
        x = self.bn2(x)
        x = self.act2(x)
        x = self.se(x)
        x = self.conv_pwl(x)
        x = self.bn3(x)
        if self.has_residual:
            if self.drop_connect_rate > 0.0:
                x = drop_connect(x, self.training, self.drop_connect_rate)
            x += residual
        return x
class CondConvResidual(InvertedResidual):
    """ Inverted residual block w/ CondConv routing"""

    def __init__(self, in_chs, out_chs, dw_kernel_size=3, stride=1, pad_type='',
                 act_layer=nn.ReLU, noskip=False, exp_ratio=1.0, exp_kernel_size=1,
                 pw_kernel_size=1, se_ratio=0.0, se_kwargs=None, norm_layer=nn.BatchNorm2d,
                 norm_kwargs=None, num_experts=0, drop_connect_rate=0.0):
        self.num_experts = num_experts
        # forwarded to select_conv2d so every conv becomes a CondConv2d
        conv_kwargs = dict(num_experts=self.num_experts)
        super(CondConvResidual, self).__init__(
            in_chs, out_chs, dw_kernel_size=dw_kernel_size, stride=stride, pad_type=pad_type,
            act_layer=act_layer, noskip=noskip, exp_ratio=exp_ratio,
            exp_kernel_size=exp_kernel_size, pw_kernel_size=pw_kernel_size, se_ratio=se_ratio,
            se_kwargs=se_kwargs, norm_layer=norm_layer, norm_kwargs=norm_kwargs,
            conv_kwargs=conv_kwargs, drop_connect_rate=drop_connect_rate)
        self.routing_fn = nn.Linear(in_chs, self.num_experts)

    def forward(self, x):
        residual = x
        # per-sample expert mixture weights from globally pooled features
        pooled_inputs = F.adaptive_avg_pool2d(x, 1).flatten(1)
        routing_weights = torch.sigmoid(self.routing_fn(pooled_inputs))
        x = self.conv_pw(x, routing_weights)
        x = self.bn1(x)
        x = self.act1(x)
        x = self.conv_dw(x, routing_weights)
        x = self.bn2(x)
        x = self.act2(x)
        x = self.se(x)
        x = self.conv_pwl(x, routing_weights)
        x = self.bn3(x)
        if self.has_residual:
            if self.drop_connect_rate > 0.0:
                x = drop_connect(x, self.training, self.drop_connect_rate)
            x += residual
        return x
class EdgeResidual(nn.Module):
    """ EdgeTPU Residual block with expansion convolution followed by pointwise-linear w/ stride"""

    def __init__(self, in_chs, out_chs, exp_kernel_size=3, exp_ratio=1.0, fake_in_chs=0,
                 stride=1, pad_type='', act_layer=nn.ReLU, noskip=False, pw_kernel_size=1,
                 se_ratio=0.0, se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None,
                 drop_connect_rate=0.0):
        super(EdgeResidual, self).__init__()
        norm_kwargs = norm_kwargs or {}
        # fake_in_chs lets the config pretend a different input width for mid-chs calc
        if fake_in_chs > 0:
            mid_chs = make_divisible(fake_in_chs * exp_ratio)
        else:
            mid_chs = make_divisible(in_chs * exp_ratio)
        self.has_residual = (in_chs == out_chs and stride == 1) and not noskip
        self.drop_connect_rate = drop_connect_rate

        # expansion convolution
        self.conv_exp = select_conv2d(in_chs, mid_chs, exp_kernel_size, padding=pad_type)
        self.bn1 = norm_layer(mid_chs, **norm_kwargs)
        self.act1 = act_layer(inplace=True)

        # optional squeeze-and-excitation
        if se_ratio is not None and se_ratio > 0.0:
            se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer)
            self.se = SqueezeExcite(mid_chs, se_ratio=se_ratio, **se_kwargs)
        else:
            self.se = nn.Identity()

        # point-wise linear projection
        self.conv_pwl = select_conv2d(mid_chs, out_chs, pw_kernel_size, stride=stride, padding=pad_type)
        # FIX: was hard-coded `nn.BatchNorm2d`, silently ignoring the `norm_layer`
        # argument; use `norm_layer` for consistency with bn1 and the sibling blocks.
        self.bn2 = norm_layer(out_chs, **norm_kwargs)

    def forward(self, x):
        residual = x
        x = self.conv_exp(x)
        x = self.bn1(x)
        x = self.act1(x)
        x = self.se(x)
        x = self.conv_pwl(x)
        x = self.bn2(x)
        if self.has_residual:
            if self.drop_connect_rate > 0.0:
                x = drop_connect(x, self.training, self.drop_connect_rate)
            x += residual
        return x
class EfficientNetBuilder:
    """ Build Trunk Blocks for Efficient/Mobile Networks

    This ended up being somewhat of a cross between
    https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_models.py
    and
    https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/modeling/backbone/fbnet_builder.py
    """

    def __init__(self, channel_multiplier=1.0, channel_divisor=8, channel_min=None,
                 pad_type='', act_layer=None, se_kwargs=None,
                 norm_layer=nn.BatchNorm2d, norm_kwargs=None, drop_connect_rate=0.0):
        self.channel_multiplier = channel_multiplier
        self.channel_divisor = channel_divisor
        self.channel_min = channel_min
        self.pad_type = pad_type
        self.act_layer = act_layer
        self.se_kwargs = se_kwargs
        self.norm_layer = norm_layer
        self.norm_kwargs = norm_kwargs
        self.drop_connect_rate = drop_connect_rate

        # updated during the build pass
        self.in_chs = None
        self.block_idx = 0
        self.block_count = 0

    def _round_channels(self, chs):
        return round_channels(chs, self.channel_multiplier, self.channel_divisor, self.channel_min)

    def _make_block(self, ba):
        """Instantiate one block from its arg dict `ba` (consumed/mutated here)."""
        bt = ba.pop('block_type')
        ba['in_chs'] = self.in_chs
        ba['out_chs'] = self._round_channels(ba['out_chs'])
        if 'fake_in_chs' in ba and ba['fake_in_chs']:
            # FIXME: this is a hack to work around mismatch in origin impl input filters
            ba['fake_in_chs'] = self._round_channels(ba['fake_in_chs'])
        ba['norm_layer'] = self.norm_layer
        ba['norm_kwargs'] = self.norm_kwargs
        ba['pad_type'] = self.pad_type
        # block act fn overrides the model default
        ba['act_layer'] = ba['act_layer'] if ba['act_layer'] is not None else self.act_layer
        assert ba['act_layer'] is not None
        if bt == 'ir':
            # drop connect rate scales linearly with block depth
            ba['drop_connect_rate'] = self.drop_connect_rate * self.block_idx / self.block_count
            ba['se_kwargs'] = self.se_kwargs
            if ba.get('num_experts', 0) > 0:
                block = CondConvResidual(**ba)
            else:
                block = InvertedResidual(**ba)
        elif bt == 'ds' or bt == 'dsa':
            ba['drop_connect_rate'] = self.drop_connect_rate * self.block_idx / self.block_count
            ba['se_kwargs'] = self.se_kwargs
            block = DepthwiseSeparableConv(**ba)
        elif bt == 'er':
            ba['drop_connect_rate'] = self.drop_connect_rate * self.block_idx / self.block_count
            ba['se_kwargs'] = self.se_kwargs
            block = EdgeResidual(**ba)
        elif bt == 'cn':
            block = ConvBnAct(**ba)
        else:
            # FIX: corrected 'Uknkown' typo in the assertion message
            assert False, 'Unknown block type (%s) while building model.' % bt
        self.in_chs = ba['out_chs']  # next block sees this block's output width
        return block

    def _make_stack(self, stack_args):
        blocks = []
        for i, ba in enumerate(stack_args):
            if i >= 1:
                # only the first block in any stack may apply a stride
                ba['stride'] = 1
            block = self._make_block(ba)
            blocks.append(block)
            self.block_idx += 1  # global counter used for drop-connect scaling
        return nn.Sequential(*blocks)

    def __call__(self, in_chs, block_args):
        """ Build the blocks
        Args:
            in_chs: Number of input-channels passed to first block
            block_args: A list of lists, outer list defines stages, inner
                list contains block configuration(s)
        Return:
            List of block stacks (each stack wrapped in nn.Sequential)
        """
        self.in_chs = in_chs
        self.block_count = sum([len(x) for x in block_args])
        self.block_idx = 0
        blocks = []
        for stack_idx, stack in enumerate(block_args):
            assert isinstance(stack, list)
            stack = self._make_stack(stack)
            blocks.append(stack)
        return blocks
def _parse_ksize(ss): if ss.isdigit(): return int(ss) else: return [int(k) for k in ss.split('.')]
def _decode_block_str(block_str):
    """ Decode block definition string

    Gets a list of block arg (dicts) through a string notation of arguments.
    E.g. ir_r2_k3_s2_e1_i32_o16_se0.25_noskip

    All args can exist in any order with the exception of the leading string which
    is assumed to indicate the block type.

    leading string - block type (
      ir = InvertedResidual, ds = DepthwiseSep, dsa = DeptwhiseSep with pw act, cn = ConvBnAct)
    r - number of repeat blocks,
    k - kernel size,
    s - strides (1-9),
    e - expansion ratio,
    c - output channels,
    se - squeeze/excitation ratio
    n - activation fn ('re', 'r6', 'hs', or 'sw')
    Args:
        block_str: a string representation of block arguments.
    Returns:
        A list of block args (dicts)
    Raises:
        ValueError: if the string def not properly specified (TODO)
    """
    assert isinstance(block_str, str)
    ops = block_str.split('_')
    block_type = ops[0]  # first token is always the block type
    ops = ops[1:]
    options = {}
    noskip = False
    for op in ops:
        if op == 'noskip':
            noskip = True
        elif op.startswith('n'):
            # activation function token
            key = op[0]
            v = op[1:]
            if v == 're':
                value = get_act_layer('relu')
            elif v == 'r6':
                value = get_act_layer('relu6')
            elif v == 'hs':
                value = get_act_layer('hard_swish')
            elif v == 'sw':
                value = get_act_layer('swish')
            else:
                continue
            options[key] = value
        else:
            # all other tokens: alpha key followed by numeric value
            splits = re.split(r'(\d.*)', op)
            if len(splits) >= 2:
                key, value = splits[:2]
                options[key] = value

    act_layer = options['n'] if 'n' in options else None
    exp_kernel_size = _parse_ksize(options['a']) if 'a' in options else 1
    pw_kernel_size = _parse_ksize(options['p']) if 'p' in options else 1
    fake_in_chs = int(options['fc']) if 'fc' in options else 0

    num_repeat = int(options['r'])
    if block_type == 'ir':
        block_args = dict(
            block_type=block_type,
            dw_kernel_size=_parse_ksize(options['k']),
            exp_kernel_size=exp_kernel_size,
            pw_kernel_size=pw_kernel_size,
            out_chs=int(options['c']),
            exp_ratio=float(options['e']),
            se_ratio=float(options['se']) if 'se' in options else None,
            stride=int(options['s']),
            act_layer=act_layer,
            noskip=noskip,
        )
        if 'cc' in options:
            block_args['num_experts'] = int(options['cc'])
    elif block_type == 'ds' or block_type == 'dsa':
        block_args = dict(
            block_type=block_type,
            dw_kernel_size=_parse_ksize(options['k']),
            pw_kernel_size=pw_kernel_size,
            out_chs=int(options['c']),
            se_ratio=float(options['se']) if 'se' in options else None,
            stride=int(options['s']),
            act_layer=act_layer,
            pw_act=block_type == 'dsa',
            noskip=block_type == 'dsa' or noskip,
        )
    elif block_type == 'er':
        block_args = dict(
            block_type=block_type,
            exp_kernel_size=_parse_ksize(options['k']),
            pw_kernel_size=pw_kernel_size,
            out_chs=int(options['c']),
            exp_ratio=float(options['e']),
            fake_in_chs=fake_in_chs,
            se_ratio=float(options['se']) if 'se' in options else None,
            stride=int(options['s']),
            act_layer=act_layer,
            noskip=noskip,
        )
    elif block_type == 'cn':
        block_args = dict(
            block_type=block_type,
            kernel_size=int(options['k']),
            out_chs=int(options['c']),
            stride=int(options['s']),
            act_layer=act_layer,
        )
    else:
        assert False, 'Unknown block type (%s)' % block_type

    return block_args, num_repeat
def _scale_stage_depth(stack_args, repeats, depth_multiplier=1.0, depth_trunc='ceil'): ' Per-stage depth scaling\n Scales the block repeats in each stage. This depth scaling impl maintains\n compatibility with the EfficientNet scaling method, while allowing sensible\n scaling for other models that may have multiple block arg definitions in each stage.\n ' num_repeat = sum(repeats) if (depth_trunc == 'round'): num_repeat_scaled = max(1, round((num_repeat * depth_multiplier))) else: num_repeat_scaled = int(math.ceil((num_repeat * depth_multiplier))) repeats_scaled = [] for r in repeats[::(- 1)]: rs = max(1, round(((r / num_repeat) * num_repeat_scaled))) repeats_scaled.append(rs) num_repeat -= r num_repeat_scaled -= rs repeats_scaled = repeats_scaled[::(- 1)] sa_scaled = [] for (ba, rep) in zip(stack_args, repeats_scaled): sa_scaled.extend([deepcopy(ba) for _ in range(rep)]) return sa_scaled
def decode_arch_def(arch_def, depth_multiplier=1.0, depth_trunc='ceil',
                    experts_multiplier=1, fix_first_last=False):
    """Decode a list-of-lists of block strings into per-stage lists of block kwargs."""
    arch_args = []
    for stack_idx, block_strings in enumerate(arch_def):
        assert isinstance(block_strings, list)
        stack_args = []
        repeats = []
        for block_str in block_strings:
            assert isinstance(block_str, str)
            ba, rep = _decode_block_str(block_str)
            if ba.get('num_experts', 0) > 0 and experts_multiplier > 1:
                ba['num_experts'] *= experts_multiplier
            stack_args.append(ba)
            repeats.append(rep)
        if fix_first_last and (stack_idx == 0 or stack_idx == len(arch_def) - 1):
            # first/last stage depths stay fixed under depth scaling
            arch_args.append(_scale_stage_depth(stack_args, repeats, 1.0, depth_trunc))
        else:
            arch_args.append(_scale_stage_depth(stack_args, repeats, depth_multiplier, depth_trunc))
    return arch_args
def initialize_weight_goog(m, n='', fix_group_fanout=True):
    """Google/TF-style weight init for EfficientNet-family modules.

    `n` is the module's dotted name (used to detect CondConv routing linears);
    `fix_group_fanout` divides fan-out by group count for grouped convs.
    """
    if isinstance(m, CondConv2d):
        fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
        if fix_group_fanout:
            fan_out //= m.groups
        init_weight_fn = get_condconv_initializer(
            lambda w: w.data.normal_(0, math.sqrt(2.0 / fan_out)),
            m.num_experts, m.weight_shape)
        init_weight_fn(m.weight)
        if m.bias is not None:
            m.bias.data.zero_()
    elif isinstance(m, nn.Conv2d):
        fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
        if fix_group_fanout:
            fan_out //= m.groups
        m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
        if m.bias is not None:
            m.bias.data.zero_()
    elif isinstance(m, nn.BatchNorm2d):
        m.weight.data.fill_(1.0)
        m.bias.data.zero_()
    elif isinstance(m, nn.Linear):
        fan_out = m.weight.size(0)
        fan_in = 0
        if 'routing_fn' in n:
            # CondConv routing linear also counts fan-in
            fan_in = m.weight.size(1)
        init_range = 1.0 / math.sqrt(fan_in + fan_out)
        m.weight.data.uniform_(-init_range, init_range)
        m.bias.data.zero_()
def initialize_weight_default(m, n=''):
    """Default (kaiming) init for conv/linear, constant init for BatchNorm."""
    if isinstance(m, CondConv2d):
        init_fn = get_condconv_initializer(
            partial(nn.init.kaiming_normal_, mode='fan_out', nonlinearity='relu'),
            m.num_experts, m.weight_shape)
        init_fn(m.weight)
    elif isinstance(m, nn.Conv2d):
        nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
    elif isinstance(m, nn.BatchNorm2d):
        m.weight.data.fill_(1.0)
        m.bias.data.zero_()
    elif isinstance(m, nn.Linear):
        nn.init.kaiming_uniform_(m.weight, mode='fan_in', nonlinearity='linear')
class GenEfficientNet(nn.Module):
    """ Generic EfficientNets

    An implementation of mobile optimized networks that covers:
      * EfficientNet (B0-B8, L2, CondConv, EdgeTPU)
      * MixNet (Small, Medium, and Large, XL)
      * MNASNet A1, B1, and small
      * FBNet C
      * Single-Path NAS Pixel1
    """

    def __init__(self, block_args, num_classes=1000, in_chans=3, num_features=1280, stem_size=32, fix_stem=False,
                 channel_multiplier=1.0, channel_divisor=8, channel_min=None, pad_type='', act_layer=nn.ReLU,
                 drop_rate=0.0, drop_connect_rate=0.0, se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None,
                 weight_init='goog'):
        super(GenEfficientNet, self).__init__()
        self.drop_rate = drop_rate
        # FIX: norm_kwargs defaults to None, but is expanded below with **norm_kwargs,
        # which raises TypeError for None. Normalize to an empty dict.
        norm_kwargs = norm_kwargs or {}

        # Stem
        if not fix_stem:
            stem_size = round_channels(stem_size, channel_multiplier, channel_divisor, channel_min)
        self.conv_stem = select_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type)
        self.bn1 = norm_layer(stem_size, **norm_kwargs)
        self.act1 = act_layer(inplace=True)
        in_chs = stem_size

        # Middle stages (IR/ER/DS Blocks)
        builder = EfficientNetBuilder(
            channel_multiplier, channel_divisor, channel_min,
            pad_type, act_layer, se_kwargs, norm_layer, norm_kwargs, drop_connect_rate)
        self.blocks = nn.Sequential(*builder(in_chs, block_args))
        in_chs = builder.in_chs

        # Head + pooling + classifier
        self.conv_head = select_conv2d(in_chs, num_features, 1, padding=pad_type)
        self.bn2 = norm_layer(num_features, **norm_kwargs)
        self.act2 = act_layer(inplace=True)
        self.global_pool = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(num_features, num_classes)

        # Weight init over all modules, scheme selected by `weight_init`
        for n, m in self.named_modules():
            if weight_init == 'goog':
                initialize_weight_goog(m, n)
            else:
                initialize_weight_default(m, n)

    def features(self, x):
        """Run the convolutional trunk (stem, blocks, head) without pooling/classifier."""
        x = self.conv_stem(x)
        x = self.bn1(x)
        x = self.act1(x)
        x = self.blocks(x)
        x = self.conv_head(x)
        x = self.bn2(x)
        x = self.act2(x)
        return x

    def as_sequential(self):
        """Return the whole model flattened into an equivalent nn.Sequential."""
        layers = [self.conv_stem, self.bn1, self.act1]
        layers.extend(self.blocks)
        layers.extend([
            self.conv_head, self.bn2, self.act2,
            self.global_pool, nn.Flatten(), nn.Dropout(self.drop_rate), self.classifier])
        return nn.Sequential(*layers)

    def forward(self, x):
        """Full forward pass: features -> global pool -> (dropout) -> classifier logits."""
        x = self.features(x)
        x = self.global_pool(x)
        x = x.flatten(1)
        if self.drop_rate > 0.0:
            x = F.dropout(x, p=self.drop_rate, training=self.training)
        return self.classifier(x)
def _create_model(model_kwargs, variant, pretrained=False):
    """Instantiate a GenEfficientNet variant, optionally loading pretrained weights.

    Pops the 'as_sequential' flag from model_kwargs; when set, the finished model
    is flattened into an nn.Sequential before being returned.
    """
    wants_sequential = model_kwargs.pop('as_sequential', False)
    model = GenEfficientNet(**model_kwargs)
    if pretrained:
        load_pretrained(model, model_urls[variant])
    return model.as_sequential() if wants_sequential else model
def _gen_mnasnet_a1(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
    """Creates a mnasnet-a1 model.

    Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet
    Paper: https://arxiv.org/pdf/1807.11626.pdf.

    Args:
        channel_multiplier: multiplier to number of channels per layer.
    """
    arch_def = [
        ['ds_r1_k3_s1_e1_c16_noskip'],
        ['ir_r2_k3_s2_e6_c24'],
        ['ir_r3_k5_s2_e3_c40_se0.25'],
        ['ir_r4_k3_s2_e6_c80'],
        ['ir_r2_k3_s1_e6_c112_se0.25'],
        ['ir_r3_k5_s2_e6_c160_se0.25'],
        ['ir_r1_k3_s1_e6_c320'],
    ]
    with layer_config_kwargs(kwargs):
        model_kwargs = dict(
            block_args=decode_arch_def(arch_def),
            stem_size=32,
            channel_multiplier=channel_multiplier,
            act_layer=resolve_act_layer(kwargs, 'relu'),
            norm_kwargs=resolve_bn_args(kwargs),
            **kwargs)
        return _create_model(model_kwargs, variant, pretrained)
def _gen_mnasnet_b1(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
    """Creates a mnasnet-b1 model.

    Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet
    Paper: https://arxiv.org/pdf/1807.11626.pdf.

    Args:
        channel_multiplier: multiplier to number of channels per layer.
    """
    arch_def = [
        ['ds_r1_k3_s1_c16_noskip'],
        ['ir_r3_k3_s2_e3_c24'],
        ['ir_r3_k5_s2_e3_c40'],
        ['ir_r3_k5_s2_e6_c80'],
        ['ir_r2_k3_s1_e6_c96'],
        ['ir_r4_k5_s2_e6_c192'],
        ['ir_r1_k3_s1_e6_c320_noskip'],
    ]
    with layer_config_kwargs(kwargs):
        model_kwargs = dict(
            block_args=decode_arch_def(arch_def),
            stem_size=32,
            channel_multiplier=channel_multiplier,
            act_layer=resolve_act_layer(kwargs, 'relu'),
            norm_kwargs=resolve_bn_args(kwargs),
            **kwargs)
        return _create_model(model_kwargs, variant, pretrained)
def _gen_mnasnet_small(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
    """Creates a mnasnet-small model.

    NOTE: docstring previously said "mnasnet-b1" — a copy-paste error; this
    builds the MNASNet Small architecture (stem 8, small channel counts).

    Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet
    Paper: https://arxiv.org/pdf/1807.11626.pdf.

    Args:
        channel_multiplier: multiplier to number of channels per layer.
    """
    arch_def = [
        ['ds_r1_k3_s1_c8'],
        ['ir_r1_k3_s2_e3_c16'],
        ['ir_r2_k3_s2_e6_c16'],
        ['ir_r4_k5_s2_e6_c32_se0.25'],
        ['ir_r3_k3_s1_e6_c32_se0.25'],
        ['ir_r3_k5_s2_e6_c88_se0.25'],
        ['ir_r1_k3_s1_e6_c144'],
    ]
    with layer_config_kwargs(kwargs):
        model_kwargs = dict(
            block_args=decode_arch_def(arch_def),
            stem_size=8,
            channel_multiplier=channel_multiplier,
            act_layer=resolve_act_layer(kwargs, 'relu'),
            norm_kwargs=resolve_bn_args(kwargs),
            **kwargs)
        return _create_model(model_kwargs, variant, pretrained)
def _gen_mobilenet_v2(variant, channel_multiplier=1.0, depth_multiplier=1.0, fix_stem_head=False, pretrained=False, **kwargs):
    """ Generate MobileNet-V2 network
    Ref impl: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet_v2.py
    Paper: https://arxiv.org/abs/1801.04381
    """
    arch_def = [
        ['ds_r1_k3_s1_c16'],
        ['ir_r2_k3_s2_e6_c24'],
        ['ir_r3_k3_s2_e6_c32'],
        ['ir_r4_k3_s2_e6_c64'],
        ['ir_r3_k3_s1_e6_c96'],
        ['ir_r3_k3_s2_e6_c160'],
        ['ir_r1_k3_s1_e6_c320'],
    ]
    with layer_config_kwargs(kwargs):
        # head features scale with channel_multiplier unless stem/head are fixed
        if fix_stem_head:
            num_features = 1280
        else:
            num_features = round_channels(1280, channel_multiplier, 8, None)
        model_kwargs = dict(
            block_args=decode_arch_def(arch_def, depth_multiplier=depth_multiplier, fix_first_last=fix_stem_head),
            num_features=num_features,
            stem_size=32,
            fix_stem=fix_stem_head,
            channel_multiplier=channel_multiplier,
            norm_kwargs=resolve_bn_args(kwargs),
            act_layer=nn.ReLU6,
            **kwargs)
        return _create_model(model_kwargs, variant, pretrained)
def _gen_fbnetc(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
    """ FBNet-C

    Paper: https://arxiv.org/abs/1812.03443
    Ref Impl: https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/modeling/backbone/fbnet_modeldef.py

    NOTE: the impl above does not relate to the 'C' variant here, that was derived from paper,
    it was used to confirm some building block details
    """
    arch_def = [
        ['ir_r1_k3_s1_e1_c16'],
        ['ir_r1_k3_s2_e6_c24', 'ir_r2_k3_s1_e1_c24'],
        ['ir_r1_k5_s2_e6_c32', 'ir_r1_k5_s1_e3_c32', 'ir_r1_k5_s1_e6_c32', 'ir_r1_k3_s1_e6_c32'],
        ['ir_r1_k5_s2_e6_c64', 'ir_r1_k5_s1_e3_c64', 'ir_r2_k5_s1_e6_c64'],
        ['ir_r3_k5_s1_e6_c112', 'ir_r1_k5_s1_e3_c112'],
        ['ir_r4_k5_s2_e6_c184'],
        ['ir_r1_k3_s1_e6_c352'],
    ]
    with layer_config_kwargs(kwargs):
        model_kwargs = dict(
            block_args=decode_arch_def(arch_def),
            stem_size=16,
            num_features=1984,
            channel_multiplier=channel_multiplier,
            act_layer=resolve_act_layer(kwargs, 'relu'),
            norm_kwargs=resolve_bn_args(kwargs),
            **kwargs)
        return _create_model(model_kwargs, variant, pretrained)
def _gen_spnasnet(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
    """Creates the Single-Path NAS model from search targeted for Pixel1 phone.

    Paper: https://arxiv.org/abs/1904.02877

    Args:
        channel_multiplier: multiplier to number of channels per layer.
    """
    arch_def = [
        ['ds_r1_k3_s1_c16_noskip'],
        ['ir_r3_k3_s2_e3_c24'],
        ['ir_r1_k5_s2_e6_c40', 'ir_r3_k3_s1_e3_c40'],
        ['ir_r1_k5_s2_e6_c80', 'ir_r3_k3_s1_e3_c80'],
        ['ir_r1_k5_s1_e6_c96', 'ir_r3_k5_s1_e3_c96'],
        ['ir_r4_k5_s2_e6_c192'],
        ['ir_r1_k3_s1_e6_c320_noskip'],
    ]
    with layer_config_kwargs(kwargs):
        model_kwargs = dict(
            block_args=decode_arch_def(arch_def),
            stem_size=32,
            channel_multiplier=channel_multiplier,
            act_layer=resolve_act_layer(kwargs, 'relu'),
            norm_kwargs=resolve_bn_args(kwargs),
            **kwargs)
        return _create_model(model_kwargs, variant, pretrained)
def _gen_efficientnet(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs):
    """Creates an EfficientNet model.

    Ref impl: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py
    Paper: https://arxiv.org/abs/1905.11946

    EfficientNet params
    name: (channel_multiplier, depth_multiplier, resolution, dropout_rate)
    'efficientnet-b0': (1.0, 1.0, 224, 0.2),
    'efficientnet-b1': (1.0, 1.1, 240, 0.2),
    'efficientnet-b2': (1.1, 1.2, 260, 0.3),
    'efficientnet-b3': (1.2, 1.4, 300, 0.3),
    'efficientnet-b4': (1.4, 1.8, 380, 0.4),
    'efficientnet-b5': (1.6, 2.2, 456, 0.4),
    'efficientnet-b6': (1.8, 2.6, 528, 0.5),
    'efficientnet-b7': (2.0, 3.1, 600, 0.5),
    'efficientnet-b8': (2.2, 3.6, 672, 0.5),

    Args:
        channel_multiplier: multiplier to number of channels per layer
        depth_multiplier: multiplier to number of repeats per stage
    """
    arch_def = [
        ['ds_r1_k3_s1_e1_c16_se0.25'],
        ['ir_r2_k3_s2_e6_c24_se0.25'],
        ['ir_r2_k5_s2_e6_c40_se0.25'],
        ['ir_r3_k3_s2_e6_c80_se0.25'],
        ['ir_r3_k5_s1_e6_c112_se0.25'],
        ['ir_r4_k5_s2_e6_c192_se0.25'],
        ['ir_r1_k3_s1_e6_c320_se0.25'],
    ]
    with layer_config_kwargs(kwargs):
        model_kwargs = dict(
            block_args=decode_arch_def(arch_def, depth_multiplier),
            num_features=round_channels(1280, channel_multiplier, 8, None),
            stem_size=32,
            channel_multiplier=channel_multiplier,
            act_layer=resolve_act_layer(kwargs, 'swish'),
            norm_kwargs=resolve_bn_args(kwargs),
            **kwargs)
        return _create_model(model_kwargs, variant, pretrained)
def _gen_efficientnet_edge(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs):
    """Creates an EfficientNet-EdgeTPU model (edge-residual 'er' blocks, ReLU act)."""
    arch_def = [
        ['er_r1_k3_s1_e4_c24_fc24_noskip'],
        ['er_r2_k3_s2_e8_c32'],
        ['er_r4_k3_s2_e8_c48'],
        ['ir_r5_k5_s2_e8_c96'],
        ['ir_r4_k5_s1_e8_c144'],
        ['ir_r2_k5_s2_e8_c192'],
    ]
    with layer_config_kwargs(kwargs):
        model_kwargs = dict(
            block_args=decode_arch_def(arch_def, depth_multiplier),
            num_features=round_channels(1280, channel_multiplier, 8, None),
            stem_size=32,
            channel_multiplier=channel_multiplier,
            act_layer=resolve_act_layer(kwargs, 'relu'),
            norm_kwargs=resolve_bn_args(kwargs),
            **kwargs)
        return _create_model(model_kwargs, variant, pretrained)
def _gen_efficientnet_condconv(variant, channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=1, pretrained=False, **kwargs):
    """Creates an efficientnet-condconv model."""
    # last three stages use CondConv blocks with 4 experts ('_cc4')
    arch_def = [
        ['ds_r1_k3_s1_e1_c16_se0.25'],
        ['ir_r2_k3_s2_e6_c24_se0.25'],
        ['ir_r2_k5_s2_e6_c40_se0.25'],
        ['ir_r3_k3_s2_e6_c80_se0.25'],
        ['ir_r3_k5_s1_e6_c112_se0.25_cc4'],
        ['ir_r4_k5_s2_e6_c192_se0.25_cc4'],
        ['ir_r1_k3_s1_e6_c320_se0.25_cc4'],
    ]
    with layer_config_kwargs(kwargs):
        model_kwargs = dict(
            block_args=decode_arch_def(arch_def, depth_multiplier, experts_multiplier=experts_multiplier),
            num_features=round_channels(1280, channel_multiplier, 8, None),
            stem_size=32,
            channel_multiplier=channel_multiplier,
            act_layer=resolve_act_layer(kwargs, 'swish'),
            norm_kwargs=resolve_bn_args(kwargs),
            **kwargs)
        return _create_model(model_kwargs, variant, pretrained)
def _gen_efficientnet_lite(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs):
    """Creates an EfficientNet-Lite model.

    Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/lite
    Paper: https://arxiv.org/abs/1905.11946

    EfficientNet params
    name: (channel_multiplier, depth_multiplier, resolution, dropout_rate)
    'efficientnet-lite0': (1.0, 1.0, 224, 0.2),
    'efficientnet-lite1': (1.0, 1.1, 240, 0.2),
    'efficientnet-lite2': (1.1, 1.2, 260, 0.3),
    'efficientnet-lite3': (1.2, 1.4, 280, 0.3),
    'efficientnet-lite4': (1.4, 1.8, 300, 0.3),

    Args:
        channel_multiplier: multiplier to number of channels per layer
        depth_multiplier: multiplier to number of repeats per stage
    """
    # Lite variants drop SE blocks, fix the stem/head, and use ReLU6 throughout
    arch_def = [
        ['ds_r1_k3_s1_e1_c16'],
        ['ir_r2_k3_s2_e6_c24'],
        ['ir_r2_k5_s2_e6_c40'],
        ['ir_r3_k3_s2_e6_c80'],
        ['ir_r3_k5_s1_e6_c112'],
        ['ir_r4_k5_s2_e6_c192'],
        ['ir_r1_k3_s1_e6_c320'],
    ]
    with layer_config_kwargs(kwargs):
        model_kwargs = dict(
            block_args=decode_arch_def(arch_def, depth_multiplier, fix_first_last=True),
            num_features=1280,
            stem_size=32,
            fix_stem=True,
            channel_multiplier=channel_multiplier,
            act_layer=nn.ReLU6,
            norm_kwargs=resolve_bn_args(kwargs),
            **kwargs)
        return _create_model(model_kwargs, variant, pretrained)
def _gen_mixnet_s(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
    """Creates a MixNet Small model.

    Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet/mixnet
    Paper: https://arxiv.org/abs/1907.09595
    """
    arch_def = [
        ['ds_r1_k3_s1_e1_c16'],
        ['ir_r1_k3_a1.1_p1.1_s2_e6_c24', 'ir_r1_k3_a1.1_p1.1_s1_e3_c24'],
        ['ir_r1_k3.5.7_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'],
        ['ir_r1_k3.5.7_p1.1_s2_e6_c80_se0.25_nsw', 'ir_r2_k3.5_p1.1_s1_e6_c80_se0.25_nsw'],
        ['ir_r1_k3.5.7_a1.1_p1.1_s1_e6_c120_se0.5_nsw', 'ir_r2_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'],
        ['ir_r1_k3.5.7.9.11_s2_e6_c200_se0.5_nsw', 'ir_r2_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw'],
    ]
    with layer_config_kwargs(kwargs):
        model_kwargs = dict(
            block_args=decode_arch_def(arch_def),
            num_features=1536,
            stem_size=16,
            channel_multiplier=channel_multiplier,
            act_layer=resolve_act_layer(kwargs, 'relu'),
            norm_kwargs=resolve_bn_args(kwargs),
            **kwargs)
        return _create_model(model_kwargs, variant, pretrained)
def _gen_mixnet_m(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs):
    """Creates a MixNet Medium-Large model.

    Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet/mixnet
    Paper: https://arxiv.org/abs/1907.09595
    """
    arch_def = [
        ['ds_r1_k3_s1_e1_c24'],
        ['ir_r1_k3.5.7_a1.1_p1.1_s2_e6_c32', 'ir_r1_k3_a1.1_p1.1_s1_e3_c32'],
        ['ir_r1_k3.5.7.9_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'],
        ['ir_r1_k3.5.7_s2_e6_c80_se0.25_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e6_c80_se0.25_nsw'],
        ['ir_r1_k3_s1_e6_c120_se0.5_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'],
        ['ir_r1_k3.5.7.9_s2_e6_c200_se0.5_nsw', 'ir_r3_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw'],
    ]
    with layer_config_kwargs(kwargs):
        # depth scaling uses 'round' truncation for the M/L MixNet family
        model_kwargs = dict(
            block_args=decode_arch_def(arch_def, depth_multiplier, depth_trunc='round'),
            num_features=1536,
            stem_size=24,
            channel_multiplier=channel_multiplier,
            act_layer=resolve_act_layer(kwargs, 'relu'),
            norm_kwargs=resolve_bn_args(kwargs),
            **kwargs)
        return _create_model(model_kwargs, variant, pretrained)
def mnasnet_050(pretrained=False, **kwargs):
    """ MNASNet B1, depth multiplier of 0.5. """
    return _gen_mnasnet_b1('mnasnet_050', 0.5, pretrained=pretrained, **kwargs)
def mnasnet_075(pretrained=False, **kwargs):
    """ MNASNet B1, depth multiplier of 0.75. """
    return _gen_mnasnet_b1('mnasnet_075', 0.75, pretrained=pretrained, **kwargs)
def mnasnet_100(pretrained=False, **kwargs):
    """ MNASNet B1, depth multiplier of 1.0. """
    return _gen_mnasnet_b1('mnasnet_100', 1.0, pretrained=pretrained, **kwargs)
def mnasnet_b1(pretrained=False, **kwargs):
    """ MNASNet B1, depth multiplier of 1.0. Alias for mnasnet_100. """
    return mnasnet_100(pretrained=pretrained, **kwargs)
def mnasnet_140(pretrained=False, **kwargs):
    """ MNASNet B1, depth multiplier of 1.4 """
    return _gen_mnasnet_b1('mnasnet_140', 1.4, pretrained=pretrained, **kwargs)
def semnasnet_050(pretrained=False, **kwargs):
    """ MNASNet A1 (w/ SE), depth multiplier of 0.5 """
    return _gen_mnasnet_a1('semnasnet_050', 0.5, pretrained=pretrained, **kwargs)
def semnasnet_075(pretrained=False, **kwargs):
    """ MNASNet A1 (w/ SE), depth multiplier of 0.75. """
    return _gen_mnasnet_a1('semnasnet_075', 0.75, pretrained=pretrained, **kwargs)
def semnasnet_100(pretrained=False, **kwargs):
    """ MNASNet A1 (w/ SE), depth multiplier of 1.0. """
    return _gen_mnasnet_a1('semnasnet_100', 1.0, pretrained=pretrained, **kwargs)
def mnasnet_a1(pretrained=False, **kwargs):
    """ MNASNet A1 (w/ SE), depth multiplier of 1.0. Alias for semnasnet_100. """
    return semnasnet_100(pretrained=pretrained, **kwargs)
def semnasnet_140(pretrained=False, **kwargs):
    """ MNASNet A1 (w/ SE), depth multiplier of 1.4. """
    return _gen_mnasnet_a1('semnasnet_140', 1.4, pretrained=pretrained, **kwargs)
def mnasnet_small(pretrained=False, **kwargs):
    """ MNASNet Small, depth multiplier of 1.0. """
    return _gen_mnasnet_small('mnasnet_small', 1.0, pretrained=pretrained, **kwargs)
def mobilenetv2_100(pretrained=False, **kwargs):
    """ MobileNet V2 w/ 1.0 channel multiplier """
    return _gen_mobilenet_v2('mobilenetv2_100', 1.0, pretrained=pretrained, **kwargs)
def mobilenetv2_140(pretrained=False, **kwargs):
    """ MobileNet V2 w/ 1.4 channel multiplier """
    return _gen_mobilenet_v2('mobilenetv2_140', 1.4, pretrained=pretrained, **kwargs)
def mobilenetv2_110d(pretrained=False, **kwargs):
    """ MobileNet V2 w/ 1.1 channel, 1.2 depth multipliers"""
    return _gen_mobilenet_v2(
        'mobilenetv2_110d', 1.1, depth_multiplier=1.2, fix_stem_head=True, pretrained=pretrained, **kwargs)
def mobilenetv2_120d(pretrained=False, **kwargs):
    """ MobileNet V2 w/ 1.2 channel, 1.4 depth multipliers """
    return _gen_mobilenet_v2(
        'mobilenetv2_120d', 1.2, depth_multiplier=1.4, fix_stem_head=True, pretrained=pretrained, **kwargs)
def fbnetc_100(pretrained=False, **kwargs):
    """ FBNet-C """
    if pretrained:
        # pretrained weights were trained with a non-default (TF) BN epsilon
        kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
    return _gen_fbnetc('fbnetc_100', 1.0, pretrained=pretrained, **kwargs)
def spnasnet_100(pretrained=False, **kwargs):
    """ Single-Path NAS Pixel1"""
    return _gen_spnasnet('spnasnet_100', 1.0, pretrained=pretrained, **kwargs)
def efficientnet_b0(pretrained=False, **kwargs):
    """ EfficientNet-B0 """
    return _gen_efficientnet(
        'efficientnet_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)
def efficientnet_b1(pretrained=False, **kwargs):
    """ EfficientNet-B1 """
    return _gen_efficientnet(
        'efficientnet_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)
def efficientnet_b2(pretrained=False, **kwargs):
    """ EfficientNet-B2 """
    return _gen_efficientnet(
        'efficientnet_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs)
def efficientnet_b3(pretrained=False, **kwargs):
    """ EfficientNet-B3 """
    return _gen_efficientnet(
        'efficientnet_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)
def efficientnet_b4(pretrained=False, **kwargs):
    """ EfficientNet-B4 """
    return _gen_efficientnet(
        'efficientnet_b4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs)
def efficientnet_b5(pretrained=False, **kwargs):
    """ EfficientNet-B5 """
    return _gen_efficientnet(
        'efficientnet_b5', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs)
def efficientnet_b6(pretrained=False, **kwargs):
    """ EfficientNet-B6 """
    return _gen_efficientnet(
        'efficientnet_b6', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs)
def efficientnet_b7(pretrained=False, **kwargs):
    """ EfficientNet-B7 """
    return _gen_efficientnet(
        'efficientnet_b7', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs)
def efficientnet_b8(pretrained=False, **kwargs):
    """ EfficientNet-B8 """
    return _gen_efficientnet(
        'efficientnet_b8', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs)
def efficientnet_l2(pretrained=False, **kwargs):
    """ EfficientNet-L2. """
    return _gen_efficientnet(
        'efficientnet_l2', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs)
def efficientnet_es(pretrained=False, **kwargs):
    """ EfficientNet-Edge Small. """
    return _gen_efficientnet_edge(
        'efficientnet_es', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)
def efficientnet_em(pretrained=False, **kwargs):
    """ EfficientNet-Edge-Medium. """
    return _gen_efficientnet_edge(
        'efficientnet_em', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)
def efficientnet_el(pretrained=False, **kwargs):
    """ EfficientNet-Edge-Large. """
    return _gen_efficientnet_edge(
        'efficientnet_el', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)