code stringlengths 17 6.64M |
|---|
def efficientnet_b1(pretrained=False, **kwargs):
    """EfficientNet-B1 (width 1.0x, depth 1.1x)."""
    return _gen_efficientnet(
        'efficientnet_b1', channel_multiplier=1.0, depth_multiplier=1.1,
        pretrained=pretrained, **kwargs)
|
def efficientnet_b2(pretrained=False, **kwargs):
    """EfficientNet-B2 (width 1.1x, depth 1.2x)."""
    return _gen_efficientnet(
        'efficientnet_b2', channel_multiplier=1.1, depth_multiplier=1.2,
        pretrained=pretrained, **kwargs)
|
def efficientnet_b3(pretrained=False, **kwargs):
    """EfficientNet-B3 (width 1.2x, depth 1.4x)."""
    return _gen_efficientnet(
        'efficientnet_b3', channel_multiplier=1.2, depth_multiplier=1.4,
        pretrained=pretrained, **kwargs)
|
def efficientnet_b4(pretrained=False, **kwargs):
    """EfficientNet-B4 (width 1.4x, depth 1.8x)."""
    return _gen_efficientnet(
        'efficientnet_b4', channel_multiplier=1.4, depth_multiplier=1.8,
        pretrained=pretrained, **kwargs)
|
def efficientnet_b5(pretrained=False, **kwargs):
    """EfficientNet-B5 (width 1.6x, depth 2.2x)."""
    return _gen_efficientnet(
        'efficientnet_b5', channel_multiplier=1.6, depth_multiplier=2.2,
        pretrained=pretrained, **kwargs)
|
def efficientnet_b6(pretrained=False, **kwargs):
    """EfficientNet-B6 (width 1.8x, depth 2.6x)."""
    return _gen_efficientnet(
        'efficientnet_b6', channel_multiplier=1.8, depth_multiplier=2.6,
        pretrained=pretrained, **kwargs)
|
def efficientnet_b7(pretrained=False, **kwargs):
    """EfficientNet-B7 (width 2.0x, depth 3.1x)."""
    return _gen_efficientnet(
        'efficientnet_b7', channel_multiplier=2.0, depth_multiplier=3.1,
        pretrained=pretrained, **kwargs)
|
def efficientnet_b8(pretrained=False, **kwargs):
    """EfficientNet-B8 (width 2.2x, depth 3.6x)."""
    return _gen_efficientnet(
        'efficientnet_b8', channel_multiplier=2.2, depth_multiplier=3.6,
        pretrained=pretrained, **kwargs)
|
def efficientnet_l2(pretrained=False, **kwargs):
    """EfficientNet-L2 (width 4.3x, depth 5.3x)."""
    return _gen_efficientnet(
        'efficientnet_l2', channel_multiplier=4.3, depth_multiplier=5.3,
        pretrained=pretrained, **kwargs)
|
def efficientnet_es(pretrained=False, **kwargs):
    """EfficientNet-Edge Small."""
    return _gen_efficientnet_edge(
        'efficientnet_es', channel_multiplier=1.0, depth_multiplier=1.0,
        pretrained=pretrained, **kwargs)
|
def efficientnet_em(pretrained=False, **kwargs):
    """EfficientNet-Edge Medium."""
    return _gen_efficientnet_edge(
        'efficientnet_em', channel_multiplier=1.0, depth_multiplier=1.1,
        pretrained=pretrained, **kwargs)
|
def efficientnet_el(pretrained=False, **kwargs):
    """EfficientNet-Edge Large."""
    return _gen_efficientnet_edge(
        'efficientnet_el', channel_multiplier=1.2, depth_multiplier=1.4,
        pretrained=pretrained, **kwargs)
|
def efficientnet_cc_b0_4e(pretrained=False, **kwargs):
    """EfficientNet-CondConv-B0 w/ 4 Experts.

    NOTE: the previous docstring incorrectly said 8 experts; this variant uses
    the default expert count (no experts_multiplier), i.e. 4 experts — compare
    the ``_8e`` variants which pass experts_multiplier=2.
    """
    model = _gen_efficientnet_condconv(
        'efficientnet_cc_b0_4e', channel_multiplier=1.0, depth_multiplier=1.0,
        pretrained=pretrained, **kwargs)
    return model
|
def efficientnet_cc_b0_8e(pretrained=False, **kwargs):
    """EfficientNet-CondConv-B0 w/ 8 Experts (experts_multiplier=2)."""
    return _gen_efficientnet_condconv(
        'efficientnet_cc_b0_8e', channel_multiplier=1.0, depth_multiplier=1.0,
        experts_multiplier=2, pretrained=pretrained, **kwargs)
|
def efficientnet_cc_b1_8e(pretrained=False, **kwargs):
    """EfficientNet-CondConv-B1 w/ 8 Experts (experts_multiplier=2)."""
    return _gen_efficientnet_condconv(
        'efficientnet_cc_b1_8e', channel_multiplier=1.0, depth_multiplier=1.1,
        experts_multiplier=2, pretrained=pretrained, **kwargs)
|
def efficientnet_lite0(pretrained=False, **kwargs):
    """EfficientNet-Lite0 (width 1.0x, depth 1.0x)."""
    return _gen_efficientnet_lite(
        'efficientnet_lite0', channel_multiplier=1.0, depth_multiplier=1.0,
        pretrained=pretrained, **kwargs)
|
def efficientnet_lite1(pretrained=False, **kwargs):
    """EfficientNet-Lite1 (width 1.0x, depth 1.1x)."""
    return _gen_efficientnet_lite(
        'efficientnet_lite1', channel_multiplier=1.0, depth_multiplier=1.1,
        pretrained=pretrained, **kwargs)
|
def efficientnet_lite2(pretrained=False, **kwargs):
    """EfficientNet-Lite2 (width 1.1x, depth 1.2x)."""
    return _gen_efficientnet_lite(
        'efficientnet_lite2', channel_multiplier=1.1, depth_multiplier=1.2,
        pretrained=pretrained, **kwargs)
|
def efficientnet_lite3(pretrained=False, **kwargs):
    """EfficientNet-Lite3 (width 1.2x, depth 1.4x)."""
    return _gen_efficientnet_lite(
        'efficientnet_lite3', channel_multiplier=1.2, depth_multiplier=1.4,
        pretrained=pretrained, **kwargs)
|
def efficientnet_lite4(pretrained=False, **kwargs):
    """EfficientNet-Lite4 (width 1.4x, depth 1.8x)."""
    return _gen_efficientnet_lite(
        'efficientnet_lite4', channel_multiplier=1.4, depth_multiplier=1.8,
        pretrained=pretrained, **kwargs)
|
def tf_efficientnet_b0(pretrained=False, **kwargs):
    """EfficientNet-B0 AutoAug, Tensorflow-compatible ('same' pad, TF BN eps)."""
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_efficientnet(
        'tf_efficientnet_b0', channel_multiplier=1.0, depth_multiplier=1.0,
        pretrained=pretrained, **kwargs)
|
def tf_efficientnet_b1(pretrained=False, **kwargs):
    """EfficientNet-B1 AutoAug, Tensorflow-compatible ('same' pad, TF BN eps)."""
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_efficientnet(
        'tf_efficientnet_b1', channel_multiplier=1.0, depth_multiplier=1.1,
        pretrained=pretrained, **kwargs)
|
def tf_efficientnet_b2(pretrained=False, **kwargs):
    """EfficientNet-B2 AutoAug, Tensorflow-compatible ('same' pad, TF BN eps)."""
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_efficientnet(
        'tf_efficientnet_b2', channel_multiplier=1.1, depth_multiplier=1.2,
        pretrained=pretrained, **kwargs)
|
def tf_efficientnet_b3(pretrained=False, **kwargs):
    """EfficientNet-B3 AutoAug, Tensorflow-compatible ('same' pad, TF BN eps)."""
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_efficientnet(
        'tf_efficientnet_b3', channel_multiplier=1.2, depth_multiplier=1.4,
        pretrained=pretrained, **kwargs)
|
def tf_efficientnet_b4(pretrained=False, **kwargs):
    """EfficientNet-B4 AutoAug, Tensorflow-compatible ('same' pad, TF BN eps)."""
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_efficientnet(
        'tf_efficientnet_b4', channel_multiplier=1.4, depth_multiplier=1.8,
        pretrained=pretrained, **kwargs)
|
def tf_efficientnet_b5(pretrained=False, **kwargs):
    """EfficientNet-B5 RandAug, Tensorflow-compatible ('same' pad, TF BN eps)."""
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_efficientnet(
        'tf_efficientnet_b5', channel_multiplier=1.6, depth_multiplier=2.2,
        pretrained=pretrained, **kwargs)
|
def tf_efficientnet_b6(pretrained=False, **kwargs):
    """EfficientNet-B6 AutoAug, Tensorflow-compatible ('same' pad, TF BN eps)."""
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_efficientnet(
        'tf_efficientnet_b6', channel_multiplier=1.8, depth_multiplier=2.6,
        pretrained=pretrained, **kwargs)
|
def tf_efficientnet_b7(pretrained=False, **kwargs):
    """EfficientNet-B7 RandAug, Tensorflow-compatible ('same' pad, TF BN eps)."""
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_efficientnet(
        'tf_efficientnet_b7', channel_multiplier=2.0, depth_multiplier=3.1,
        pretrained=pretrained, **kwargs)
|
def tf_efficientnet_b8(pretrained=False, **kwargs):
    """EfficientNet-B8 RandAug, Tensorflow-compatible ('same' pad, TF BN eps)."""
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_efficientnet(
        'tf_efficientnet_b8', channel_multiplier=2.2, depth_multiplier=3.6,
        pretrained=pretrained, **kwargs)
|
def tf_efficientnet_b0_ap(pretrained=False, **kwargs):
    """EfficientNet-B0 AdvProp, Tensorflow-compatible variant.

    Paper: Adversarial Examples Improve Image Recognition
    (https://arxiv.org/abs/1911.09665)
    """
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_efficientnet(
        'tf_efficientnet_b0_ap', channel_multiplier=1.0, depth_multiplier=1.0,
        pretrained=pretrained, **kwargs)
|
def tf_efficientnet_b1_ap(pretrained=False, **kwargs):
    """EfficientNet-B1 AdvProp, Tensorflow-compatible variant.

    Paper: Adversarial Examples Improve Image Recognition
    (https://arxiv.org/abs/1911.09665)
    """
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_efficientnet(
        'tf_efficientnet_b1_ap', channel_multiplier=1.0, depth_multiplier=1.1,
        pretrained=pretrained, **kwargs)
|
def tf_efficientnet_b2_ap(pretrained=False, **kwargs):
    """EfficientNet-B2 AdvProp, Tensorflow-compatible variant.

    Paper: Adversarial Examples Improve Image Recognition
    (https://arxiv.org/abs/1911.09665)
    """
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_efficientnet(
        'tf_efficientnet_b2_ap', channel_multiplier=1.1, depth_multiplier=1.2,
        pretrained=pretrained, **kwargs)
|
def tf_efficientnet_b3_ap(pretrained=False, **kwargs):
    """EfficientNet-B3 AdvProp, Tensorflow-compatible variant.

    Paper: Adversarial Examples Improve Image Recognition
    (https://arxiv.org/abs/1911.09665)
    """
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_efficientnet(
        'tf_efficientnet_b3_ap', channel_multiplier=1.2, depth_multiplier=1.4,
        pretrained=pretrained, **kwargs)
|
def tf_efficientnet_b4_ap(pretrained=False, **kwargs):
    """EfficientNet-B4 AdvProp, Tensorflow-compatible variant.

    Paper: Adversarial Examples Improve Image Recognition
    (https://arxiv.org/abs/1911.09665)
    """
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_efficientnet(
        'tf_efficientnet_b4_ap', channel_multiplier=1.4, depth_multiplier=1.8,
        pretrained=pretrained, **kwargs)
|
def tf_efficientnet_b5_ap(pretrained=False, **kwargs):
    """EfficientNet-B5 AdvProp, Tensorflow-compatible variant.

    Paper: Adversarial Examples Improve Image Recognition
    (https://arxiv.org/abs/1911.09665)
    """
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_efficientnet(
        'tf_efficientnet_b5_ap', channel_multiplier=1.6, depth_multiplier=2.2,
        pretrained=pretrained, **kwargs)
|
def tf_efficientnet_b6_ap(pretrained=False, **kwargs):
    """EfficientNet-B6 AdvProp, Tensorflow-compatible variant.

    Paper: Adversarial Examples Improve Image Recognition
    (https://arxiv.org/abs/1911.09665)
    """
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_efficientnet(
        'tf_efficientnet_b6_ap', channel_multiplier=1.8, depth_multiplier=2.6,
        pretrained=pretrained, **kwargs)
|
def tf_efficientnet_b7_ap(pretrained=False, **kwargs):
    """EfficientNet-B7 AdvProp, Tensorflow-compatible variant.

    Paper: Adversarial Examples Improve Image Recognition
    (https://arxiv.org/abs/1911.09665)
    """
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_efficientnet(
        'tf_efficientnet_b7_ap', channel_multiplier=2.0, depth_multiplier=3.1,
        pretrained=pretrained, **kwargs)
|
def tf_efficientnet_b8_ap(pretrained=False, **kwargs):
    """EfficientNet-B8 AdvProp, Tensorflow-compatible variant.

    Paper: Adversarial Examples Improve Image Recognition
    (https://arxiv.org/abs/1911.09665)
    """
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_efficientnet(
        'tf_efficientnet_b8_ap', channel_multiplier=2.2, depth_multiplier=3.6,
        pretrained=pretrained, **kwargs)
|
def tf_efficientnet_b0_ns(pretrained=False, **kwargs):
    """EfficientNet-B0 NoisyStudent, Tensorflow-compatible variant.

    Paper: Self-training with Noisy Student improves ImageNet classification
    (https://arxiv.org/abs/1911.04252)
    """
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_efficientnet(
        'tf_efficientnet_b0_ns', channel_multiplier=1.0, depth_multiplier=1.0,
        pretrained=pretrained, **kwargs)
|
def tf_efficientnet_b1_ns(pretrained=False, **kwargs):
    """EfficientNet-B1 NoisyStudent, Tensorflow-compatible variant.

    Paper: Self-training with Noisy Student improves ImageNet classification
    (https://arxiv.org/abs/1911.04252)
    """
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_efficientnet(
        'tf_efficientnet_b1_ns', channel_multiplier=1.0, depth_multiplier=1.1,
        pretrained=pretrained, **kwargs)
|
def tf_efficientnet_b2_ns(pretrained=False, **kwargs):
    """EfficientNet-B2 NoisyStudent, Tensorflow-compatible variant.

    Paper: Self-training with Noisy Student improves ImageNet classification
    (https://arxiv.org/abs/1911.04252)
    """
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_efficientnet(
        'tf_efficientnet_b2_ns', channel_multiplier=1.1, depth_multiplier=1.2,
        pretrained=pretrained, **kwargs)
|
def tf_efficientnet_b3_ns(pretrained=False, **kwargs):
    """EfficientNet-B3 NoisyStudent, Tensorflow-compatible variant.

    Paper: Self-training with Noisy Student improves ImageNet classification
    (https://arxiv.org/abs/1911.04252)
    """
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_efficientnet(
        'tf_efficientnet_b3_ns', channel_multiplier=1.2, depth_multiplier=1.4,
        pretrained=pretrained, **kwargs)
|
def tf_efficientnet_b4_ns(pretrained=False, **kwargs):
    """EfficientNet-B4 NoisyStudent, Tensorflow-compatible variant.

    Paper: Self-training with Noisy Student improves ImageNet classification
    (https://arxiv.org/abs/1911.04252)
    """
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_efficientnet(
        'tf_efficientnet_b4_ns', channel_multiplier=1.4, depth_multiplier=1.8,
        pretrained=pretrained, **kwargs)
|
def tf_efficientnet_b5_ns(pretrained=False, **kwargs):
    """EfficientNet-B5 NoisyStudent, Tensorflow-compatible variant.

    Paper: Self-training with Noisy Student improves ImageNet classification
    (https://arxiv.org/abs/1911.04252)
    """
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_efficientnet(
        'tf_efficientnet_b5_ns', channel_multiplier=1.6, depth_multiplier=2.2,
        pretrained=pretrained, **kwargs)
|
def tf_efficientnet_b6_ns(pretrained=False, **kwargs):
    """EfficientNet-B6 NoisyStudent, Tensorflow-compatible variant.

    Paper: Self-training with Noisy Student improves ImageNet classification
    (https://arxiv.org/abs/1911.04252)
    """
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_efficientnet(
        'tf_efficientnet_b6_ns', channel_multiplier=1.8, depth_multiplier=2.6,
        pretrained=pretrained, **kwargs)
|
def tf_efficientnet_b7_ns(pretrained=False, **kwargs):
    """EfficientNet-B7 NoisyStudent, Tensorflow-compatible variant.

    Paper: Self-training with Noisy Student improves ImageNet classification
    (https://arxiv.org/abs/1911.04252)
    """
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_efficientnet(
        'tf_efficientnet_b7_ns', channel_multiplier=2.0, depth_multiplier=3.1,
        pretrained=pretrained, **kwargs)
|
def tf_efficientnet_l2_ns_475(pretrained=False, **kwargs):
    """EfficientNet-L2 NoisyStudent @ 475x475, Tensorflow-compatible variant.

    Paper: Self-training with Noisy Student improves ImageNet classification
    (https://arxiv.org/abs/1911.04252)
    """
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_efficientnet(
        'tf_efficientnet_l2_ns_475', channel_multiplier=4.3, depth_multiplier=5.3,
        pretrained=pretrained, **kwargs)
|
def tf_efficientnet_l2_ns(pretrained=False, **kwargs):
    """EfficientNet-L2 NoisyStudent, Tensorflow-compatible variant.

    Paper: Self-training with Noisy Student improves ImageNet classification
    (https://arxiv.org/abs/1911.04252)
    """
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_efficientnet(
        'tf_efficientnet_l2_ns', channel_multiplier=4.3, depth_multiplier=5.3,
        pretrained=pretrained, **kwargs)
|
def tf_efficientnet_es(pretrained=False, **kwargs):
    """EfficientNet-Edge Small, Tensorflow-compatible variant."""
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_efficientnet_edge(
        'tf_efficientnet_es', channel_multiplier=1.0, depth_multiplier=1.0,
        pretrained=pretrained, **kwargs)
|
def tf_efficientnet_em(pretrained=False, **kwargs):
    """EfficientNet-Edge Medium, Tensorflow-compatible variant."""
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_efficientnet_edge(
        'tf_efficientnet_em', channel_multiplier=1.0, depth_multiplier=1.1,
        pretrained=pretrained, **kwargs)
|
def tf_efficientnet_el(pretrained=False, **kwargs):
    """EfficientNet-Edge Large, Tensorflow-compatible variant."""
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_efficientnet_edge(
        'tf_efficientnet_el', channel_multiplier=1.2, depth_multiplier=1.4,
        pretrained=pretrained, **kwargs)
|
def tf_efficientnet_cc_b0_4e(pretrained=False, **kwargs):
    """EfficientNet-CondConv-B0 w/ 4 Experts, Tensorflow-compatible variant."""
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_efficientnet_condconv(
        'tf_efficientnet_cc_b0_4e', channel_multiplier=1.0, depth_multiplier=1.0,
        pretrained=pretrained, **kwargs)
|
def tf_efficientnet_cc_b0_8e(pretrained=False, **kwargs):
    """EfficientNet-CondConv-B0 w/ 8 Experts, Tensorflow-compatible variant."""
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_efficientnet_condconv(
        'tf_efficientnet_cc_b0_8e', channel_multiplier=1.0, depth_multiplier=1.0,
        experts_multiplier=2, pretrained=pretrained, **kwargs)
|
def tf_efficientnet_cc_b1_8e(pretrained=False, **kwargs):
    """EfficientNet-CondConv-B1 w/ 8 Experts, Tensorflow-compatible variant."""
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_efficientnet_condconv(
        'tf_efficientnet_cc_b1_8e', channel_multiplier=1.0, depth_multiplier=1.1,
        experts_multiplier=2, pretrained=pretrained, **kwargs)
|
def tf_efficientnet_lite0(pretrained=False, **kwargs):
    """EfficientNet-Lite0, Tensorflow-compatible variant."""
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_efficientnet_lite(
        'tf_efficientnet_lite0', channel_multiplier=1.0, depth_multiplier=1.0,
        pretrained=pretrained, **kwargs)
|
def tf_efficientnet_lite1(pretrained=False, **kwargs):
    """EfficientNet-Lite1, Tensorflow-compatible variant."""
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_efficientnet_lite(
        'tf_efficientnet_lite1', channel_multiplier=1.0, depth_multiplier=1.1,
        pretrained=pretrained, **kwargs)
|
def tf_efficientnet_lite2(pretrained=False, **kwargs):
    """EfficientNet-Lite2, Tensorflow-compatible variant."""
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_efficientnet_lite(
        'tf_efficientnet_lite2', channel_multiplier=1.1, depth_multiplier=1.2,
        pretrained=pretrained, **kwargs)
|
def tf_efficientnet_lite3(pretrained=False, **kwargs):
    """EfficientNet-Lite3, Tensorflow-compatible variant."""
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_efficientnet_lite(
        'tf_efficientnet_lite3', channel_multiplier=1.2, depth_multiplier=1.4,
        pretrained=pretrained, **kwargs)
|
def tf_efficientnet_lite4(pretrained=False, **kwargs):
    """EfficientNet-Lite4, Tensorflow-compatible variant."""
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_efficientnet_lite(
        'tf_efficientnet_lite4', channel_multiplier=1.4, depth_multiplier=1.8,
        pretrained=pretrained, **kwargs)
|
def mixnet_s(pretrained=False, **kwargs):
    """Create a MixNet Small model."""
    return _gen_mixnet_s(
        'mixnet_s', channel_multiplier=1.0, pretrained=pretrained, **kwargs)
|
def mixnet_m(pretrained=False, **kwargs):
    """Create a MixNet Medium model."""
    return _gen_mixnet_m(
        'mixnet_m', channel_multiplier=1.0, pretrained=pretrained, **kwargs)
|
def mixnet_l(pretrained=False, **kwargs):
    """Create a MixNet Large model (Medium generator, width 1.3x)."""
    return _gen_mixnet_m(
        'mixnet_l', channel_multiplier=1.3, pretrained=pretrained, **kwargs)
|
def mixnet_xl(pretrained=False, **kwargs):
    """Create a MixNet Extra-Large model.

    Not a paper spec; experimental definition by RW with depth scaling.
    """
    return _gen_mixnet_m(
        'mixnet_xl', channel_multiplier=1.6, depth_multiplier=1.2,
        pretrained=pretrained, **kwargs)
|
def mixnet_xxl(pretrained=False, **kwargs):
    """Create a MixNet Double Extra Large model.

    Not a paper spec; experimental definition by RW with depth scaling.
    """
    return _gen_mixnet_m(
        'mixnet_xxl', channel_multiplier=2.4, depth_multiplier=1.3,
        pretrained=pretrained, **kwargs)
|
def tf_mixnet_s(pretrained=False, **kwargs):
    """Create a MixNet Small model, Tensorflow-compatible variant."""
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_mixnet_s(
        'tf_mixnet_s', channel_multiplier=1.0, pretrained=pretrained, **kwargs)
|
def tf_mixnet_m(pretrained=False, **kwargs):
    """Create a MixNet Medium model, Tensorflow-compatible variant."""
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_mixnet_m(
        'tf_mixnet_m', channel_multiplier=1.0, pretrained=pretrained, **kwargs)
|
def tf_mixnet_l(pretrained=False, **kwargs):
    """Create a MixNet Large model, Tensorflow-compatible variant."""
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_mixnet_m(
        'tf_mixnet_l', channel_multiplier=1.3, pretrained=pretrained, **kwargs)
|
def load_checkpoint(model, checkpoint_path):
    """Load model weights from a local checkpoint file.

    Handles both a raw state dict and a training-checkpoint dict wrapping a
    'state_dict' key, stripping the 'module.' prefix that nn.DataParallel /
    DistributedDataParallel add to parameter names.

    Args:
        model: the nn.Module to load weights into.
        checkpoint_path: path to a checkpoint file saved by torch.save.

    Raises:
        FileNotFoundError: if checkpoint_path is empty or not a file.
    """
    if not (checkpoint_path and os.path.isfile(checkpoint_path)):
        print("=> Error: No checkpoint found at '{}'".format(checkpoint_path))
        raise FileNotFoundError("No checkpoint found at '{}'".format(checkpoint_path))
    print("=> Loading checkpoint '{}'".format(checkpoint_path))
    # map_location='cpu' so checkpoints saved on GPU load on CPU-only hosts;
    # load_state_dict moves tensors onto the model's parameters regardless.
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        new_state_dict = OrderedDict()
        for k, v in checkpoint['state_dict'].items():
            # BUG FIX: match the full 'module.' prefix (7 chars) rather than just
            # 'module', which would corrupt keys like 'modules_x' when sliced.
            if k.startswith('module.'):
                name = k[7:]
            else:
                name = k
            new_state_dict[name] = v
        model.load_state_dict(new_state_dict)
    else:
        model.load_state_dict(checkpoint)
    print("=> Loaded checkpoint '{}'".format(checkpoint_path))
|
def load_pretrained(model, url, filter_fn=None, strict=True):
    """Download pretrained weights from `url` and load them into `model`.

    Adapts the checkpoint when the model differs from the pretrained weights:
    a 1-channel stem is synthesized by summing the pretrained RGB filters;
    otherwise a mismatched stem, or a mismatched classifier, is discarded and
    loading falls back to non-strict mode.

    Args:
        model: target module exposing `conv_stem` and `classifier` attributes.
        url: checkpoint URL; a falsy value leaves the model randomly initialized.
        filter_fn: optional callable applied to the state dict before loading.
        strict: passed through to load_state_dict (may be forced False above).
    """
    if not url:
        print('=> Warning: Pretrained model URL is empty, using random initialization.')
        return
    state_dict = load_state_dict_from_url(url, progress=False, map_location='cpu')

    stem_name = 'conv_stem'
    head_name = 'classifier'
    model_in_chans = getattr(model, stem_name).weight.shape[1]
    model_classes = getattr(model, head_name).weight.shape[0]

    stem_key = stem_name + '.weight'
    ckpt_in_chans = state_dict[stem_key].shape[1]
    if model_in_chans != ckpt_in_chans:
        if model_in_chans == 1:
            # collapse the pretrained RGB stem filters into one input channel
            print('=> Converting pretrained input conv {} from {} to 1 channel'.format(
                stem_key, ckpt_in_chans))
            state_dict[stem_key] = state_dict[stem_key].sum(dim=1, keepdim=True)
        else:
            print('=> Discarding pretrained input conv {} since input channel count != {}'.format(
                stem_key, ckpt_in_chans))
            del state_dict[stem_key]
            strict = False

    head_key = head_name + '.weight'
    ckpt_classes = state_dict[head_key].shape[0]
    if model_classes != ckpt_classes:
        print('=> Discarding pretrained classifier since num_classes != {}'.format(ckpt_classes))
        del state_dict[head_key]
        del state_dict[head_name + '.bias']
        strict = False

    if filter_fn is not None:
        state_dict = filter_fn(state_dict)
    model.load_state_dict(state_dict, strict=strict)
|
class MobileNetV3(nn.Module):
    """ MobileNet-V3

    This model utilizes the MobileNet-v3 specific 'efficient head', where global pooling is done
    before the head convolution, without a final batch-norm layer before the classifier.

    Paper: https://arxiv.org/abs/1905.02244
    """

    def __init__(self, block_args, num_classes=1000, in_chans=3, stem_size=16, num_features=1280,
                 head_bias=True, channel_multiplier=1.0, pad_type='', act_layer=HardSwish,
                 drop_rate=0.0, drop_connect_rate=0.0, se_kwargs=None,
                 norm_layer=nn.BatchNorm2d, norm_kwargs=None, weight_init='goog'):
        super(MobileNetV3, self).__init__()
        # BUG FIX: the default norm_kwargs=None previously crashed on **norm_kwargs below.
        norm_kwargs = norm_kwargs or {}
        self.drop_rate = drop_rate

        # Stem
        stem_size = round_channels(stem_size, channel_multiplier)
        self.conv_stem = select_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type)
        # BUG FIX: honor the norm_layer argument (was hard-coded to nn.BatchNorm2d,
        # inconsistent with the blocks built by EfficientNetBuilder below).
        self.bn1 = norm_layer(stem_size, **norm_kwargs)
        self.act1 = act_layer(inplace=True)
        in_chs = stem_size

        # Middle stages (IR/DS/CN blocks per decoded block_args)
        builder = EfficientNetBuilder(
            channel_multiplier, pad_type=pad_type, act_layer=act_layer, se_kwargs=se_kwargs,
            norm_layer=norm_layer, norm_kwargs=norm_kwargs, drop_connect_rate=drop_connect_rate)
        self.blocks = nn.Sequential(*builder(in_chs, block_args))
        in_chs = builder.in_chs

        # Efficient head: pool first, then 1x1 conv (no BN), then classifier.
        self.global_pool = nn.AdaptiveAvgPool2d(1)
        self.conv_head = select_conv2d(in_chs, num_features, 1, padding=pad_type, bias=head_bias)
        self.act2 = act_layer(inplace=True)
        self.classifier = nn.Linear(num_features, num_classes)

        for m in self.modules():
            if weight_init == 'goog':
                initialize_weight_goog(m)
            else:
                initialize_weight_default(m)

    def as_sequential(self):
        """Flatten the model into an equivalent nn.Sequential (useful for export)."""
        layers = [self.conv_stem, self.bn1, self.act1]
        layers.extend(self.blocks)
        layers.extend([
            self.global_pool, self.conv_head, self.act2,
            nn.Flatten(), nn.Dropout(self.drop_rate), self.classifier])
        return nn.Sequential(*layers)

    def features(self, x):
        """Return pooled pre-classifier features, shape (N, num_features, 1, 1)."""
        x = self.conv_stem(x)
        x = self.bn1(x)
        x = self.act1(x)
        x = self.blocks(x)
        x = self.global_pool(x)
        x = self.conv_head(x)
        x = self.act2(x)
        return x

    def forward(self, x):
        x = self.features(x)
        x = x.flatten(1)
        if self.drop_rate > 0.0:
            x = F.dropout(x, p=self.drop_rate, training=self.training)
        return self.classifier(x)
|
def _create_model(model_kwargs, variant, pretrained=False):
    """Instantiate a MobileNetV3 from kwargs, optionally loading pretrained weights.

    Pops the 'as_sequential' flag from model_kwargs; when set, the built model
    is flattened into an nn.Sequential before being returned.
    """
    to_sequential = model_kwargs.pop('as_sequential', False)
    model = MobileNetV3(**model_kwargs)
    if pretrained and model_urls[variant]:
        load_pretrained(model, model_urls[variant])
    return model.as_sequential() if to_sequential else model
|
def _gen_mobilenet_v3_rw(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
    'Creates a MobileNet-V3 model (RW variant).\n\n Paper: https://arxiv.org/abs/1905.02244\n\n This was my first attempt at reproducing the MobileNet-V3 from paper alone. It came close to the\n eventual Tensorflow reference impl but has a few differences:\n 1. This model has no bias on the head convolution\n 2. This model forces no residual (noskip) on the first DWS block, this is different than MnasNet\n 3. This model always uses ReLU for the SE activation layer, other models in the family inherit their act layer\n from their parent block\n 4. This model does not enforce divisible by 8 limitation on the SE reduction channel count\n\n Overall the changes are fairly minor and result in a very small parameter count difference and no\n top-1/5\n\n Args:\n channel_multiplier: multiplier to number of channels per layer.\n '
    # Block-args spec, one inner list per stage; strings are decoded by decode_arch_def
    # (ds=depthwise-separable, ir=inverted residual, cn=conv; r=repeat, k=kernel,
    # s=stride, e=expansion, c=channels, se=squeeze-excite ratio, nre=ReLU act).
    arch_def = [['ds_r1_k3_s1_e1_c16_nre_noskip'], ['ir_r1_k3_s2_e4_c24_nre', 'ir_r1_k3_s1_e3_c24_nre'], ['ir_r3_k5_s2_e3_c40_se0.25_nre'], ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], ['ir_r2_k3_s1_e6_c112_se0.25'], ['ir_r3_k5_s2_e6_c160_se0.25'], ['cn_r1_k1_s1_c960']]
    with layer_config_kwargs(kwargs):
        # head_bias=False and reduce_mid SE are the RW-specific deviations noted above
        model_kwargs = dict(block_args=decode_arch_def(arch_def), head_bias=False, channel_multiplier=channel_multiplier, act_layer=resolve_act_layer(kwargs, 'hard_swish'), se_kwargs=dict(gate_fn=get_act_fn('hard_sigmoid'), reduce_mid=True), norm_kwargs=resolve_bn_args(kwargs), **kwargs)
        model = _create_model(model_kwargs, variant, pretrained)
    return model
|
def _gen_mobilenet_v3(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
    'Creates a MobileNet-V3 large/small/minimal models.\n\n Ref impl: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet_v3.py\n Paper: https://arxiv.org/abs/1905.02244\n\n Args:\n channel_multiplier: multiplier to number of channels per layer.\n '
    # The variant name selects the architecture: 'small' vs large head width,
    # and 'minimal' strips SE blocks / 5x5 kernels / hard-swish for plain ReLU.
    if ('small' in variant):
        num_features = 1024
        if ('minimal' in variant):
            act_layer = 'relu'
            arch_def = [['ds_r1_k3_s2_e1_c16'], ['ir_r1_k3_s2_e4.5_c24', 'ir_r1_k3_s1_e3.67_c24'], ['ir_r1_k3_s2_e4_c40', 'ir_r2_k3_s1_e6_c40'], ['ir_r2_k3_s1_e3_c48'], ['ir_r3_k3_s2_e6_c96'], ['cn_r1_k1_s1_c576']]
        else:
            act_layer = 'hard_swish'
            arch_def = [['ds_r1_k3_s2_e1_c16_se0.25_nre'], ['ir_r1_k3_s2_e4.5_c24_nre', 'ir_r1_k3_s1_e3.67_c24_nre'], ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r2_k5_s1_e6_c40_se0.25'], ['ir_r2_k5_s1_e3_c48_se0.25'], ['ir_r3_k5_s2_e6_c96_se0.25'], ['cn_r1_k1_s1_c576']]
    else:
        num_features = 1280
        if ('minimal' in variant):
            act_layer = 'relu'
            arch_def = [['ds_r1_k3_s1_e1_c16'], ['ir_r1_k3_s2_e4_c24', 'ir_r1_k3_s1_e3_c24'], ['ir_r3_k3_s2_e3_c40'], ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], ['ir_r2_k3_s1_e6_c112'], ['ir_r3_k3_s2_e6_c160'], ['cn_r1_k1_s1_c960']]
        else:
            act_layer = 'hard_swish'
            arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k3_s2_e4_c24_nre', 'ir_r1_k3_s1_e3_c24_nre'], ['ir_r3_k5_s2_e3_c40_se0.25_nre'], ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], ['ir_r2_k3_s1_e6_c112_se0.25'], ['ir_r3_k5_s2_e6_c160_se0.25'], ['cn_r1_k1_s1_c960']]
    with layer_config_kwargs(kwargs):
        # SE blocks use ReLU + hard-sigmoid gate with mid-channel reduction rounded to /8,
        # matching the TF reference implementation.
        model_kwargs = dict(block_args=decode_arch_def(arch_def), num_features=num_features, stem_size=16, channel_multiplier=channel_multiplier, act_layer=resolve_act_layer(kwargs, act_layer), se_kwargs=dict(act_layer=get_act_layer('relu'), gate_fn=get_act_fn('hard_sigmoid'), reduce_mid=True, divisor=8), norm_kwargs=resolve_bn_args(kwargs), **kwargs)
        model = _create_model(model_kwargs, variant, pretrained)
    return model
|
def mobilenetv3_rw(pretrained=False, **kwargs):
    """MobileNet-V3 RW variant.

    Attn: See note in gen function for this variant.
    """
    if pretrained:
        # the published weights for this variant were trained with TF BN epsilon
        kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
    return _gen_mobilenet_v3_rw('mobilenetv3_rw', 1.0, pretrained=pretrained, **kwargs)
|
def mobilenetv3_large_075(pretrained=False, **kwargs):
    """MobileNet V3 Large, width 0.75."""
    return _gen_mobilenet_v3('mobilenetv3_large_075', 0.75, pretrained=pretrained, **kwargs)
|
def mobilenetv3_large_100(pretrained=False, **kwargs):
    """MobileNet V3 Large, width 1.0."""
    return _gen_mobilenet_v3('mobilenetv3_large_100', 1.0, pretrained=pretrained, **kwargs)
|
def mobilenetv3_large_minimal_100(pretrained=False, **kwargs):
    """MobileNet V3 Large (Minimalistic), width 1.0."""
    return _gen_mobilenet_v3('mobilenetv3_large_minimal_100', 1.0, pretrained=pretrained, **kwargs)
|
def mobilenetv3_small_075(pretrained=False, **kwargs):
    """MobileNet V3 Small, width 0.75."""
    return _gen_mobilenet_v3('mobilenetv3_small_075', 0.75, pretrained=pretrained, **kwargs)
|
def mobilenetv3_small_100(pretrained=False, **kwargs):
    """MobileNet V3 Small, width 1.0."""
    return _gen_mobilenet_v3('mobilenetv3_small_100', 1.0, pretrained=pretrained, **kwargs)
|
def mobilenetv3_small_minimal_100(pretrained=False, **kwargs):
    """MobileNet V3 Small (Minimalistic), width 1.0."""
    return _gen_mobilenet_v3('mobilenetv3_small_minimal_100', 1.0, pretrained=pretrained, **kwargs)
|
def tf_mobilenetv3_large_075(pretrained=False, **kwargs):
    """MobileNet V3 Large 0.75, Tensorflow-compatible variant."""
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_mobilenet_v3('tf_mobilenetv3_large_075', 0.75, pretrained=pretrained, **kwargs)
|
def tf_mobilenetv3_large_100(pretrained=False, **kwargs):
    """MobileNet V3 Large 1.0, Tensorflow-compatible variant."""
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_mobilenet_v3('tf_mobilenetv3_large_100', 1.0, pretrained=pretrained, **kwargs)
|
def tf_mobilenetv3_large_minimal_100(pretrained=False, **kwargs):
    """MobileNet V3 Large Minimalistic 1.0, Tensorflow-compatible variant."""
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_mobilenet_v3('tf_mobilenetv3_large_minimal_100', 1.0, pretrained=pretrained, **kwargs)
|
def tf_mobilenetv3_small_075(pretrained=False, **kwargs):
    """MobileNet V3 Small 0.75, Tensorflow-compatible variant."""
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_mobilenet_v3('tf_mobilenetv3_small_075', 0.75, pretrained=pretrained, **kwargs)
|
def tf_mobilenetv3_small_100(pretrained=False, **kwargs):
    """MobileNet V3 Small 1.0, Tensorflow-compatible variant."""
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_mobilenet_v3('tf_mobilenetv3_small_100', 1.0, pretrained=pretrained, **kwargs)
|
def tf_mobilenetv3_small_minimal_100(pretrained=False, **kwargs):
    """MobileNet V3 Small Minimalistic 1.0, Tensorflow-compatible variant.

    Forces TF batch-norm epsilon and SAME padding for weight compatibility.
    """
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_mobilenet_v3(
        'tf_mobilenetv3_small_minimal_100', 1.0, pretrained=pretrained, **kwargs)
|
def create_model(model_name='mnasnet_100', pretrained=None, num_classes=1000, in_chans=3, checkpoint_path='', **kwargs):
    """Build a model by looking up its factory function in this module.

    Raises RuntimeError if no factory named `model_name` exists. When a
    checkpoint path is given and pretrained weights were not requested,
    the checkpoint is loaded into the freshly built model.
    """
    if model_name not in globals():
        raise RuntimeError('Unknown model (%s)' % model_name)
    create_fn = globals()[model_name]
    model = create_fn(
        num_classes=num_classes, in_chans=in_chans, pretrained=pretrained, **kwargs)
    if checkpoint_path and not pretrained:
        load_checkpoint(model, checkpoint_path)
    return model
|
def main():
    """Export a geffnet model to ONNX, then validate (and optionally
    cross-check against Caffe2) the exported graph."""
    args = parser.parse_args()
    # Default to pretrained weights unless an explicit checkpoint overrides.
    args.pretrained = True
    if args.checkpoint:
        args.pretrained = False
    print('==> Creating PyTorch {} model'.format(args.model))
    model = geffnet.create_model(
        args.model, num_classes=args.num_classes, in_chans=3,
        pretrained=args.pretrained, checkpoint_path=args.checkpoint,
        exportable=True)
    model.eval()
    example_input = torch.randn(
        (args.batch_size, 3, args.img_size or 224, args.img_size or 224),
        requires_grad=True)
    # Warm-up forward pass before tracing/export.
    model(example_input)
    print("==> Exporting model to ONNX format at '{}'".format(args.output))
    input_names = ['input0']
    output_names = ['output0']
    dynamic_axes = {'input0': {0: 'batch'}, 'output0': {0: 'batch'}}
    if args.dynamic_size:
        dynamic_axes['input0'][2] = 'height'
        dynamic_axes['input0'][3] = 'width'
    if args.aten_fallback:
        export_type = torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK
    else:
        export_type = torch.onnx.OperatorExportTypes.ONNX
    # NOTE: torch.onnx._export is a private API, used here because it returns
    # the traced torch output needed for the numerical comparison below.
    torch_out = torch.onnx._export(
        model, example_input, args.output, export_params=True, verbose=True,
        input_names=input_names, output_names=output_names,
        keep_initializers_as_inputs=args.keep_init, dynamic_axes=dynamic_axes,
        opset_version=args.opset, operator_export_type=export_type)
    print("==> Loading and checking exported model from '{}'".format(args.output))
    onnx_model = onnx.load(args.output)
    onnx.checker.check_model(onnx_model)
    print('==> Passed')
    if args.keep_init and args.aten_fallback:
        import caffe2.python.onnx.backend as onnx_caffe2
        print('==> Loading model into Caffe2 backend and comparing forward pass.'.format(args.output))
        caffe2_backend = onnx_caffe2.prepare(onnx_model)
        # Bug fix: the feed dict previously referenced an undefined name `x`
        # (NameError at runtime); feed the traced example input instead.
        B = {onnx_model.graph.input[0].name: example_input.data.numpy()}
        c2_out = caffe2_backend.run(B)[0]
        np.testing.assert_almost_equal(torch_out.data.numpy(), c2_out, decimal=5)
        print('==> Passed')
|
def traverse_graph(graph, prefix=''):
    """Recursively render an ONNX graph as indented text.

    Returns (num_nodes, text) where num_nodes counts the nodes of this graph
    plus all nested subgraphs, and text is their printable representation.
    """
    content = []
    indent = prefix + ' '
    subgraphs = []
    num_nodes = 0
    for node in graph.node:
        pn, gs = onnx.helper.printable_node(node, indent, subgraphs=True)
        assert isinstance(gs, list)
        content.append(pn)
        subgraphs.extend(gs)
        num_nodes += 1
    for g in subgraphs:
        # Bug fix: propagate the accumulated indent into the recursion so
        # nested subgraphs print one level deeper instead of resetting to
        # the left margin (previously `prefix` was dropped here).
        g_count, g_str = traverse_graph(g, indent)
        content.append('\n' + g_str)
        num_nodes += g_count
    return num_nodes, '\n'.join(content)
|
def main():
    """Load an ONNX model, run a fixed set of optimizer passes, and save it."""
    args = parser.parse_args()
    onnx_model = onnx.load(args.model)
    # Only the node count of the original graph is reported; the rendered
    # text is discarded.
    num_original_nodes, _ = traverse_graph(onnx_model.graph)
    passes = ['eliminate_identity', 'eliminate_nop_dropout', 'eliminate_nop_pad', 'eliminate_nop_transpose', 'eliminate_unused_initializer', 'extract_constant_to_initializer', 'fuse_add_bias_into_conv', 'fuse_bn_into_conv', 'fuse_consecutive_concats', 'fuse_consecutive_reduce_unsqueeze', 'fuse_consecutive_squeezes', 'fuse_consecutive_transposes', 'fuse_pad_into_conv']
    # Message fix: added the missing space after "ONNX." in the warning text.
    warnings.warn("I've had issues with optimizer in recent versions of PyTorch / ONNX. Try onnxruntime optimization if this doesn't work.")
    optimized_model = optimizer.optimize(onnx_model, passes)
    # Typo fix: local variable renamed 'optimzied_graph_str' -> 'optimized_graph_str'.
    num_optimized_nodes, optimized_graph_str = traverse_graph(optimized_model.graph)
    print('==> The model after optimization:\n{}\n'.format(optimized_graph_str))
    print('==> The optimized model has {} nodes, the original had {}.'.format(num_optimized_nodes, num_original_nodes))
    onnx.save(optimized_model, args.output)
|
def main():
    """Convert an ONNX model into serialized Caffe2 init/predict nets."""
    args = parser.parse_args()
    onnx_model = onnx.load(args.model)
    caffe2_init, caffe2_predict = Caffe2Backend.onnx_graph_to_caffe2_net(onnx_model)
    # Write each net next to the requested prefix with its conventional suffix.
    for net, suffix in ((caffe2_init, '.init.pb'), (caffe2_predict, '.predict.pb')):
        with open(args.c2_prefix + suffix, 'wb') as f:
            f.write(net.SerializeToString())
|
class AverageMeter:
    """Tracks the latest value plus a running sum, count and average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out all tracked statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
|
def accuracy(output, target, topk=(1,)):
    """Compute the top-k precision (in percent) of `output` against `target`.

    Returns one scalar tensor per requested k, in the order given by `topk`.
    """
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    # After the transpose: rows = rank (0..maxk-1), columns = samples.
    pred = pred.t()
    hits = pred.eq(target.view(1, -1).expand_as(pred))
    scale = 100.0 / batch_size
    return [hits[:k].reshape(-1).float().sum(0).mul_(scale) for k in topk]
|
def get_outdir(path, *paths, inc=False):
    """Join path components into an output directory and create it.

    If the directory already exists and `inc` is set, pick the first free
    `<dir>-N` suffix (N starting at 1, capped below 100) and create that
    instead; otherwise the existing directory is returned as-is.
    """
    outdir = os.path.join(path, *paths)
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    elif inc:
        count = 1
        candidate = '%s-%d' % (outdir, count)
        while os.path.exists(candidate):
            count += 1
            candidate = '%s-%d' % (outdir, count)
            assert count < 100
        outdir = candidate
        os.makedirs(outdir)
    return outdir
|
def main():
    # Validate a geffnet model on an ImageNet-style folder dataset,
    # reporting loss, top-1/top-5 precision and per-batch timing.
    args = parser.parse_args()
    # Default to pretrained weights unless an explicit checkpoint was given.
    if ((not args.checkpoint) and (not args.pretrained)):
        args.pretrained = True
    # `suppress` is a no-op context manager; swapped for autocast when AMP
    # is requested and available.
    amp_autocast = suppress
    if args.amp:
        if (not has_native_amp):
            print('Native Torch AMP is not available (requires torch >= 1.6), using FP32.')
        else:
            amp_autocast = torch.cuda.amp.autocast
    model = geffnet.create_model(args.model, num_classes=args.num_classes, in_chans=3, pretrained=args.pretrained, checkpoint_path=args.checkpoint, scriptable=args.torchscript)
    if args.channels_last:
        model = model.to(memory_format=torch.channels_last)
    if args.torchscript:
        # Scripting requires the model built with scriptable=True above.
        torch.jit.optimized_execution(True)
        model = torch.jit.script(model)
    print(('Model %s created, param count: %d' % (args.model, sum([m.numel() for m in model.parameters()]))))
    # Resolve input size / interpolation / normalization from model + args.
    data_config = resolve_data_config(model, args)
    criterion = nn.CrossEntropyLoss()
    if (not args.no_cuda):
        if (args.num_gpu > 1):
            model = torch.nn.DataParallel(model, device_ids=list(range(args.num_gpu))).cuda()
        else:
            model = model.cuda()
        criterion = criterion.cuda()
    # Prefetching is only enabled on GPU runs (it moves batches to CUDA).
    loader = create_loader(Dataset(args.data, load_bytes=args.tf_preprocessing), input_size=data_config['input_size'], batch_size=args.batch_size, use_prefetcher=(not args.no_cuda), interpolation=data_config['interpolation'], mean=data_config['mean'], std=data_config['std'], num_workers=args.workers, crop_pct=data_config['crop_pct'], tensorflow_preprocessing=args.tf_preprocessing)
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    model.eval()
    end = time.time()
    with torch.no_grad():
        # NOTE: `input` shadows the builtin; kept for byte-compatibility.
        for (i, (input, target)) in enumerate(loader):
            if (not args.no_cuda):
                target = target.cuda()
                input = input.cuda()
            if args.channels_last:
                input = input.contiguous(memory_format=torch.channels_last)
            with amp_autocast():
                output = model(input)
            loss = criterion(output, target)
            (prec1, prec5) = accuracy(output.data, target, topk=(1, 5))
            # Meters are weighted by the actual batch size (last batch may
            # be smaller).
            losses.update(loss.item(), input.size(0))
            top1.update(prec1.item(), input.size(0))
            top5.update(prec5.item(), input.size(0))
            batch_time.update((time.time() - end))
            end = time.time()
            if ((i % args.print_freq) == 0):
                print('Test: [{0}/{1}]\tTime {batch_time.val:.3f} ({batch_time.avg:.3f}, {rate_avg:.3f}/s) \tLoss {loss.val:.4f} ({loss.avg:.4f})\tPrec@1 {top1.val:.3f} ({top1.avg:.3f})\tPrec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(i, len(loader), batch_time=batch_time, rate_avg=(input.size(0) / batch_time.avg), loss=losses, top1=top1, top5=top5))
    # Final summary: averages plus the corresponding error rates (100 - acc).
    print(' * Prec@1 {top1.avg:.3f} ({top1a:.3f}) Prec@5 {top5.avg:.3f} ({top5a:.3f})'.format(top1=top1, top1a=(100 - top1.avg), top5=top5, top5a=(100.0 - top5.avg)))
|
def mvtec_classes():
    """Return the 15 MVTec AD category names, in alphabetical order."""
    return ('bottle cable capsule carpet grid hazelnut leather metal_nut '
            'pill screw tile toothbrush transistor wood zipper').split()
|
class MVTecDataset():
    """Bundles the train and test splits for one MVTec AD class.

    Known MVTec classes are downloaded and extracted on first use; for any
    other `cls` the data is assumed to already exist under DATASETS_PATH.
    """

    def __init__(self, cls: str, size: int=224):
        self.cls = cls
        self.size = size
        # Auto-download only applies to the known class list; a custom
        # class name skips the download but the datasets below are still
        # constructed, so the files must already be on disk.
        if (cls in mvtec_classes()):
            self._download()
        self.train_ds = MVTecTrainDataset(cls, size)
        self.test_ds = MVTecTestDataset(cls, size)

    def _download(self):
        # Fetch and unpack the per-class archive unless it is already cached.
        if (not isdir((DATASETS_PATH / self.cls))):
            print(f" Could not find '{self.cls}' in '{DATASETS_PATH}/'. Downloading ... ")
            url = f'https://www.mydrive.ch/shares/38536/3830184030e49fe74747669442f0f282/download/420938134-1629953256/{self.cls}.tar.xz'
            wget.download(url)
            # NOTE(review): extractall trusts member paths of a downloaded
            # archive (tar path-traversal risk) — confirm the source is trusted.
            with tarfile.open(f'{self.cls}.tar.xz') as tar:
                tar.extractall(DATASETS_PATH)
            # Remove the archive once extracted; only the directory is kept.
            os.remove(f'{self.cls}.tar.xz')
            print('')
        else:
            print(f''' Found '{self.cls}' in '{DATASETS_PATH}/'
''')

    def get_datasets(self):
        # (train, test) as plain dataset objects.
        return (self.train_ds, self.test_ds)

    def get_dataloaders(self):
        # (train, test) wrapped in DataLoaders with default settings.
        return (DataLoader(self.train_ds), DataLoader(self.test_ds))
|
class MVTecTrainDataset(ImageFolder):
    """ImageFolder over the (defect-free) training split of one MVTec class."""

    def __init__(self, cls: str, size: int):
        train_transform = transforms.Compose([
            transforms.Resize(256, interpolation=transforms.InterpolationMode.BICUBIC),
            transforms.CenterCrop(size),
            transforms.ToTensor(),
            transforms.Normalize(IMAGENET_MEAN, IMAGENET_STD),
        ])
        super().__init__(root=DATASETS_PATH / cls / 'train', transform=train_transform)
        self.cls = cls
        self.size = size
|
class MVTecTestDataset(ImageFolder):
    """ImageFolder over the test split of one MVTec class.

    Items are (image, ground-truth mask, class) triples where class is
    0 for 'good' samples (blank mask) and 1 for anomalies.
    """

    def __init__(self, cls: str, size: int):
        image_transform = transforms.Compose([
            transforms.Resize(256, interpolation=transforms.InterpolationMode.BICUBIC),
            transforms.CenterCrop(size),
            transforms.ToTensor(),
            transforms.Normalize(IMAGENET_MEAN, IMAGENET_STD),
        ])
        mask_transform = transforms.Compose([
            transforms.Resize(256, interpolation=transforms.InterpolationMode.NEAREST),
            transforms.CenterCrop(size),
            transforms.ToTensor(),
        ])
        super().__init__(
            root=DATASETS_PATH / cls / 'test',
            transform=image_transform,
            target_transform=mask_transform,
        )
        self.cls = cls
        self.size = size

    def __getitem__(self, index):
        path, _ = self.samples[index]
        sample = self.loader(path)
        if 'good' in path:
            # Defect-free sample: all-black single-channel mask, class 0.
            target = Image.new('L', (self.size, self.size))
            sample_class = 0
        else:
            # Anomalous sample: load the matching ground-truth mask, class 1.
            mask_path = path.replace('test', 'ground_truth').replace('.png', '_mask.png')
            target = self.loader(mask_path)
            sample_class = 1
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            target = self.target_transform(target)
        # Keep only the first mask channel.
        return (sample, target[:1], sample_class)
|
class StreamingDataset:
    """In-memory dataset of user-supplied PIL images; made specifically for
    the streamlit app."""

    def __init__(self, size: int=224):
        self.size = size
        self.transform = transforms.Compose([
            transforms.Resize(256, interpolation=transforms.InterpolationMode.BICUBIC),
            transforms.CenterCrop(size),
            transforms.ToTensor(),
            transforms.Normalize(IMAGENET_MEAN, IMAGENET_STD),
        ])
        self.samples = []

    def add_pil_image(self, image: Image):
        """Normalize to RGB and append to the sample list."""
        self.samples.append(image.convert('RGB'))

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, index):
        # Dummy zero label: streaming inputs have no ground truth.
        return (self.transform(self.samples[index]), tensor(0.0))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.