def efficientnet_cc_b0_4e(pretrained=False, **kwargs):
' EfficientNet-CondConv-B0 w/ 4 Experts '
model = _gen_efficientnet_condconv('efficientnet_cc_b0_4e', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)
return model
|
def efficientnet_cc_b0_8e(pretrained=False, **kwargs):
' EfficientNet-CondConv-B0 w/ 8 Experts '
model = _gen_efficientnet_condconv('efficientnet_cc_b0_8e', channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=2, pretrained=pretrained, **kwargs)
return model
|
def efficientnet_cc_b1_8e(pretrained=False, **kwargs):
' EfficientNet-CondConv-B1 w/ 8 Experts '
model = _gen_efficientnet_condconv('efficientnet_cc_b1_8e', channel_multiplier=1.0, depth_multiplier=1.1, experts_multiplier=2, pretrained=pretrained, **kwargs)
return model
|
def efficientnet_lite0(pretrained=False, **kwargs):
' EfficientNet-Lite0 '
model = _gen_efficientnet_lite('efficientnet_lite0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)
return model
|
def efficientnet_lite1(pretrained=False, **kwargs):
' EfficientNet-Lite1 '
model = _gen_efficientnet_lite('efficientnet_lite1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)
return model
|
def efficientnet_lite2(pretrained=False, **kwargs):
' EfficientNet-Lite2 '
model = _gen_efficientnet_lite('efficientnet_lite2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs)
return model
|
def efficientnet_lite3(pretrained=False, **kwargs):
' EfficientNet-Lite3 '
model = _gen_efficientnet_lite('efficientnet_lite3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)
return model
|
def efficientnet_lite4(pretrained=False, **kwargs):
' EfficientNet-Lite4 '
model = _gen_efficientnet_lite('efficientnet_lite4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs)
return model
|
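# A minimal sketch of how the channel_multiplier values above take effect
# (_demo_round_channels is an illustrative helper, not part of this module):
# scaled channel counts are rounded to a multiple of 8, the convention used
# by the EfficientNet reference implementations.
def _demo_round_channels(channels, multiplier=1.0, divisor=8):
    scaled = channels * multiplier
    rounded = max(divisor, int(scaled + divisor / 2) // divisor * divisor)
    if rounded < 0.9 * scaled:
        rounded += divisor  # never round down by more than 10%
    return rounded

# e.g. _demo_round_channels(40, 1.4) == 56 for a Lite4-style stage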
def tf_efficientnet_b0(pretrained=False, **kwargs):
' EfficientNet-B0 AutoAug. Tensorflow compatible variant '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_efficientnet('tf_efficientnet_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)
return model
|
def tf_efficientnet_b1(pretrained=False, **kwargs):
' EfficientNet-B1 AutoAug. Tensorflow compatible variant '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_efficientnet('tf_efficientnet_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)
return model
|
def tf_efficientnet_b2(pretrained=False, **kwargs):
' EfficientNet-B2 AutoAug. Tensorflow compatible variant '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_efficientnet('tf_efficientnet_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs)
return model
|
def tf_efficientnet_b3(pretrained=False, **kwargs):
' EfficientNet-B3 AutoAug. Tensorflow compatible variant '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_efficientnet('tf_efficientnet_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)
return model
|
def tf_efficientnet_b4(pretrained=False, **kwargs):
' EfficientNet-B4 AutoAug. Tensorflow compatible variant '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_efficientnet('tf_efficientnet_b4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs)
return model
|
def tf_efficientnet_b5(pretrained=False, **kwargs):
' EfficientNet-B5 RandAug. Tensorflow compatible variant '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_efficientnet('tf_efficientnet_b5', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs)
return model
|
def tf_efficientnet_b6(pretrained=False, **kwargs):
' EfficientNet-B6 AutoAug. Tensorflow compatible variant '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_efficientnet('tf_efficientnet_b6', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs)
return model
|
def tf_efficientnet_b7(pretrained=False, **kwargs):
' EfficientNet-B7 RandAug. Tensorflow compatible variant '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_efficientnet('tf_efficientnet_b7', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs)
return model
|
def tf_efficientnet_b8(pretrained=False, **kwargs):
' EfficientNet-B8 RandAug. Tensorflow compatible variant '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_efficientnet('tf_efficientnet_b8', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs)
return model
|
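# Usage sketch: the tf_* constructors differ from the plain PyTorch variants
# only in BN epsilon and 'same'-style padding, which keeps weights ported from
# the Tensorflow reference checkpoints numerically aligned. A minimal example,
# assuming this module is importable as geffnet:
#
#   import geffnet
#   model = geffnet.create_model('tf_efficientnet_b0', pretrained=False)
#   model.eval()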
def tf_efficientnet_b0_ap(pretrained=False, **kwargs):
' EfficientNet-B0 AdvProp. Tensorflow compatible variant\n Paper: Adversarial Examples Improve Image Recognition (https://arxiv.org/abs/1911.09665)\n '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_efficientnet('tf_efficientnet_b0_ap', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)
return model
|
def tf_efficientnet_b1_ap(pretrained=False, **kwargs):
' EfficientNet-B1 AdvProp. Tensorflow compatible variant\n Paper: Adversarial Examples Improve Image Recognition (https://arxiv.org/abs/1911.09665)\n '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_efficientnet('tf_efficientnet_b1_ap', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)
return model
|
def tf_efficientnet_b2_ap(pretrained=False, **kwargs):
' EfficientNet-B2 AdvProp. Tensorflow compatible variant\n Paper: Adversarial Examples Improve Image Recognition (https://arxiv.org/abs/1911.09665)\n '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_efficientnet('tf_efficientnet_b2_ap', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs)
return model
|
def tf_efficientnet_b3_ap(pretrained=False, **kwargs):
' EfficientNet-B3 AdvProp. Tensorflow compatible variant\n Paper: Adversarial Examples Improve Image Recognition (https://arxiv.org/abs/1911.09665)\n '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_efficientnet('tf_efficientnet_b3_ap', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)
return model
|
def tf_efficientnet_b4_ap(pretrained=False, **kwargs):
' EfficientNet-B4 AdvProp. Tensorflow compatible variant\n Paper: Adversarial Examples Improve Image Recognition (https://arxiv.org/abs/1911.09665)\n '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_efficientnet('tf_efficientnet_b4_ap', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs)
return model
|
def tf_efficientnet_b5_ap(pretrained=False, **kwargs):
' EfficientNet-B5 AdvProp. Tensorflow compatible variant\n Paper: Adversarial Examples Improve Image Recognition (https://arxiv.org/abs/1911.09665)\n '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_efficientnet('tf_efficientnet_b5_ap', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs)
return model
|
def tf_efficientnet_b6_ap(pretrained=False, **kwargs):
' EfficientNet-B6 AdvProp. Tensorflow compatible variant\n Paper: Adversarial Examples Improve Image Recognition (https://arxiv.org/abs/1911.09665)\n '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_efficientnet('tf_efficientnet_b6_ap', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs)
return model
|
def tf_efficientnet_b7_ap(pretrained=False, **kwargs):
' EfficientNet-B7 AdvProp. Tensorflow compatible variant\n Paper: Adversarial Examples Improve Image Recognition (https://arxiv.org/abs/1911.09665)\n '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_efficientnet('tf_efficientnet_b7_ap', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs)
return model
|
def tf_efficientnet_b8_ap(pretrained=False, **kwargs):
' EfficientNet-B8 AdvProp. Tensorflow compatible variant\n Paper: Adversarial Examples Improve Image Recognition (https://arxiv.org/abs/1911.09665)\n '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_efficientnet('tf_efficientnet_b8_ap', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs)
return model
|
def tf_efficientnet_b0_ns(pretrained=False, **kwargs):
' EfficientNet-B0 NoisyStudent. Tensorflow compatible variant\n Paper: Self-training with Noisy Student improves ImageNet classification (https://arxiv.org/abs/1911.04252)\n '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_efficientnet('tf_efficientnet_b0_ns', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)
return model
|
def tf_efficientnet_b1_ns(pretrained=False, **kwargs):
' EfficientNet-B1 NoisyStudent. Tensorflow compatible variant\n Paper: Self-training with Noisy Student improves ImageNet classification (https://arxiv.org/abs/1911.04252)\n '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_efficientnet('tf_efficientnet_b1_ns', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)
return model
|
def tf_efficientnet_b2_ns(pretrained=False, **kwargs):
' EfficientNet-B2 NoisyStudent. Tensorflow compatible variant\n Paper: Self-training with Noisy Student improves ImageNet classification (https://arxiv.org/abs/1911.04252)\n '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_efficientnet('tf_efficientnet_b2_ns', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs)
return model
|
def tf_efficientnet_b3_ns(pretrained=False, **kwargs):
' EfficientNet-B3 NoisyStudent. Tensorflow compatible variant\n Paper: Self-training with Noisy Student improves ImageNet classification (https://arxiv.org/abs/1911.04252)\n '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_efficientnet('tf_efficientnet_b3_ns', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)
return model
|
def tf_efficientnet_b4_ns(pretrained=False, **kwargs):
' EfficientNet-B4 NoisyStudent. Tensorflow compatible variant\n Paper: Self-training with Noisy Student improves ImageNet classification (https://arxiv.org/abs/1911.04252)\n '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_efficientnet('tf_efficientnet_b4_ns', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs)
return model
|
def tf_efficientnet_b5_ns(pretrained=False, **kwargs):
' EfficientNet-B5 NoisyStudent. Tensorflow compatible variant\n Paper: Self-training with Noisy Student improves ImageNet classification (https://arxiv.org/abs/1911.04252)\n '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_efficientnet('tf_efficientnet_b5_ns', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs)
return model
|
def tf_efficientnet_b6_ns(pretrained=False, **kwargs):
' EfficientNet-B6 NoisyStudent. Tensorflow compatible variant\n Paper: Self-training with Noisy Student improves ImageNet classification (https://arxiv.org/abs/1911.04252)\n '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_efficientnet('tf_efficientnet_b6_ns', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs)
return model
|
def tf_efficientnet_b7_ns(pretrained=False, **kwargs):
' EfficientNet-B7 NoisyStudent. Tensorflow compatible variant\n Paper: Self-training with Noisy Student improves ImageNet classification (https://arxiv.org/abs/1911.04252)\n '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_efficientnet('tf_efficientnet_b7_ns', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs)
return model
|
def tf_efficientnet_l2_ns_475(pretrained=False, **kwargs):
' EfficientNet-L2 NoisyStudent @ 475x475. Tensorflow compatible variant\n Paper: Self-training with Noisy Student improves ImageNet classification (https://arxiv.org/abs/1911.04252)\n '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_efficientnet('tf_efficientnet_l2_ns_475', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs)
return model
|
def tf_efficientnet_l2_ns(pretrained=False, **kwargs):
' EfficientNet-L2 NoisyStudent. Tensorflow compatible variant\n Paper: Self-training with Noisy Student improves ImageNet classification (https://arxiv.org/abs/1911.04252)\n '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_efficientnet('tf_efficientnet_l2_ns', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs)
return model
|
def tf_efficientnet_es(pretrained=False, **kwargs):
' EfficientNet-Edge-Small. Tensorflow compatible variant '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_efficientnet_edge('tf_efficientnet_es', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)
return model
|
def tf_efficientnet_em(pretrained=False, **kwargs):
' EfficientNet-Edge-Medium. Tensorflow compatible variant '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_efficientnet_edge('tf_efficientnet_em', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)
return model
|
def tf_efficientnet_el(pretrained=False, **kwargs):
' EfficientNet-Edge-Large. Tensorflow compatible variant '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_efficientnet_edge('tf_efficientnet_el', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)
return model
|
def tf_efficientnet_cc_b0_4e(pretrained=False, **kwargs):
' EfficientNet-CondConv-B0 w/ 4 Experts. Tensorflow compatible variant '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_efficientnet_condconv('tf_efficientnet_cc_b0_4e', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)
return model
|
def tf_efficientnet_cc_b0_8e(pretrained=False, **kwargs):
' EfficientNet-CondConv-B0 w/ 8 Experts. Tensorflow compatible variant '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_efficientnet_condconv('tf_efficientnet_cc_b0_8e', channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=2, pretrained=pretrained, **kwargs)
return model
|
def tf_efficientnet_cc_b1_8e(pretrained=False, **kwargs):
' EfficientNet-CondConv-B1 w/ 8 Experts. Tensorflow compatible variant '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_efficientnet_condconv('tf_efficientnet_cc_b1_8e', channel_multiplier=1.0, depth_multiplier=1.1, experts_multiplier=2, pretrained=pretrained, **kwargs)
return model
|
def tf_efficientnet_lite0(pretrained=False, **kwargs):
' EfficientNet-Lite0. Tensorflow compatible variant '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_efficientnet_lite('tf_efficientnet_lite0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)
return model
|
def tf_efficientnet_lite1(pretrained=False, **kwargs):
' EfficientNet-Lite1. Tensorflow compatible variant '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_efficientnet_lite('tf_efficientnet_lite1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)
return model
|
def tf_efficientnet_lite2(pretrained=False, **kwargs):
' EfficientNet-Lite2. Tensorflow compatible variant '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_efficientnet_lite('tf_efficientnet_lite2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs)
return model
|
def tf_efficientnet_lite3(pretrained=False, **kwargs):
' EfficientNet-Lite3. Tensorflow compatible variant '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_efficientnet_lite('tf_efficientnet_lite3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)
return model
|
def tf_efficientnet_lite4(pretrained=False, **kwargs):
' EfficientNet-Lite4. Tensorflow compatible variant '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_efficientnet_lite('tf_efficientnet_lite4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs)
return model
|
def mixnet_s(pretrained=False, **kwargs):
'Creates a MixNet Small model.\n '
model = _gen_mixnet_s('mixnet_s', channel_multiplier=1.0, pretrained=pretrained, **kwargs)
return model
|
def mixnet_m(pretrained=False, **kwargs):
'Creates a MixNet Medium model.\n '
model = _gen_mixnet_m('mixnet_m', channel_multiplier=1.0, pretrained=pretrained, **kwargs)
return model
|
def mixnet_l(pretrained=False, **kwargs):
'Creates a MixNet Large model.\n '
model = _gen_mixnet_m('mixnet_l', channel_multiplier=1.3, pretrained=pretrained, **kwargs)
return model
|
def mixnet_xl(pretrained=False, **kwargs):
'Creates a MixNet Extra-Large model.\n Not a paper spec, experimental def by RW w/ depth scaling.\n '
model = _gen_mixnet_m('mixnet_xl', channel_multiplier=1.6, depth_multiplier=1.2, pretrained=pretrained, **kwargs)
return model
|
def mixnet_xxl(pretrained=False, **kwargs):
'Creates a MixNet Double Extra Large model.\n Not a paper spec, experimental def by RW w/ depth scaling.\n '
model = _gen_mixnet_m('mixnet_xxl', channel_multiplier=2.4, depth_multiplier=1.3, pretrained=pretrained, **kwargs)
return model
|
def tf_mixnet_s(pretrained=False, **kwargs):
'Creates a MixNet Small model. Tensorflow compatible variant\n '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_mixnet_s('tf_mixnet_s', channel_multiplier=1.0, pretrained=pretrained, **kwargs)
return model
|
def tf_mixnet_m(pretrained=False, **kwargs):
'Creates a MixNet Medium model. Tensorflow compatible variant\n '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_mixnet_m('tf_mixnet_m', channel_multiplier=1.0, pretrained=pretrained, **kwargs)
return model
|
def tf_mixnet_l(pretrained=False, **kwargs):
'Creates a MixNet Large model. Tensorflow compatible variant\n '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_mixnet_m('tf_mixnet_l', channel_multiplier=1.3, pretrained=pretrained, **kwargs)
return model
|
def load_checkpoint(model, checkpoint_path):
if (checkpoint_path and os.path.isfile(checkpoint_path)):
print("=> Loading checkpoint '{}'".format(checkpoint_path))
checkpoint = torch.load(checkpoint_path)
if (isinstance(checkpoint, dict) and ('state_dict' in checkpoint)):
new_state_dict = OrderedDict()
for (k, v) in checkpoint['state_dict'].items():
if k.startswith('module'):
name = k[7:]
else:
name = k
new_state_dict[name] = v
model.load_state_dict(new_state_dict)
else:
model.load_state_dict(checkpoint)
print("=> Loaded checkpoint '{}'".format(checkpoint_path))
else:
print("=> Error: No checkpoint found at '{}'".format(checkpoint_path))
raise FileNotFoundError()
|
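# Usage sketch for load_checkpoint: checkpoints saved from a
# torch.nn.DataParallel wrapper prefix every key with 'module.', which the
# loop above strips via k[7:]. A minimal, hypothetical round trip:
#
#   wrapped = torch.nn.DataParallel(model)
#   torch.save({'state_dict': wrapped.state_dict()}, 'ckpt.pth')
#   load_checkpoint(model, 'ckpt.pth')  # loads despite the 'module.' prefix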
def load_pretrained(model, url, filter_fn=None, strict=True):
if (not url):
print('=> Warning: Pretrained model URL is empty, using random initialization.')
return
state_dict = load_state_dict_from_url(url, progress=False, map_location='cpu')
input_conv = 'conv_stem'
classifier = 'classifier'
in_chans = getattr(model, input_conv).weight.shape[1]
num_classes = getattr(model, classifier).weight.shape[0]
input_conv_weight = (input_conv + '.weight')
pretrained_in_chans = state_dict[input_conv_weight].shape[1]
if (in_chans != pretrained_in_chans):
if (in_chans == 1):
print('=> Converting pretrained input conv {} from {} to 1 channel'.format(input_conv_weight, pretrained_in_chans))
conv1_weight = state_dict[input_conv_weight]
state_dict[input_conv_weight] = conv1_weight.sum(dim=1, keepdim=True)
else:
print('=> Discarding pretrained input conv {} since input channel count != {}'.format(input_conv_weight, pretrained_in_chans))
del state_dict[input_conv_weight]
strict = False
classifier_weight = (classifier + '.weight')
pretrained_num_classes = state_dict[classifier_weight].shape[0]
if (num_classes != pretrained_num_classes):
print('=> Discarding pretrained classifier since num_classes != {}'.format(pretrained_num_classes))
del state_dict[classifier_weight]
del state_dict[(classifier + '.bias')]
strict = False
if (filter_fn is not None):
state_dict = filter_fn(state_dict)
model.load_state_dict(state_dict, strict=strict)
|
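# The in_chans == 1 branch above adapts a pretrained RGB stem to grayscale by
# summing kernel weights over the input-channel dim. A self-contained
# illustration of that transform (illustrative tensors only):
import torch

rgb_stem = torch.randn(32, 3, 3, 3)            # (out_ch, in_ch, kH, kW)
gray_stem = rgb_stem.sum(dim=1, keepdim=True)  # -> (32, 1, 3, 3)
assert gray_stem.shape == (32, 1, 3, 3)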
class MobileNetV3(nn.Module):
" MobileNet-V3\n\n A this model utilizes the MobileNet-v3 specific 'efficient head', where global pooling is done before the\n head convolution without a final batch-norm layer before the classifier.\n\n Paper: https://arxiv.org/abs/1905.02244\n "
def __init__(self, block_args, num_classes=1000, in_chans=3, stem_size=16, num_features=1280, head_bias=True, channel_multiplier=1.0, pad_type='', act_layer=HardSwish, drop_rate=0.0, drop_connect_rate=0.0, se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None, weight_init='goog'):
super(MobileNetV3, self).__init__()
self.drop_rate = drop_rate
norm_kwargs = norm_kwargs or {}  # guard: the default above is None, which cannot be unpacked
stem_size = round_channels(stem_size, channel_multiplier)
self.conv_stem = select_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type)
self.bn1 = norm_layer(stem_size, **norm_kwargs)  # honor norm_layer instead of hard-coding nn.BatchNorm2d
self.act1 = act_layer(inplace=True)
in_chs = stem_size
builder = EfficientNetBuilder(channel_multiplier, pad_type=pad_type, act_layer=act_layer, se_kwargs=se_kwargs, norm_layer=norm_layer, norm_kwargs=norm_kwargs, drop_connect_rate=drop_connect_rate)
self.blocks = nn.Sequential(*builder(in_chs, block_args))
in_chs = builder.in_chs
self.global_pool = nn.AdaptiveAvgPool2d(1)
self.conv_head = select_conv2d(in_chs, num_features, 1, padding=pad_type, bias=head_bias)
self.act2 = act_layer(inplace=True)
self.classifier = nn.Linear(num_features, num_classes)
for m in self.modules():
if (weight_init == 'goog'):
initialize_weight_goog(m)
else:
initialize_weight_default(m)
def as_sequential(self):
layers = [self.conv_stem, self.bn1, self.act1]
layers.extend(self.blocks)
layers.extend([self.global_pool, self.conv_head, self.act2, nn.Flatten(), nn.Dropout(self.drop_rate), self.classifier])
return nn.Sequential(*layers)
def features(self, x):
x = self.conv_stem(x)
x = self.bn1(x)
x = self.act1(x)
x = self.blocks(x)
x = self.global_pool(x)
x = self.conv_head(x)
x = self.act2(x)
return x
def forward(self, x):
x = self.features(x)
x = x.flatten(1)
if (self.drop_rate > 0.0):
x = F.dropout(x, p=self.drop_rate, training=self.training)
return self.classifier(x)
|
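# Usage sketch: as_sequential() flattens the network into an nn.Sequential,
# handy for feature extraction or per-layer inspection. Hypothetical example,
# assuming an already-built MobileNetV3 instance `model`:
#
#   seq = model.as_sequential()
#   feats = seq[:-3](torch.randn(1, 3, 224, 224))  # stop before Flatten,
#                                                  # Dropout and classifier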
def _create_model(model_kwargs, variant, pretrained=False):
as_sequential = model_kwargs.pop('as_sequential', False)
model = MobileNetV3(**model_kwargs)
if (pretrained and model_urls[variant]):
load_pretrained(model, model_urls[variant])
if as_sequential:
model = model.as_sequential()
return model
|
def _gen_mobilenet_v3_rw(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
'Creates a MobileNet-V3 model (RW variant).\n\n Paper: https://arxiv.org/abs/1905.02244\n\n This was my first attempt at reproducing the MobileNet-V3 from paper alone. It came close to the\n eventual Tensorflow reference impl but has a few differences:\n 1. This model has no bias on the head convolution\n 2. This model forces no residual (noskip) on the first DWS block; this differs from MnasNet\n 3. This model always uses ReLU for the SE activation layer; other models in the family inherit their act layer\n from their parent block\n 4. This model does not enforce the divisible-by-8 limitation on the SE reduction channel count\n\n Overall the changes are fairly minor and result in a very small parameter count difference and no\n meaningful top-1/5 accuracy difference.\n\n Args:\n channel_multiplier: multiplier to number of channels per layer.\n '
arch_def = [['ds_r1_k3_s1_e1_c16_nre_noskip'], ['ir_r1_k3_s2_e4_c24_nre', 'ir_r1_k3_s1_e3_c24_nre'], ['ir_r3_k5_s2_e3_c40_se0.25_nre'], ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], ['ir_r2_k3_s1_e6_c112_se0.25'], ['ir_r3_k5_s2_e6_c160_se0.25'], ['cn_r1_k1_s1_c960']]
with layer_config_kwargs(kwargs):
model_kwargs = dict(block_args=decode_arch_def(arch_def), head_bias=False, channel_multiplier=channel_multiplier, act_layer=resolve_act_layer(kwargs, 'hard_swish'), se_kwargs=dict(gate_fn=get_act_fn('hard_sigmoid'), reduce_mid=True), norm_kwargs=resolve_bn_args(kwargs), **kwargs)
model = _create_model(model_kwargs, variant, pretrained)
return model
|
def _gen_mobilenet_v3(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
'Creates a MobileNet-V3 large/small/minimal models.\n\n Ref impl: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet_v3.py\n Paper: https://arxiv.org/abs/1905.02244\n\n Args:\n channel_multiplier: multiplier to number of channels per layer.\n '
if ('small' in variant):
num_features = 1024
if ('minimal' in variant):
act_layer = 'relu'
arch_def = [['ds_r1_k3_s2_e1_c16'], ['ir_r1_k3_s2_e4.5_c24', 'ir_r1_k3_s1_e3.67_c24'], ['ir_r1_k3_s2_e4_c40', 'ir_r2_k3_s1_e6_c40'], ['ir_r2_k3_s1_e3_c48'], ['ir_r3_k3_s2_e6_c96'], ['cn_r1_k1_s1_c576']]
else:
act_layer = 'hard_swish'
arch_def = [['ds_r1_k3_s2_e1_c16_se0.25_nre'], ['ir_r1_k3_s2_e4.5_c24_nre', 'ir_r1_k3_s1_e3.67_c24_nre'], ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r2_k5_s1_e6_c40_se0.25'], ['ir_r2_k5_s1_e3_c48_se0.25'], ['ir_r3_k5_s2_e6_c96_se0.25'], ['cn_r1_k1_s1_c576']]
else:
num_features = 1280
if ('minimal' in variant):
act_layer = 'relu'
arch_def = [['ds_r1_k3_s1_e1_c16'], ['ir_r1_k3_s2_e4_c24', 'ir_r1_k3_s1_e3_c24'], ['ir_r3_k3_s2_e3_c40'], ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], ['ir_r2_k3_s1_e6_c112'], ['ir_r3_k3_s2_e6_c160'], ['cn_r1_k1_s1_c960']]
else:
act_layer = 'hard_swish'
arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k3_s2_e4_c24_nre', 'ir_r1_k3_s1_e3_c24_nre'], ['ir_r3_k5_s2_e3_c40_se0.25_nre'], ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], ['ir_r2_k3_s1_e6_c112_se0.25'], ['ir_r3_k5_s2_e6_c160_se0.25'], ['cn_r1_k1_s1_c960']]
with layer_config_kwargs(kwargs):
model_kwargs = dict(block_args=decode_arch_def(arch_def), num_features=num_features, stem_size=16, channel_multiplier=channel_multiplier, act_layer=resolve_act_layer(kwargs, act_layer), se_kwargs=dict(act_layer=get_act_layer('relu'), gate_fn=get_act_fn('hard_sigmoid'), reduce_mid=True, divisor=8), norm_kwargs=resolve_bn_args(kwargs), **kwargs)
model = _create_model(model_kwargs, variant, pretrained)
return model
|
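# How to read the arch_def block strings above (the encoding convention used
# by these generators):
#   'ir_r1_k3_s2_e4_c24_nre'
#     ir  -> inverted residual (MBConv) block   r1  -> repeat once
#     k3  -> 3x3 depthwise kernel               s2  -> stride 2
#     e4  -> expansion ratio 4                  c24 -> 24 output channels
#     nre -> use ReLU in this block instead of the model-wide activation
#   'ds' is a depthwise-separable block, 'cn' a plain conv, 'se0.25' adds
#   squeeze-excitation with a 0.25 reduction ratio, and 'noskip' disables
#   the residual connection.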
def mobilenetv3_rw(pretrained=False, **kwargs):
' MobileNet-V3 RW\n Attn: See note in gen function for this variant.\n '
if pretrained:
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
model = _gen_mobilenet_v3_rw('mobilenetv3_rw', 1.0, pretrained=pretrained, **kwargs)
return model
|
def mobilenetv3_large_075(pretrained=False, **kwargs):
' MobileNet V3 Large 0.75'
model = _gen_mobilenet_v3('mobilenetv3_large_075', 0.75, pretrained=pretrained, **kwargs)
return model
|
def mobilenetv3_large_100(pretrained=False, **kwargs):
' MobileNet V3 Large 1.0 '
model = _gen_mobilenet_v3('mobilenetv3_large_100', 1.0, pretrained=pretrained, **kwargs)
return model
|
def mobilenetv3_large_minimal_100(pretrained=False, **kwargs):
' MobileNet V3 Large (Minimalistic) 1.0 '
model = _gen_mobilenet_v3('mobilenetv3_large_minimal_100', 1.0, pretrained=pretrained, **kwargs)
return model
|
def mobilenetv3_small_075(pretrained=False, **kwargs):
' MobileNet V3 Small 0.75 '
model = _gen_mobilenet_v3('mobilenetv3_small_075', 0.75, pretrained=pretrained, **kwargs)
return model
|
def mobilenetv3_small_100(pretrained=False, **kwargs):
' MobileNet V3 Small 1.0 '
model = _gen_mobilenet_v3('mobilenetv3_small_100', 1.0, pretrained=pretrained, **kwargs)
return model
|
def mobilenetv3_small_minimal_100(pretrained=False, **kwargs):
' MobileNet V3 Small (Minimalistic) 1.0 '
model = _gen_mobilenet_v3('mobilenetv3_small_minimal_100', 1.0, pretrained=pretrained, **kwargs)
return model
|
def tf_mobilenetv3_large_075(pretrained=False, **kwargs):
' MobileNet V3 Large 0.75. Tensorflow compat variant. '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_mobilenet_v3('tf_mobilenetv3_large_075', 0.75, pretrained=pretrained, **kwargs)
return model
|
def tf_mobilenetv3_large_100(pretrained=False, **kwargs):
' MobileNet V3 Large 1.0. Tensorflow compat variant. '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_mobilenet_v3('tf_mobilenetv3_large_100', 1.0, pretrained=pretrained, **kwargs)
return model
|
def tf_mobilenetv3_large_minimal_100(pretrained=False, **kwargs):
' MobileNet V3 Large Minimalistic 1.0. Tensorflow compat variant. '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_mobilenet_v3('tf_mobilenetv3_large_minimal_100', 1.0, pretrained=pretrained, **kwargs)
return model
|
def tf_mobilenetv3_small_075(pretrained=False, **kwargs):
' MobileNet V3 Small 0.75. Tensorflow compat variant. '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_mobilenet_v3('tf_mobilenetv3_small_075', 0.75, pretrained=pretrained, **kwargs)
return model
|
def tf_mobilenetv3_small_100(pretrained=False, **kwargs):
' MobileNet V3 Small 1.0. Tensorflow compat variant.'
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_mobilenet_v3('tf_mobilenetv3_small_100', 1.0, pretrained=pretrained, **kwargs)
return model
|
def tf_mobilenetv3_small_minimal_100(pretrained=False, **kwargs):
' MobileNet V3 Small Minimalistic 1.0. Tensorflow compat variant. '
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_mobilenet_v3('tf_mobilenetv3_small_minimal_100', 1.0, pretrained=pretrained, **kwargs)
return model
|
def create_model(model_name='mnasnet_100', pretrained=None, num_classes=1000, in_chans=3, checkpoint_path='', **kwargs):
model_kwargs = dict(num_classes=num_classes, in_chans=in_chans, pretrained=pretrained, **kwargs)
if (model_name in globals()):
create_fn = globals()[model_name]
model = create_fn(**model_kwargs)
else:
raise RuntimeError(('Unknown model (%s)' % model_name))
if (checkpoint_path and (not pretrained)):
load_checkpoint(model, checkpoint_path)
return model
|
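# Usage sketch for create_model: any constructor defined at module level can
# be looked up by name. Hypothetical calls:
#
#   model = create_model('mobilenetv3_large_100', num_classes=10)
#   model = create_model('tf_efficientnet_b0', pretrained=True)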
def main():
args = parser.parse_args()
args.pretrained = True
if args.checkpoint:
args.pretrained = False
print('==> Creating PyTorch {} model'.format(args.model))
model = geffnet.create_model(args.model, num_classes=args.num_classes, in_chans=3, pretrained=args.pretrained, checkpoint_path=args.checkpoint, exportable=True)
model.eval()
example_input = torch.randn((args.batch_size, 3, (args.img_size or 224), (args.img_size or 224)), requires_grad=True)
model(example_input)
print("==> Exporting model to ONNX format at '{}'".format(args.output))
input_names = ['input0']
output_names = ['output0']
dynamic_axes = {'input0': {0: 'batch'}, 'output0': {0: 'batch'}}
if args.dynamic_size:
dynamic_axes['input0'][2] = 'height'
dynamic_axes['input0'][3] = 'width'
if args.aten_fallback:
export_type = torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK
else:
export_type = torch.onnx.OperatorExportTypes.ONNX
torch_out = torch.onnx._export(model, example_input, args.output, export_params=True, verbose=True, input_names=input_names, output_names=output_names, keep_initializers_as_inputs=args.keep_init, dynamic_axes=dynamic_axes, opset_version=args.opset, operator_export_type=export_type)
print("==> Loading and checking exported model from '{}'".format(args.output))
onnx_model = onnx.load(args.output)
onnx.checker.check_model(onnx_model)
print('==> Passed')
if (args.keep_init and args.aten_fallback):
import caffe2.python.onnx.backend as onnx_caffe2
print('==> Loading model into Caffe2 backend and comparing forward pass.')
caffe2_backend = onnx_caffe2.prepare(onnx_model)
B = {onnx_model.graph.input[0].name: example_input.data.numpy()}
c2_out = caffe2_backend.run(B)[0]
np.testing.assert_almost_equal(torch_out.data.numpy(), c2_out, decimal=5)
print('==> Passed')
|
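# Optional sanity check with onnxruntime (an assumption: onnxruntime is
# installed; this script does not import it). It mirrors the Caffe2
# comparison above without the deprecated backend:
#
#   import onnxruntime as ort
#   sess = ort.InferenceSession(args.output, providers=['CPUExecutionProvider'])
#   ort_out = sess.run(None, {'input0': example_input.detach().numpy()})[0]
#   np.testing.assert_almost_equal(torch_out.data.numpy(), ort_out, decimal=5)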
def traverse_graph(graph, prefix=''):
content = []
indent = (prefix + ' ')
graphs = []
num_nodes = 0
for node in graph.node:
(pn, gs) = onnx.helper.printable_node(node, indent, subgraphs=True)
assert isinstance(gs, list)
content.append(pn)
graphs.extend(gs)
num_nodes += 1
for g in graphs:
(g_count, g_str) = traverse_graph(g)
content.append(('\n' + g_str))
num_nodes += g_count
return (num_nodes, '\n'.join(content))
|
def main():
args = parser.parse_args()
onnx_model = onnx.load(args.model)
(num_original_nodes, original_graph_str) = traverse_graph(onnx_model.graph)
passes = ['eliminate_identity', 'eliminate_nop_dropout', 'eliminate_nop_pad', 'eliminate_nop_transpose', 'eliminate_unused_initializer', 'extract_constant_to_initializer', 'fuse_add_bias_into_conv', 'fuse_bn_into_conv', 'fuse_consecutive_concats', 'fuse_consecutive_reduce_unsqueeze', 'fuse_consecutive_squeezes', 'fuse_consecutive_transposes', 'fuse_pad_into_conv']
warnings.warn("I've had issues with optimizer in recent versions of PyTorch / ONNX.Try onnxruntime optimization if this doesn't work.")
optimized_model = optimizer.optimize(onnx_model, passes)
(num_optimized_nodes, optimized_graph_str) = traverse_graph(optimized_model.graph)
print('==> The model after optimization:\n{}\n'.format(optimized_graph_str))
print('==> The optimized model has {} nodes, the original had {}.'.format(num_optimized_nodes, num_original_nodes))
onnx.save(optimized_model, args.output)
|
def main():
args = parser.parse_args()
onnx_model = onnx.load(args.model)
(caffe2_init, caffe2_predict) = Caffe2Backend.onnx_graph_to_caffe2_net(onnx_model)
caffe2_init_str = caffe2_init.SerializeToString()
with open((args.c2_prefix + '.init.pb'), 'wb') as f:
f.write(caffe2_init_str)
caffe2_predict_str = caffe2_predict.SerializeToString()
with open((args.c2_prefix + '.predict.pb'), 'wb') as f:
f.write(caffe2_predict_str)
|
class AverageMeter():
'Computes and stores the average and current value'
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += (val * n)
self.count += n
self.avg = (self.sum / self.count)
|
def accuracy(output, target, topk=(1,)):
'Computes the precision@k for the specified values of k'
maxk = max(topk)
batch_size = target.size(0)
(_, pred) = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, (- 1)).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape((- 1)).float().sum(0)
res.append(correct_k.mul_((100.0 / batch_size)))
return res
|
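# Worked example for accuracy(): two samples, three classes. Sample 0 is
# right at top-1; sample 1 is only recovered within the top-3.
#
#   output = torch.tensor([[0.1, 0.7, 0.2],
#                          [0.8, 0.1, 0.1]])
#   target = torch.tensor([1, 2])
#   accuracy(output, target, topk=(1, 3))  # -> [tensor(50.), tensor(100.)]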
def get_outdir(path, *paths, inc=False):
outdir = os.path.join(path, *paths)
if (not os.path.exists(outdir)):
os.makedirs(outdir)
elif inc:
count = 1
outdir_inc = ((outdir + '-') + str(count))
while os.path.exists(outdir_inc):
count = (count + 1)
outdir_inc = ((outdir + '-') + str(count))
assert (count < 100)
outdir = outdir_inc
os.makedirs(outdir)
return outdir
|
def main():
args = parser.parse_args()
if ((not args.checkpoint) and (not args.pretrained)):
args.pretrained = True
amp_autocast = suppress
if args.amp:
if (not has_native_amp):
print('Native Torch AMP is not available (requires torch >= 1.6), using FP32.')
else:
amp_autocast = torch.cuda.amp.autocast
model = geffnet.create_model(args.model, num_classes=args.num_classes, in_chans=3, pretrained=args.pretrained, checkpoint_path=args.checkpoint, scriptable=args.torchscript)
if args.channels_last:
model = model.to(memory_format=torch.channels_last)
if args.torchscript:
torch.jit.optimized_execution(True)
model = torch.jit.script(model)
print(('Model %s created, param count: %d' % (args.model, sum([m.numel() for m in model.parameters()]))))
data_config = resolve_data_config(model, args)
criterion = nn.CrossEntropyLoss()
if (not args.no_cuda):
if (args.num_gpu > 1):
model = torch.nn.DataParallel(model, device_ids=list(range(args.num_gpu))).cuda()
else:
model = model.cuda()
criterion = criterion.cuda()
loader = create_loader(Dataset(args.data, load_bytes=args.tf_preprocessing), input_size=data_config['input_size'], batch_size=args.batch_size, use_prefetcher=(not args.no_cuda), interpolation=data_config['interpolation'], mean=data_config['mean'], std=data_config['std'], num_workers=args.workers, crop_pct=data_config['crop_pct'], tensorflow_preprocessing=args.tf_preprocessing)
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
model.eval()
end = time.time()
with torch.no_grad():
for (i, (input, target)) in enumerate(loader):
if (not args.no_cuda):
target = target.cuda()
input = input.cuda()
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
with amp_autocast():
output = model(input)
loss = criterion(output, target)
(prec1, prec5) = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(prec1.item(), input.size(0))
top5.update(prec5.item(), input.size(0))
batch_time.update((time.time() - end))
end = time.time()
if ((i % args.print_freq) == 0):
print('Test: [{0}/{1}]\tTime {batch_time.val:.3f} ({batch_time.avg:.3f}, {rate_avg:.3f}/s) \tLoss {loss.val:.4f} ({loss.avg:.4f})\tPrec@1 {top1.val:.3f} ({top1.avg:.3f})\tPrec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(i, len(loader), batch_time=batch_time, rate_avg=(input.size(0) / batch_time.avg), loss=losses, top1=top1, top5=top5))
print(' * Prec@1 {top1.avg:.3f} ({top1a:.3f}) Prec@5 {top5.avg:.3f} ({top5a:.3f})'.format(top1=top1, top1a=(100 - top1.avg), top5=top5, top5a=(100.0 - top5.avg)))
|
class Predictions_Initial_DiffusionNet():
@staticmethod
def Predict(verts, faces, normals):
verts = torch.tensor(np.ascontiguousarray(verts)).float()
faces = torch.tensor(np.ascontiguousarray(faces)).long()
normals = torch.tensor(np.ascontiguousarray(normals)).float()
weights_initial_DiffusionNet = (os.path.dirname(__file__) + '/../models/Weights_initial_DiffusionNet.pth')
if torch.cuda.is_available():
device = torch.device('cuda')
else:
device = torch.device('cpu')
verts = diffusion_net.geometry.normalize_positions(verts)
outputs = diffusion_net.geometry.get_operators(verts, faces, k_eig=128, op_cache_dir=None, normals=normals)
frames = outputs[0].to(device)
mass = outputs[1].to(device)
L = outputs[2].to(device)
evals = outputs[3].to(device)
evecs = outputs[4].to(device)
gradX = outputs[5].to(device)
gradY = outputs[6].to(device)
features = diffusion_net.geometry.compute_hks_autoscale(evals, evecs, 16)
model = diffusion_net.layers.DiffusionNet(C_in=16, C_out=6, C_width=256, N_block=12, outputs_at='vertices')
model = model.to(device)
if torch.cuda.is_available():
model.load_state_dict(torch.load(weights_initial_DiffusionNet))
else:
model.load_state_dict(torch.load(weights_initial_DiffusionNet, map_location=torch.device('cpu')))
model.eval()
pred_initial_DiffusionNet = model(features, mass, L=L, evals=evals, evecs=evecs, gradX=gradX, gradY=gradY, faces=faces)
pred_initial_DiffusionNet = np.asarray(pred_initial_DiffusionNet.cpu().detach().numpy())
return pred_initial_DiffusionNet
|
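# Usage sketch (shapes only; a real call needs a valid mesh): Predict expects
# numpy arrays of vertices (V, 3), faces (F, 3) and vertex normals (V, 3) and
# returns a (V, 6) per-vertex prediction array, matching C_out=6 above.
#
#   pred = Predictions_Initial_DiffusionNet.Predict(verts, faces, normals)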
class Predictions_Final_DiffusionNet():
@staticmethod
def Predict(verts, faces, normals):
verts = torch.tensor(np.ascontiguousarray(verts)).float()
faces = torch.tensor(np.ascontiguousarray(faces)).long()
normals = torch.tensor(np.ascontiguousarray(normals)).float()
weights_final_DiffusionNet = (os.path.dirname(__file__) + '/../models/Weights_final_DiffusionNet.pth')
device = torch.device('cpu')
verts = diffusion_net.geometry.normalize_positions(verts)
outputs = diffusion_net.geometry.get_operators(verts, faces, k_eig=128, op_cache_dir=None, normals=normals)
frames = outputs[0].to(device)
mass = outputs[1].to(device)
L = outputs[2].to(device)
evals = outputs[3].to(device)
evecs = outputs[4].to(device)
gradX = outputs[5].to(device)
gradY = outputs[6].to(device)
model = diffusion_net.layers.DiffusionNet(C_in=3, C_out=10, C_width=384, N_block=12, mlp_hidden_dims=[(2 * 384), (2 * 384)], outputs_at='vertices')
model = model.to(device)
model.load_state_dict(torch.load(weights_final_DiffusionNet, map_location=torch.device('cpu')))
model.eval()
pred_final_DiffusionNet = model(verts, mass, L=L, evals=evals, evecs=evecs, gradX=gradX, gradY=gradY, faces=faces)
pred_final_DiffusionNet = np.asarray(pred_final_DiffusionNet.cpu().detach().numpy())
return pred_final_DiffusionNet
|
class Facial_segmentation():
@staticmethod
def segment(folder_path, scan_file, translation, rot_m1, rot_m2, save_segmented_mesh):
ms = pymeshlab.MeshSet()
ms.load_new_mesh(os.path.join(folder_path, 'output', (scan_file[:(- 4)] + '_original_mm.obj')))
mesh = ms.current_mesh()
vertices_mm = torch.from_numpy(mesh.vertex_matrix().astype(np.float32)).to('cuda:0')
ms.clear()
ms = pymeshlab.MeshSet()
ms.load_new_mesh(os.path.join(folder_path, scan_file))
mesh2 = ms.current_mesh()
original_vertices_np = mesh2.vertex_matrix().astype(np.float32)
original_vertices_torch = torch.from_numpy(mesh2.vertex_matrix().astype(np.float32)).to('cuda:0')
faces_orig_torch = torch.from_numpy(mesh2.face_matrix().astype(np.int64)).to('cuda:0')  # face indices are integers; float32 would corrupt them
faces_orig_np = mesh2.face_matrix()
normals_orig_np = mesh2.vertex_normal_matrix()
mapping = dnc.vertices_mapping_close(original_vertices_torch, vertices_mm, 15.0)
mapping = mapping.to(torch.int8)
vertices_crop = original_vertices_torch[(mapping == 1)]
vertex_ids = torch.where((mapping == 1))[0].cpu().numpy()
vertex_map = torch.full((len(original_vertices_torch),), -1, dtype=torch.long)  # torch.full expects a scalar fill value
vertex_map[vertex_ids] = torch.arange(len(vertex_ids))
for i in range(len(original_vertices_np)):
if (i not in vertex_ids):
original_vertices_np[(i, 0)] = 10000001
m = pymeshlab.Mesh(original_vertices_np, faces_orig_np, normals_orig_np)
ms.add_mesh(m, set_as_current=True)
ms.compute_selection_by_condition_per_vertex(condselect='x > 10000')
ms.meshing_remove_selected_vertices()
if save_segmented_mesh:
ms.save_current_mesh(os.path.join(folder_path, 'output', (scan_file[:(- 4)] + '_segmented.obj')))
ms.compute_matrix_from_translation(axisx=(- translation[0]), axisy=(- translation[1]), axisz=(- translation[2]))
ms.set_matrix(transformmatrix=rot_m1)
ms.set_matrix(transformmatrix=rot_m2)
ms.meshing_remove_duplicate_faces()
ms.meshing_remove_duplicate_vertices()
ms.meshing_repair_non_manifold_edges(method='Remove Faces')
ms.meshing_remove_connected_component_by_diameter(mincomponentdiag=pymeshlab.Percentage(5), removeunref=True)
ms.meshing_close_holes(maxholesize=100)
ms.meshing_repair_non_manifold_edges(method='Remove Faces')
mesh_segmentation = ms.current_mesh()
vertices_segmentation = mesh_segmentation.vertex_matrix().astype(np.float32)
faces_segmentation = mesh_segmentation.face_matrix()
normals_segmentation = mesh_segmentation.vertex_normal_matrix()
return (vertices_segmentation, faces_segmentation, normals_segmentation)
|
class DataLoader(object):
'\n Only loads the dataset files and the information file.\n '
@staticmethod
def parse_data_args(parser):
'\n data loader related command line arguments parser\n :param parser:\n :return:\n '
parser.add_argument('--path', type=str, default='../dataset/', help='Input data dir.')
parser.add_argument('--dataset', type=str, default='ml100k01-1-5', help='Choose a dataset.')
parser.add_argument('--sep', type=str, default='\t', help='sep of csv file.')
parser.add_argument('--label', type=str, default='label', help='name of dataset label column.')
return parser
def __init__(self, path, dataset, label='label', load_data=True, sep='\t', seqs_sep=','):
'\n Initialization\n :param path: dataset path\n :param dataset: dataset name\n :param label: label column name\n :param load_data: True to load the dataset files, False to load only the info file\n :param sep: the separator for .csv files\n :param seqs_sep: the separator within negative/history sequences, for example: "1,2,3"\n '
self.dataset = dataset
self.path = os.path.join(path, dataset)
self.train_file = os.path.join(self.path, (dataset + global_p.TRAIN_SUFFIX))
self.validation_file = os.path.join(self.path, (dataset + global_p.VALIDATION_SUFFIX))
self.test_file = os.path.join(self.path, (dataset + global_p.TEST_SUFFIX))
self.info_file = os.path.join(self.path, (dataset + global_p.INFO_SUFFIX))
self.user_file = os.path.join(self.path, (dataset + global_p.USER_SUFFIX))
self.item_file = os.path.join(self.path, (dataset + global_p.ITEM_SUFFIX))
self.train_his_file = os.path.join(self.path, (dataset + global_p.TRAIN_GROUP_SUFFIX))
self.vt_his_file = os.path.join(self.path, (dataset + global_p.VT_GROUP_SUFFIX))
(self.sep, self.seqs_sep) = (sep, seqs_sep)
self.load_data = load_data
self.label = label
(self.train_df, self.validation_df, self.test_df) = (None, None, None)
self._load_user_item()
self._load_data()
self._load_his()
self._load_info()
def _load_user_item(self):
'\n load user .csv file\n :return:\n '
(self.user_df, self.item_df) = (None, None)
if (os.path.exists(self.user_file) and self.load_data):
logging.info('load user csv...')
self.user_df = pd.read_csv(self.user_file, sep='\t')
if (os.path.exists(self.item_file) and self.load_data):
logging.info('load item csv...')
self.item_df = pd.read_csv(self.item_file, sep='\t')
def _load_data(self):
'\n load train, validation, test .csv files\n :return:\n '
if (os.path.exists(self.train_file) and self.load_data):
logging.info('load train csv...')
self.train_df = pd.read_csv(self.train_file, sep=self.sep)
logging.info(('size of train: %d' % len(self.train_df)))
if (os.path.exists(self.validation_file) and self.load_data):
logging.info('load validation csv...')
self.validation_df = pd.read_csv(self.validation_file, sep=self.sep)
logging.info(('size of validation: %d' % len(self.validation_df)))
if (os.path.exists(self.test_file) and self.load_data):
logging.info('load test csv...')
self.test_df = pd.read_csv(self.test_file, sep=self.sep)
logging.info(('size of test: %d' % len(self.test_df)))
def _load_info(self):
'\n load dataset information file. If not found then create one.\n :return:\n '
def json_type(o):
if isinstance(o, np.int64):
return int(o)
raise TypeError
(max_dict, min_dict) = ({}, {})
if (not os.path.exists(self.info_file)):
for df in [self.train_df, self.validation_df, self.test_df, self.user_df, self.item_df]:
if (df is None):
continue
for c in df.columns:
if (c not in max_dict):
max_dict[c] = df[c].max()
else:
max_dict[c] = max(df[c].max(), max_dict[c])
if (c not in min_dict):
min_dict[c] = df[c].min()
else:
min_dict[c] = min(df[c].min(), min_dict[c])
max_json = json.dumps(max_dict, default=json_type)
min_json = json.dumps(min_dict, default=json_type)
with open(self.info_file, 'w') as out_f:
out_f.write(((max_json + '\n') + min_json))  # '\n' (not os.linesep) guarantees exactly two lines for the reader above
else:
lines = open(self.info_file, 'r').readlines()
max_dict = json.loads(lines[0])
min_dict = json.loads(lines[1])
self.column_max = max_dict
self.column_min = min_dict
self.label_max = self.column_max[self.label]
self.label_min = self.column_min[self.label]
logging.info(('label: %d-%d' % (self.label_min, self.label_max)))
(self.user_num, self.item_num) = (0, 0)
if ('uid' in self.column_max):
self.user_num = (self.column_max['uid'] + 1)
if ('iid' in self.column_max):
self.item_num = (self.column_max['iid'] + 1)
logging.info(('# of users: %d' % self.user_num))
logging.info(('# of items: %d' % self.item_num))
self.user_features = [f for f in self.column_max.keys() if f.startswith('u_')]
logging.info(('# of user features: %d' % len(self.user_features)))
self.item_features = [f for f in self.column_max.keys() if f.startswith('i_')]
logging.info(('# of item features: %d' % len(self.item_features)))
self.context_features = [f for f in self.column_max.keys() if f.startswith('c_')]
logging.info(('# of context features: %d' % len(self.context_features)))
self.features = ((self.context_features + self.user_features) + self.item_features)
logging.info(('# of features: %d' % len(self.features)))
def _load_his(self):
"\n load dataset interactions which have been grouped by uid.\n two columns 'uid' and 'iid'.\n :return:\n "
if (not self.load_data):
return
if (not os.path.exists(self.train_his_file)):
logging.info('building train history csv...')
train_his_df = group_user_interactions_df(self.train_df, label=self.label, seq_sep=self.seqs_sep)
train_his_df.to_csv(self.train_his_file, index=False, sep=self.sep)
if (not os.path.exists(self.vt_his_file)):
logging.info('building vt history csv...')
vt_df = pd.concat([self.validation_df, self.test_df])
vt_his_df = group_user_interactions_df(vt_df, label=self.label, seq_sep=self.seqs_sep)
vt_his_df.to_csv(self.vt_his_file, index=False, sep=self.sep)
def build_his(his_df, seqs_sep):
uids = his_df['uid'].tolist()
iids = his_df['iids'].str.split(seqs_sep).values
iids = [[int(j) for j in i] for i in iids]
user_his = dict(zip(uids, iids))
return user_his
(self.train_his_df, self.train_user_his) = (None, None)
(self.vt_his_df, self.vt_user_his) = (None, None)
if self.load_data:
logging.info('load history csv...')
self.train_his_df = pd.read_csv(self.train_his_file, sep=self.sep)
self.train_user_his = build_his(self.train_his_df, self.seqs_sep)
self.vt_his_df = pd.read_csv(self.vt_his_file, sep=self.sep)
self.vt_user_his = build_his(self.vt_his_df, self.seqs_sep)
def feature_info(self, include_id=True, include_item_features=True, include_user_features=True):
features = []
if include_id:
features.extend(['uid', 'iid'])
if include_user_features:
features.extend(self.user_features)
if include_item_features:
features.extend(self.item_features)
feature_dims = 0
(feature_min, feature_max) = ([], [])
for f in features:
feature_min.append(feature_dims)
feature_dims += int((self.column_max[f] + 1))
feature_max.append((feature_dims - 1))
logging.info(('Model # of features %d' % len(features)))
logging.info(('Model # of feature dims %d' % feature_dims))
return (features, feature_dims, feature_min, feature_max)
def append_his(self, last_n=10, supply=True, neg=False, neg_column=True):
'\n Generate user history interaction sequences.\n Data in train, validation and test must be sorted by timestamp, with train data earlier\n than validation and validation earlier than test.\n :param last_n: the max history length to keep; a value <= 0 keeps all.\n :param supply: True to pad the sequence with -1 up to last_n\n :param neg: whether the interactions include negative feedback\n :param neg_column: whether to add a separate column for the negative sequence; only meaningful when neg=True.\n :return:\n '
(his_dict, neg_dict) = ({}, {})
for df in [self.train_df, self.validation_df, self.test_df]:
if (df is None):
continue
(history, neg_history) = ([], [])
(uids, iids, labels) = (df['uid'].tolist(), df['iid'].tolist(), df[self.label].tolist())
for (i, uid) in enumerate(uids):
(iid, label) = (str(iids[i]), labels[i])
if (uid not in his_dict):
his_dict[uid] = []
if (uid not in neg_dict):
neg_dict[uid] = []
tmp_his = (his_dict[uid] if (last_n <= 0) else his_dict[uid][(- last_n):])
tmp_neg = (neg_dict[uid] if (last_n <= 0) else neg_dict[uid][(- last_n):])
if supply:
tmp_his = (tmp_his + (['-1'] * last_n))
tmp_neg = (tmp_neg + (['-1'] * last_n))
history.append(','.join(tmp_his[:last_n]))
neg_history.append(','.join(tmp_neg[:last_n]))
if ((label <= 0) and (not neg_column) and neg):
his_dict[uid].append(('~' + iid))
elif ((label <= 0) and neg_column):
neg_dict[uid].append(iid)
elif (label > 0):
his_dict[uid].append(iid)
df[global_p.C_HISTORY] = history
if (neg and neg_column):
df[global_p.C_HISTORY_NEG] = neg_history
def drop_neg(self):
logging.info('Drop Neg Samples...')
self.train_df = self.train_df[(self.train_df[self.label] > 0)].reset_index(drop=True)
self.validation_df = self.validation_df[(self.validation_df[self.label] > 0)].reset_index(drop=True)
self.test_df = self.test_df[(self.test_df[self.label] > 0)].reset_index(drop=True)
self.train_df[self.label] = 1
self.validation_df[self.label] = 1
self.test_df[self.label] = 1
logging.info(('size of train: %d' % len(self.train_df)))
logging.info(('size of validation: %d' % len(self.validation_df)))
logging.info(('size of test: %d' % len(self.test_df)))
|
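# Sketch of the info file written by _load_info(): two JSON lines holding the
# per-column maxima and minima. A hypothetical ml100k-style example:
#
#   {"uid": 942, "iid": 1681, "label": 5}
#   {"uid": 0, "iid": 0, "label": 1}
#
# from which user_num = 943 and item_num = 1682 (max id + 1).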
class DataProcessor(object):
data_columns = ['X']
@staticmethod
def parse_dp_args(parser):
'\n\t\tparse data processor related command line arguments\n\t\t'
parser.add_argument('--test_neg_n', type=int, default=10, help='Negative sample num for each instance in test/validation set.')
return parser
def __init__(self, data_loader, model, rank, test_neg_n):
'\n\t\tInitialization\n\t\t:param data_loader: DataLoader object\n\t\t:param model: Model object\n\t\t:param rank: 1=ranking, 0=rating prediction\n\t\t:param test_neg_n: ranking negative sample rate, pos:neg=1:test_neg_n\n\t\t'
self.data_loader = data_loader
self.model = model
self.rank = rank
(self.train_data, self.validation_data, self.test_data) = (None, None, None)
self.val_for_train_data = None
self.test_neg_n = test_neg_n
if (self.rank == 1):
self.train_history_dict = defaultdict(set)
for uid in data_loader.train_user_his.keys():
self.train_history_dict[uid] = set(data_loader.train_user_his[uid])
self.vt_history_dict = defaultdict(set)
for uid in data_loader.vt_user_his.keys():
self.vt_history_dict[uid] = set(data_loader.vt_user_his[uid])
self.vt_batches_buffer = {}
def get_train_data(self, epoch):
if ((self.train_data is None) or (epoch < 0)):
logging.info('Prepare Train Data...')
self.train_data = self.format_data_dict(self.data_loader.train_df)
self.train_data[global_p.K_SAMPLE_ID] = np.arange(0, len(self.train_data['Y']))
if (epoch >= 0):
utils.shuffle_in_unison_scary(self.train_data)
return self.train_data
def get_val_data_for_train(self, epoch):
if ((self.val_for_train_data is None) or (epoch < 0)):
logging.info('Prepare Validation for Training Data...')
self.val_for_train_data = self.format_data_dict(self.data_loader.validation_df)
self.val_for_train_data[global_p.K_SAMPLE_ID] = np.arange(0, len(self.val_for_train_data['Y']))
if (epoch >= 0):
utils.shuffle_in_unison_scary(self.val_for_train_data)
return self.val_for_train_data
def get_validation_data(self):
if (self.validation_data is None):
logging.info('Prepare Validation Data...')
df = self.data_loader.validation_df
if (self.rank == 1):
neg_df = self.generate_neg_df(uid_list=df['uid'].tolist(), iid_list=df['iid'].tolist(), df=df, neg_n=self.test_neg_n, train=False)
df = pd.concat([df, neg_df], ignore_index=True)
self.validation_data = self.format_data_dict(df)
self.validation_data[global_p.K_SAMPLE_ID] = np.arange(0, len(self.validation_data['Y']))
return self.validation_data
def get_test_data(self):
if (self.test_data is None):
logging.info('Prepare Test Data...')
df = self.data_loader.test_df
if (self.rank == 1):
neg_df = self.generate_neg_df(uid_list=df['uid'].tolist(), iid_list=df['iid'].tolist(), df=df, neg_n=self.test_neg_n, train=False)
df = pd.concat([df, neg_df], ignore_index=True)
self.test_data = self.format_data_dict(df)
self.test_data[global_p.K_SAMPLE_ID] = np.arange(0, len(self.test_data['Y']))
return self.test_data
def get_train_batches(self, batch_size, epoch):
return self.prepare_batches(self.get_train_data(epoch), batch_size, train=True)
def get_validation_batches(self, batch_size):
return self.prepare_batches(self.get_validation_data(), batch_size, train=False)
def get_test_batches(self, batch_size):
return self.prepare_batches(self.get_test_data(), batch_size, train=False)
def _get_feed_dict_rt(self, data, batch_start, batch_size, train):
batch_end = min(len(data['X']), (batch_start + batch_size))
real_batch_size = (batch_end - batch_start)
feed_dict = {'train': train, 'rank': 0, global_p.K_SAMPLE_ID: data[global_p.K_SAMPLE_ID][batch_start:(batch_start + real_batch_size)]}
if ('Y' in data):
feed_dict['Y'] = utils.numpy_to_torch(data['Y'][batch_start:(batch_start + real_batch_size)])
else:
feed_dict['Y'] = utils.numpy_to_torch(np.zeros(shape=real_batch_size))
for c in self.data_columns:
feed_dict[c] = utils.numpy_to_torch(data[c][batch_start:(batch_start + real_batch_size)])
return feed_dict
def _get_feed_dict_rk(self, data, batch_start, batch_size, train, neg_data=None):
if (not train):
feed_dict = self._get_feed_dict_rt(data=data, batch_start=batch_start, batch_size=batch_size, train=train)
feed_dict['rank'] = 1
else:
batch_end = min(len(data['X']), (batch_start + batch_size))
real_batch_size = (batch_end - batch_start)
neg_columns_dict = {}
if (neg_data is None):
logging.warning('neg_data is None')
neg_df = self.generate_neg_df(uid_list=data['uid'][batch_start:(batch_start + real_batch_size)], iid_list=data['iid'][batch_start:(batch_start + real_batch_size)], df=self.data_loader.train_df, neg_n=1, train=True)
neg_data = self.format_data_dict(neg_df)
for c in self.data_columns:
neg_columns_dict[c] = neg_data[c]
else:
for c in self.data_columns:
neg_columns_dict[c] = neg_data[c][batch_start:(batch_start + real_batch_size)]
y = np.concatenate([np.ones(shape=real_batch_size, dtype=np.float32), np.zeros(shape=real_batch_size, dtype=np.float32)])
sample_id = data[global_p.K_SAMPLE_ID][batch_start:(batch_start + real_batch_size)]
neg_sample_id = (sample_id + len(self.train_data['Y']))
feed_dict = {'train': train, 'rank': 1, 'Y': utils.numpy_to_torch(y), global_p.K_SAMPLE_ID: np.concatenate([sample_id, neg_sample_id])}
for c in self.data_columns:
feed_dict[c] = utils.numpy_to_torch(np.concatenate([data[c][batch_start:(batch_start + real_batch_size)], neg_columns_dict[c]]))
return feed_dict
def _prepare_batches_rt(self, data, batch_size, train):
'\n\t\tfor rating/clicking prediction\n\t\t'
if (data is None):
return None
num_example = len(data['X'])
total_batch = (num_example + batch_size - 1) // batch_size
assert (num_example > 0)
batches = []
for batch in tqdm(range(total_batch), leave=False, ncols=100, mininterval=1, desc='Prepare Batches'):
batches.append(self._get_feed_dict_rt(data, (batch * batch_size), batch_size, train))
return batches
def _prepare_batches_rk(self, data, batch_size, train):
'\n\t\tfor ranking task\n\t\t'
if (data is None):
return None
num_example = len(data['X'])
total_batch = (num_example + batch_size - 1) // batch_size
assert (num_example > 0)
neg_data = None
if train:
neg_df = self.generate_neg_df(uid_list=data['uid'], iid_list=data['iid'], df=self.data_loader.train_df, neg_n=1, train=True)
neg_data = self.format_data_dict(neg_df)
batches = []
for batch in tqdm(range(total_batch), leave=False, ncols=100, mininterval=1, desc='Prepare Batches'):
batches.append(self._get_feed_dict_rk(data, (batch * batch_size), batch_size, train, neg_data))
return batches
def prepare_batches(self, data, batch_size, train):
'\n\t\tconvert data dict to batches\n\t\t:param data: dict generated by self.get_*_data() and self.format_data_dict()\n\t\t:param batch_size: batch size\n\t\t:param train: train or validation/test\n\t\t:return: list of batches\n\t\t'
buffer_key = ''
if (data is self.validation_data):
buffer_key = ('validation_' + str(batch_size))
elif (data is self.test_data):
buffer_key = ('test_' + str(batch_size))
if (buffer_key in self.vt_batches_buffer):
return self.vt_batches_buffer[buffer_key]
if (self.rank == 1):
batches = self._prepare_batches_rk(data=data, batch_size=batch_size, train=train)
else:
batches = self._prepare_batches_rt(data=data, batch_size=batch_size, train=train)
if (buffer_key != ''):
self.vt_batches_buffer[buffer_key] = batches
return batches
def get_feed_dict(self, data, batch_start, batch_size, train, neg_data=None):
'\n\t\t:param data: data dict, generated by self.get_*_data() and self.format_data_dict()\n\t\t:param batch_start: start index of each batch\n\t\t:param batch_size: batch size\n\t\t:param train: train or validation/test\n\t\t:param neg_data: negative sample data dictionary\n\t\t:return: feed dict\n\t\t'
if (self.rank == 1):
return self._get_feed_dict_rk(data=data, batch_start=batch_start, batch_size=batch_size, train=train, neg_data=neg_data)
return self._get_feed_dict_rt(data=data, batch_start=batch_start, batch_size=batch_size, train=train)
def format_data_dict(self, df):
"\n\t\tformat Dataframe to data dictionary\n\t\t:param df: pandas Dataframe, contains 'uid','iid','label' three columns (at least)\n\t\t:return: data dict\n\t\t"
(data_loader, model) = (self.data_loader, self.model)
data = {}
out_columns = []
if ('uid' in df):
out_columns.append('uid')
data['uid'] = df['uid'].values
if ('iid' in df):
out_columns.append('iid')
data['iid'] = df['iid'].values
if (data_loader.label in df.columns):
data['Y'] = np.array(df[data_loader.label], dtype=np.float32)
else:
logging.warning(('No Labels In Data: ' + data_loader.label))
data['Y'] = np.zeros(len(df), dtype=np.float32)
ui_id = df[out_columns]
out_df = ui_id
if ((data_loader.user_df is not None) and model.include_user_features):
out_columns.extend(data_loader.user_features)
out_df = pd.merge(out_df, data_loader.user_df, on='uid', how='left')
if ((data_loader.item_df is not None) and model.include_item_features):
out_columns.extend(data_loader.item_features)
out_df = pd.merge(out_df, data_loader.item_df, on='iid', how='left')
out_df = out_df.fillna(0)
if model.include_context_features:
context = df[data_loader.context_features]
out_df = pd.concat([out_df, context], axis=1, ignore_index=True)
if (not model.include_id):
out_df = out_df.drop(columns=['uid', 'iid'])
base = 0
for feature in out_df.columns:
out_df[feature] = out_df[feature].apply((lambda x: (x + base)))
base += int((data_loader.column_max[feature] + 1))
if model.append_id:
x = pd.concat([ui_id, out_df], axis=1, ignore_index=True)
data['X'] = x.values.astype(int)
else:
data['X'] = out_df.values.astype(int)
assert (len(data['X']) == len(data['Y']))
return data
def generate_neg_df(self, uid_list, iid_list, df, neg_n, train):
'\n\t\tGenerate negative samples\n\t\t:param uid_list: users who need negative samples\n\t\t:param iid_list: the observed interaction (iid) of each user in uid_list\n\t\t:param df: DataFrame that provides the remaining columns for the generated rows\n\t\t:param neg_n: number of negative samples per user\n\t\t:param train: sample for train or validation/test\n\t\t:return: DataFrame of negative samples\n\t\t'
neg_df = self._sample_neg_from_uid_list(uids=uid_list, neg_n=neg_n, train=train, other_infos={'iid': iid_list})
neg_df = pd.merge(neg_df, df, on=['uid', 'iid'], how='left')
neg_df = neg_df.drop(columns=['iid'])
neg_df = neg_df.rename(columns={'iid_neg': 'iid'})
neg_df = neg_df[df.columns]
neg_df[self.data_loader.label] = 0
return neg_df
def _sample_neg_from_uid_list(self, uids, neg_n, train, other_infos=None):
'\n\t\tGet negative samples based on user history\n\t\t:param uids: uid list\n\t\t:param neg_n: the number of negative samples per uid\n\t\t:param train: sample for train data or validation/testing\n\t\t:param other_infos: columns other than uid/iid/label (e.g. the observed iids) to carry along with each sample\n\t\t:return: DataFrame, which needs self.format_data_dict() to convert to a data dictionary\n\t\t'
if (other_infos is None):
other_infos = {}
(uid_list, iid_list) = ([], [])
other_info_list = {}
for info in other_infos:
other_info_list[info] = []
tmp_history_dict = defaultdict(set)
item_num = self.data_loader.item_num
for (index, uid) in enumerate(uids):
if train:
inter_iids = (self.train_history_dict[uid] | tmp_history_dict[uid])
else:
inter_iids = ((self.train_history_dict[uid] | self.vt_history_dict[uid]) | tmp_history_dict[uid])
remain_iids_num = (item_num - len(inter_iids))
remain_iids = None
if (((1.0 * remain_iids_num) / item_num) < 0.2):
remain_iids = [i for i in range(1, item_num) if (i not in inter_iids)]
assert (remain_iids_num >= neg_n)
if (remain_iids is None):
for i in range(neg_n):
iid = np.random.randint(1, self.data_loader.item_num)
while (iid in inter_iids):
iid = np.random.randint(1, self.data_loader.item_num)
uid_list.append(uid)
iid_list.append(iid)
tmp_history_dict[uid].add(iid)
else:
iids = np.random.choice(remain_iids, neg_n, replace=False)
uid_list.extend(([uid] * neg_n))
iid_list.extend(iids)
tmp_history_dict[uid].update(iids)
for info in other_infos:
other_info_list[info].extend(([other_infos[info][index]] * neg_n))
neg_df = pd.DataFrame(data=list(zip(uid_list, iid_list)), columns=['uid', 'iid_neg'])
for info in other_infos:
neg_df[info] = other_info_list[info]
return neg_df
|
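# A minimal, self-contained sketch (hypothetical names) of the two sampling
# regimes used by _sample_neg_from_uid_list above: rejection sampling while the
# user's remaining item pool is large, and one-off enumeration plus sampling
# without replacement once the pool drops below 20% of the catalogue.
import numpy as np

def sample_negatives(interacted, item_num, neg_n, rng=None):
    rng = rng or np.random.default_rng(0)
    remain_num = item_num - len(interacted)
    assert remain_num >= neg_n
    if remain_num / item_num < 0.2:
        # dense user: enumerate the remaining items once, then sample
        remain = [i for i in range(1, item_num) if i not in interacted]
        return list(rng.choice(remain, neg_n, replace=False))
    # sparse user: rejection sampling is cheap
    picked = set()
    while len(picked) < neg_n:
        iid = int(rng.integers(1, item_num))
        if iid not in interacted and iid not in picked:
            picked.add(iid)
    return sorted(picked)

print(sample_negatives({1, 2, 3}, item_num=100, neg_n=5))
|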
def main():
parser = argparse.ArgumentParser(description='Model')
parser.add_argument('--gpu', type=str, default='0', help='Set CUDA_VISIBLE_DEVICES')
parser.add_argument('--verbose', type=int, default=logging.INFO, help='Logging Level, 0, 10, ..., 50')
parser.add_argument('--log_file', type=str, default='../log/log_0.txt', help='Logging file path')
parser.add_argument('--result_file', type=str, default='../result/result.npy', help='Result file path')
parser.add_argument('--random_seed', type=int, default=42, help='Random seed of numpy and pytorch')
parser.add_argument('--model_name', type=str, default='BiasedMF', help='Choose model to run.')
parser.add_argument('--model_path', type=str, help='Model save path.', default=os.path.join(MODEL_DIR, 'biasedMF.pt'))
parser.add_argument('--controller_model_path', type=str, help='Controller Model save path.', default=os.path.join(MODEL_DIR, 'controller.pt'))
parser.add_argument('--shared_cnn_model_path', type=str, help='Shared CNN Model save path.', default=os.path.join(MODEL_DIR, 'loss_formula.pt'))
parser.add_argument('--formula_path', type=str, help='Loss Formula save path.', default=os.path.join(MODEL_DIR, 'Formula.txt'))
parser.add_argument('--u_vector_size', type=int, default=64, help='Size of user vectors.')
parser.add_argument('--i_vector_size', type=int, default=64, help='Size of item vectors.')
parser.add_argument('--child_num_layers', type=int, default=12)
parser.add_argument('--child_num_branches', type=int, default=8)
parser.add_argument('--child_out_filters', type=int, default=36)
parser.add_argument('--sample_branch_id', action='store_true')
parser.add_argument('--sample_skip_id', action='store_true')
parser.add_argument('--search_loss', action='store_true', help='To search a loss or verify a loss')
parser.add_argument('--train_with_optim', action='store_true')
parser.add_argument('--child_grad_bound', type=float, default=5.0)
parser.add_argument('--smooth_coef', type=float, default=1e-06)
parser.add_argument('--layers', type=str, default='[64, 16]', help='Size of each layer. (For Deep RS Model.)')
parser.add_argument('--loss_func', type=str, default='BCE', help='Loss Function. Choose from ["BCE", "MSE", "Hinge", "Focal", "MaxR", "SumR", "LogMin"]')
parser = DataLoader.parse_data_args(parser)
parser = DataProcessor.parse_dp_args(parser)
parser = BaseRunner.parse_runner_args(parser)
parser = Controller.parse_Ctrl_args(parser)
parser = LossFormula.parse_Formula_args(parser)
(args, extras) = parser.parse_known_args()
torch.manual_seed(args.random_seed)
torch.cuda.manual_seed(args.random_seed)
np.random.seed(args.random_seed)
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
controller = Controller(search_for=args.search_for, search_whole_channels=True, num_layers=(args.child_num_layers + 3), num_branches=args.child_num_branches, out_filters=args.child_out_filters, lstm_size=args.controller_lstm_size, lstm_num_layers=args.controller_lstm_num_layers, tanh_constant=args.controller_tanh_constant, temperature=None, skip_target=args.controller_skip_target, skip_weight=args.controller_skip_weight, entropy_weight=args.controller_entropy_weight, bl_dec=args.controller_bl_dec, num_aggregate=args.controller_num_aggregate, model_path=args.controller_model_path, sample_branch_id=args.sample_branch_id, sample_skip_id=args.sample_skip_id)
controller = controller.cuda()
loss_formula = LossFormula(num_layers=(args.child_num_layers + 3), num_branches=args.child_num_branches, out_filters=args.child_out_filters, keep_prob=args.child_keep_prob, model_path=args.shared_cnn_model_path, epsilon=args.epsilon)
loss_formula = loss_formula.cuda()
controller_optimizer = torch.optim.Adam(params=controller.parameters(), lr=args.controller_lr, betas=(0.0, 0.999), eps=0.001)
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
logging.basicConfig(filename=args.log_file, level=args.verbose)
logging.info(vars(args))
model_name = eval(args.model_name)
data_loader = DataLoader(path=args.path, dataset=args.dataset, label=args.label, sep=args.sep)
model = model_name(user_num=data_loader.user_num, item_num=data_loader.item_num, u_vector_size=args.u_vector_size, i_vector_size=args.i_vector_size, model_path=args.model_path, smooth_coef=args.smooth_coef, layers=args.layers, loss_func=args.loss_func)
if (torch.cuda.device_count() > 0):
model = model.cuda()
data_processor = DataProcessor(data_loader, model, rank=False, test_neg_n=args.test_neg_n)
runner = BaseRunner(optimizer=args.optimizer, learning_rate=args.lr, epoch=args.epoch, batch_size=args.batch_size, eval_batch_size=args.eval_batch_size, dropout=args.dropout, l2=args.l2, metrics=args.metric, check_epoch=args.check_epoch, early_stop=args.early_stop, loss_formula=loss_formula, controller=controller, controller_optimizer=controller_optimizer, args=args)
runner.train(model, data_processor, skip_eval=args.skip_eval)
runner.evaluate(model, data_processor.get_test_data(), data_processor)
|
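# A small sketch of the parser-chaining pattern main() relies on: each
# component exposes a static parse_*_args(parser) hook that registers its own
# flags, and parse_known_args() tolerates flags belonging to other components.
# ToyRunner/ToyProcessor are hypothetical stand-ins.
import argparse

class ToyRunner(object):
    @staticmethod
    def parse_runner_args(parser):
        parser.add_argument('--lr', type=float, default=0.01)
        return parser

class ToyProcessor(object):
    @staticmethod
    def parse_dp_args(parser):
        parser.add_argument('--test_neg_n', type=int, default=10)
        return parser

parser = argparse.ArgumentParser(description='Model')
parser = ToyRunner.parse_runner_args(parser)
parser = ToyProcessor.parse_dp_args(parser)
args, extras = parser.parse_known_args(['--lr', '0.1', '--other_flag', '1'])
print(args.lr, args.test_neg_n, extras)  # 0.1 10 ['--other_flag', '1']
|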
class LossLayer(nn.Module):
'\n\thttps://github.com/melodyguan/enas/blob/master/src/cifar10/general_child.py#L245\n\t'
def __init__(self, layer_id, in_planes, out_planes, epsilon=1e-06):
super(LossLayer, self).__init__()
self.layer_id = layer_id
self.in_planes = in_planes
self.out_planes = out_planes
self.branch_0 = nn.Identity()
self.branch_1 = nn.Sigmoid()
self.branch_2 = nn.Tanh()
self.branch_3 = nn.ReLU()
self.epsilon = epsilon
def forward(self, prev_layers, sample_arc, small_epsilon=False):
layer_type = sample_arc[0]
if (self.layer_id > 0):
skip_indices = sample_arc[1]
else:
skip_indices = []
out = []
for (i, skip) in enumerate(skip_indices):
if (skip != 1):
out.append(prev_layers[i].reshape((- 1), 1))
out = torch.cat(out, 1).cuda()
epsilon = (1e-06 if small_epsilon else self.epsilon)
if (layer_type == 0):
out = torch.sum(out, dim=1)
elif (layer_type == 1):
out = torch.prod(out, dim=1)
elif (layer_type == 2):
out = torch.max(out, dim=1)[0]
elif (layer_type == 3):
out = torch.min(out, dim=1)[0]
elif (layer_type == 4):
out = (- out)
elif (layer_type == 5):
out = self.branch_0(out)
elif (layer_type == 6):
out = (torch.sign(out) * torch.log((torch.abs(out) + epsilon)))
elif (layer_type == 7):
out = (out ** 2)
elif (layer_type == 8):
out = (torch.sign(out) / (torch.abs(out) + epsilon))
elif (layer_type == 9):
out = self.branch_1(out)
elif (layer_type == 10):
out = self.branch_2(out)
elif (layer_type == 11):
out = self.branch_3(out)
elif (layer_type == 12):
out = torch.abs(out)
elif (layer_type == 13):
out = (torch.sign(out) * torch.sqrt((torch.abs(out) + epsilon)))
elif (layer_type == 14):
out = torch.exp(out)
else:
raise ValueError('Unknown layer_type {}'.format(layer_type))
out = torch.clamp(out, min=1e-05, max=100000.0)
return out
|
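# A CPU-only sketch of what one LossLayer step computes (the original moves
# tensors to CUDA). A sampled arc is [layer_type, skip_indices]: skip_indices
# selects which previous layers feed in, layer_type selects the operator.
# Only a few of the fifteen operators are shown here.
import torch

def loss_layer_step(prev_layers, layer_type, skip_indices, epsilon=1e-6):
    cols = [p.reshape(-1, 1) for p, skip in zip(prev_layers, skip_indices) if skip != 1]
    out = torch.cat(cols, dim=1)
    if layer_type == 0:          # sum across the selected inputs
        out = torch.sum(out, dim=1)
    elif layer_type == 1:        # product across the selected inputs
        out = torch.prod(out, dim=1)
    elif layer_type == 6:        # sign-preserving, epsilon-guarded log
        out = torch.sign(out) * torch.log(torch.abs(out) + epsilon)
    elif layer_type == 8:        # sign-preserving, epsilon-guarded reciprocal
        out = torch.sign(out) / (torch.abs(out) + epsilon)
    else:
        raise ValueError('operator not shown in this sketch')
    return torch.clamp(out, min=1e-5, max=1e5)  # same clamp as the original

pred = torch.tensor([0.9, 0.2])
label = torch.tensor([1.0, 0.0])
print(loss_layer_step([pred, label], layer_type=1, skip_indices=[0, 0]))
# tensor([9.0000e-01, 1.0000e-05])  -- pred * label, with the zero clamped up
|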
class LossFormula(nn.Module):
@staticmethod
def parse_Formula_args(parser):
parser.add_argument('--child_keep_prob', type=float, default=0.9)
parser.add_argument('--child_lr_max', type=float, default=0.05)
parser.add_argument('--child_lr_min', type=float, default=0.0005)
parser.add_argument('--child_lr_T', type=float, default=10)
parser.add_argument('--child_l2_reg', type=float, default=0.00025)
parser.add_argument('--epsilon', type=float, default=1e-06)
return parser
def __init__(self, model_path, num_layers=12, num_branches=6, out_filters=24, keep_prob=1.0, fixed_arc=None, epsilon=1e-06):
super(LossFormula, self).__init__()
self.num_layers = num_layers
self.num_branches = num_branches
self.out_filters = out_filters
self.keep_prob = keep_prob
self.fixed_arc = fixed_arc
self.model_path = model_path
self.layers = nn.ModuleList([])
for layer_id in range(self.num_layers):
layer = LossLayer(layer_id, (self.num_layers + 3), 1, epsilon)
self.layers.append(layer)
def forward(self, x, y, sample_arc, small_epsilon=False):
prev_layers = []
for i in range(self.num_layers):
prev_layers.append(torch.zeros_like(x))
prev_layers[0] = x
prev_layers[1] = y
prev_layers[2] = torch.ones_like(x)
for layer_id in range(3, self.num_layers):
out = self.layers[layer_id](prev_layers, sample_arc[str(layer_id)], small_epsilon)
prev_layers[layer_id] = out
return torch.mean(prev_layers[(- 1)])
def log_formula(self, sample_arc, id):
if (id == 0):
return 'pred'
if (id == 1):
return 'label'
if (id == 2):
return '1'
skip_indices = sample_arc[str(id)][1]
layer_type = int(sample_arc[str(id)][0][0])
return_str = '('
for i in range(id):
if (skip_indices[i] != 1):
if (layer_type == 0):
return_str += (self.log_formula(sample_arc, i) + ' + ')
elif (layer_type == 1):
return_str += (self.log_formula(sample_arc, i) + ' * ')
elif (layer_type == 2):
return_str += (self.log_formula(sample_arc, i) + ' , ')
elif (layer_type == 3):
return_str += (self.log_formula(sample_arc, i) + ' , ')
elif (layer_type == 4):
return (('- (' + self.log_formula(sample_arc, i)) + ')')
elif (layer_type == 5):
return self.log_formula(sample_arc, i)
elif (layer_type == 6):
return (('Log (' + self.log_formula(sample_arc, i)) + ')')
elif (layer_type == 7):
return (('(' + self.log_formula(sample_arc, i)) + ') ^ 2')
elif (layer_type == 8):
return (('1 / (' + self.log_formula(sample_arc, i)) + ')')
elif (layer_type == 9):
return (('Sigmoid (' + self.log_formula(sample_arc, i)) + ')')
elif (layer_type == 10):
return (('Tanh (' + self.log_formula(sample_arc, i)) + ')')
elif (layer_type == 11):
return (('ReLU (' + self.log_formula(sample_arc, i)) + ')')
elif (layer_type == 12):
return (('|' + self.log_formula(sample_arc, i)) + '|')
elif (layer_type == 13):
return (('Sqrt (' + self.log_formula(sample_arc, i)) + ')')
elif (layer_type == 14):
return (('e ^ (' + self.log_formula(sample_arc, i)) + ')')
if (layer_type == 2):
return_str = (('max' + return_str[:(- 3)]) + ')')
elif (layer_type == 3):
return_str = (('min' + return_str[:(- 3)]) + ')')
else:
return_str = (return_str[:(- 3)] + ')')
return return_str
def save_model(self, model_path=None):
'\n\t\tsave model\n\t\t'
if (model_path is None):
model_path = self.model_path
dir_path = os.path.dirname(model_path)
if (not os.path.exists(dir_path)):
os.mkdir(dir_path)
torch.save(self.state_dict(), model_path)
def load_model(self, model_path=None):
'\n\t\tload model\n\t\t'
if (model_path is None):
model_path = self.model_path
self.load_state_dict(torch.load(model_path))
self.eval()
|
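# Hand-evaluated sketch of the LossFormula DAG: layers 0-2 are seeded with the
# prediction, the label and an all-ones tensor, and each later layer applies
# its sampled operator to the previous layers its skip mask selects. The arc
# below is chosen by hand to reproduce squared error; in the real system the
# Controller samples it.
import torch

pred = torch.tensor([0.9, 0.2])
label = torch.tensor([1.0, 0.0])
layers = [pred, label, torch.ones_like(pred)]

layers.append(-layers[1])             # layer 3: op 4 (negation) on layer 1
layers.append(layers[0] + layers[3])  # layer 4: op 0 (sum) over layers 0 and 3
layers.append(layers[4] ** 2)         # layer 5: op 7 (square) on layer 4
print(torch.mean(layers[-1]))         # forward() returns the mean: tensor(0.0250)
|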
class BaseRunner(object):
@staticmethod
def parse_runner_args(parser):
parser.add_argument('--load', type=int, default=0, help='Whether load model and continue to train')
parser.add_argument('--epoch', type=int, default=100, help='Number of epochs.')
parser.add_argument('--check_epoch', type=int, default=1, help='Check every epochs.')
parser.add_argument('--early_stop', type=int, default=1, help='whether to early-stop.')
parser.add_argument('--lr', type=float, default=0.01, help='Learning rate.')
parser.add_argument('--batch_size', type=int, default=128, help='Batch size during training.')
parser.add_argument('--eval_batch_size', type=int, default=(128 * 128), help='Batch size during testing.')
parser.add_argument('--dropout', type=float, default=0.2, help='Dropout probability for each deep layer')
parser.add_argument('--l2', type=float, default=0.0001, help='Weight of l2_regularize in loss.')
parser.add_argument('--optimizer', type=str, default='GD', help='optimizer: GD, Adam, Adagrad')
parser.add_argument('--metric', type=str, default='AUC', help='metrics: RMSE, MAE, AUC, F1, Accuracy, Precision, Recall')
parser.add_argument('--skip_eval', type=int, default=0, help='number of epochs without evaluation')
parser.add_argument('--skip_rate', type=float, default=1.005, help='bad loss skip rate')
parser.add_argument('--rej_rate', type=float, default=1.005, help='bad training reject rate')
parser.add_argument('--skip_lim', type=float, default=1e-05, help='bad loss skip limit')
parser.add_argument('--rej_lim', type=float, default=1e-05, help='bad training reject limit')
parser.add_argument('--lower_bound_zero_gradient', type=float, default=0.0001, help='bound to check zero gradient')
parser.add_argument('--search_train_epoch', type=int, default=1, help='epoch num for training when searching loss')
parser.add_argument('--step_train_epoch', type=int, default=1, help='epoch num for training each step')
return parser
def __init__(self, optimizer='GD', learning_rate=0.01, epoch=100, batch_size=128, eval_batch_size=(128 * 128), dropout=0.2, l2=1e-05, metrics='AUC,RMSE', check_epoch=10, early_stop=1, controller=None, loss_formula=None, controller_optimizer=None, args=None):
self.optimizer_name = optimizer
self.learning_rate = learning_rate
self.epoch = epoch
self.batch_size = batch_size
self.eval_batch_size = eval_batch_size
self.dropout = dropout
self.no_dropout = 0.0
self.l2_weight = l2
self.metrics = metrics.lower().split(',')
self.check_epoch = check_epoch
self.early_stop = early_stop
self.time = None
(self.train_results, self.valid_results, self.test_results) = ([], [], [])
self.controller = controller
self.loss_formula = loss_formula
self.controller_optimizer = controller_optimizer
self.args = args
self.print_prediction = {}
def _build_optimizer(self, model):
optimizer_name = self.optimizer_name.lower()
if (optimizer_name == 'gd'):
logging.info('Optimizer: GD')
optimizer = torch.optim.SGD(model.parameters(), lr=self.learning_rate, weight_decay=self.l2_weight)
elif (optimizer_name == 'adagrad'):
logging.info('Optimizer: Adagrad')
optimizer = torch.optim.Adagrad(model.parameters(), lr=self.learning_rate, weight_decay=self.l2_weight)
elif (optimizer_name == 'adam'):
logging.info('Optimizer: Adam')
optimizer = torch.optim.Adam(model.parameters(), lr=self.learning_rate, weight_decay=self.l2_weight)
else:
logging.error(('Unknown Optimizer: ' + self.optimizer_name))
assert (self.optimizer_name in ['GD', 'Adagrad', 'Adam'])
optimizer = torch.optim.SGD(model.parameters(), lr=self.learning_rate, weight_decay=self.l2_weight)
return optimizer
def _check_time(self, start=False):
if ((self.time is None) or start):
self.time = ([time()] * 2)
return self.time[0]
tmp_time = self.time[1]
self.time[1] = time()
return (self.time[1] - tmp_time)
def batches_add_control(self, batches, train):
for batch in batches:
batch['train'] = train
batch['dropout'] = (self.dropout if train else self.no_dropout)
return batches
def predict(self, model, data, data_processor, train=False):
batches = data_processor.prepare_batches(data, self.eval_batch_size, train=train)
batches = self.batches_add_control(batches, train=train)
model.eval()
predictions = []
for batch in tqdm(batches, leave=False, ncols=100, mininterval=1, desc='Predict'):
prediction = model.predict(batch)['prediction']
predictions.append(prediction.detach().cpu())
predictions = np.concatenate(predictions)
sample_ids = np.concatenate([b[global_p.K_SAMPLE_ID] for b in batches])
reorder_dict = dict(zip(sample_ids, predictions))
predictions = np.array([reorder_dict[i] for i in data[global_p.K_SAMPLE_ID]])
return predictions
def fit(self, model, data, data_processor, epoch=(- 1), loss_fun=None, sample_arc=None, regularizer=True):
if (model.optimizer is None):
model.optimizer = self._build_optimizer(model)
batches = data_processor.prepare_batches(data, self.batch_size, train=True)
batches = self.batches_add_control(batches, train=True)
batch_size = (self.batch_size if (data_processor.rank == 0) else (self.batch_size * 2))
model.train()
accumulate_size = 0
to_show = (batches if self.args.search_loss else tqdm(batches, leave=False, desc=('Epoch %5d' % (epoch + 1)), ncols=100, mininterval=1))
for batch in to_show:
accumulate_size += len(batch['Y'])
model.optimizer.zero_grad()
output_dict = model(batch)
loss = (output_dict['loss'] + (model.l2() * self.l2_weight))
if ((loss_fun is not None) and (sample_arc is not None)):
loss = loss_fun(output_dict['prediction'], batch['Y'], sample_arc)
if regularizer:
loss += (model.l2() * self.l2_weight)
loss.backward()
torch.nn.utils.clip_grad_value_(model.parameters(), 50)
if ((accumulate_size >= batch_size) or (batch is batches[(- 1)])):
model.optimizer.step()
accumulate_size = 0
model.eval()
return output_dict
def eva_termination(self, model):
'\n\t\tCheck whether to terminate training early, based on validation results\n\t\t:param model: model\n\t\t:return: whether to terminate training\n\t\t'
metric = self.metrics[0]
valid = self.valid_results
if ((len(valid) > 100) and (metric in utils.LOWER_METRIC_LIST) and utils.strictly_increasing(valid[(- 10):])):
return True
elif ((len(valid) > 100) and (metric not in utils.LOWER_METRIC_LIST) and utils.strictly_decreasing(valid[(- 10):])):
return True
elif ((len(valid) - valid.index(utils.best_result(metric, valid))) > 100):
return True
return False
def predict_with_grad(self, model, data, data_processor, train=False):
'\n\t\tpredict without training\n\t\t:param model: model\n\t\t:param data: data dict, generated by DataProcessor self.get_*_data() and self.format_data_dict()\n\t\t:param data_processor: DataProcessor instance\n\t\t:return: prediction, a concatenated np.array\n\t\t'
batches = data_processor.prepare_batches(data, self.eval_batch_size, train=train)
batches = self.batches_add_control(batches, train=train)
model.eval()
predictions = []
for batch in tqdm(batches, leave=False, ncols=100, mininterval=1, desc='Predict'):
prediction = model.predict(batch)['prediction']
predictions.append(prediction)
predictions = torch.cat(predictions)
sample_ids = np.concatenate([b[global_p.K_SAMPLE_ID] for b in batches])
reorder_dict = dict(zip(sample_ids, predictions))
predictions = torch.tensor([reorder_dict[i] for i in data[global_p.K_SAMPLE_ID]], requires_grad=True).cuda()
return predictions
def train(self, model, data_processor, skip_eval=0):
train_data = data_processor.get_train_data(epoch=(- 1))
validation_data = data_processor.get_validation_data()
test_data = data_processor.get_test_data()
self._check_time(start=True)
init_train = (self.evaluate(model, train_data, data_processor, metrics=self.metrics[0:1]) if (train_data is not None) else ([(- 1.0)] * len(self.metrics)))
init_valid = (self.evaluate(model, validation_data, data_processor, metrics=self.metrics[0:1]) if (validation_data is not None) else ([(- 1.0)] * len(self.metrics)))
init_test = (self.evaluate(model, test_data, data_processor) if (test_data is not None) else ([(- 1.0)] * len(self.metrics)))
logging.info((('Init: \t train= %s validation= %s test= %s [%.1f s] ' % (utils.format_metric(init_train), utils.format_metric(init_valid), utils.format_metric(init_test), self._check_time())) + ','.join(self.metrics)))
min_reward = torch.tensor((- 1.0)).cuda()
if (model.optimizer is None):
model.optimizer = self._build_optimizer(model)
last_search_cnt = (self.controller.num_aggregate * self.args.controller_train_steps)
try:
for epoch in range(self.epoch):
self._check_time()
epoch_train_data = data_processor.get_train_data(epoch=epoch)
self.loss_formula.eval()
self.controller.zero_grad()
epoch_val_for_train_data = data_processor.get_val_data_for_train(epoch=epoch)
if self.args.search_loss:
start_auc = self.evaluate(model, validation_data, data_processor)[0]
baseline = torch.tensor(start_auc).cuda()
cur_model = copy.deepcopy(model)
grad_dict = dict()
test_pred = ((torch.rand(20).cuda() * 0.8) + 0.1)
test_label = torch.rand(20).cuda()
test_pred.requires_grad = True
max_reward = min_reward.clone().detach()
best_arc = None
for i in tqdm(range(last_search_cnt), leave=False, desc=('Epoch %5d' % (epoch + 1)), ncols=100, mininterval=1):
while True:
reward = None
self.controller()
sample_arc = self.controller.sample_arc
if (test_pred.grad is not None):
test_pred.grad.data.zero_()
test_loss = self.loss_formula(test_pred, test_label, sample_arc, small_epsilon=True)
try:
test_loss.backward()
except RuntimeError:
pass
if ((test_pred.grad is None) or (torch.norm(test_pred.grad, float('inf')) < self.args.lower_bound_zero_gradient)):
reward = min_reward.clone().detach()
if (reward is None):
for (key, value) in grad_dict.items():
if (torch.norm((test_pred.grad - key), float('inf')) < self.args.lower_bound_zero_gradient):
reward = value.clone().detach()
break
if (reward is None):
model.zero_grad()
for j in range(self.args.search_train_epoch):
last_batch = self.fit(model, epoch_train_data, data_processor, epoch=epoch, loss_fun=self.loss_formula, sample_arc=sample_arc, regularizer=False)
reward = torch.tensor(self.evaluate(model, validation_data, data_processor)[0]).cuda()
grad_dict[test_pred.grad.clone().detach()] = reward.clone().detach()
model = copy.deepcopy(cur_model)
if (reward < (baseline - self.args.skip_lim)):
reward = min_reward.clone().detach()
reward += (self.args.controller_entropy_weight * self.controller.sample_entropy)
else:
if (reward > max_reward):
max_reward = reward.clone().detach()
if self.args.train_with_optim:
best_arc = copy.deepcopy(sample_arc)
reward += (self.args.controller_entropy_weight * self.controller.sample_entropy)
baseline -= ((1 - self.args.controller_bl_dec) * (baseline - reward))
baseline = baseline.detach()
ctrl_loss = (((- 1) * self.controller.sample_log_prob) * (reward - baseline))
ctrl_loss /= self.controller.num_aggregate
if (((i + 1) % self.controller.num_aggregate) == 0):
ctrl_loss.backward()
grad_norm = torch.nn.utils.clip_grad_norm_(self.controller.parameters(), self.args.child_grad_bound)
self.controller_optimizer.step()
self.controller.zero_grad()
else:
ctrl_loss.backward(retain_graph=True)
break
self.controller.eval()
logging.info(('Best auc during controller train: %.3f; Starting auc: %.3f' % (max_reward.item(), start_auc)))
last_search_cnt = 0
if (self.args.train_with_optim and (best_arc is not None) and (max_reward > (start_auc - self.args.rej_lim))):
sample_arc = copy.deepcopy(best_arc)
for j in range(self.args.step_train_epoch):
last_batch = self.fit(model, epoch_train_data, data_processor, epoch=epoch, loss_fun=self.loss_formula, sample_arc=sample_arc)
new_auc = torch.tensor(self.evaluate(model, validation_data, data_processor)[0]).cuda()
print('Optimal: ', self.loss_formula.log_formula(sample_arc=sample_arc, id=(self.loss_formula.num_layers - 1)))
else:
grad_dict = dict()
self.controller.zero_grad()
while True:
with torch.no_grad():
self.controller(sampling=True)
last_search_cnt += 1
sample_arc = self.controller.sample_arc
if (test_pred.grad is not None):
test_pred.grad.data.zero_()
test_loss = self.loss_formula(test_pred, test_label, sample_arc, small_epsilon=True)
try:
test_loss.backward()
except RuntimeError:
pass
if ((test_pred.grad is None) or (torch.norm(test_pred.grad, float('inf')) < self.args.lower_bound_zero_gradient)):
continue
dup_flag = False
for key in grad_dict.keys():
if (torch.norm((test_pred.grad - key), float('inf')) < self.args.lower_bound_zero_gradient):
dup_flag = True
break
if dup_flag:
continue
print(self.loss_formula.log_formula(sample_arc=sample_arc, id=(self.loss_formula.num_layers - 1)))
grad_dict[test_pred.grad.clone().detach()] = True
model = copy.deepcopy(cur_model)
model.zero_grad()
for j in range(self.args.step_train_epoch):
last_batch = self.fit(model, epoch_train_data, data_processor, epoch=epoch, loss_fun=self.loss_formula, sample_arc=sample_arc)
new_auc = torch.tensor(self.evaluate(model, validation_data, data_processor)[0]).cuda()
if (new_auc > (start_auc - self.args.rej_lim)):
break
print(('Epoch %d: Reject!' % (epoch + 1)))
last_search_cnt = max((last_search_cnt // 10), (self.controller.num_aggregate * self.args.controller_train_steps))
if ((last_search_cnt % self.controller.num_aggregate) != 0):
last_search_cnt = (((last_search_cnt // self.controller.num_aggregate) + 1) * self.controller.num_aggregate)
logging.info(self.loss_formula.log_formula(sample_arc=sample_arc, id=(self.loss_formula.num_layers - 1)))
self.controller.train()
else:
last_batch = self.fit(model, epoch_train_data, data_processor, epoch=epoch, loss_fun=None, sample_arc=None)
training_time = self._check_time()
if (epoch >= skip_eval):
metrics = self.metrics[0:1]
train_result = (self.evaluate(model, train_data, data_processor, metrics=metrics) if (train_data is not None) else ([(- 1.0)] * len(self.metrics)))
valid_result = (self.evaluate(model, validation_data, data_processor, metrics=metrics) if (validation_data is not None) else ([(- 1.0)] * len(self.metrics)))
test_result = (self.evaluate(model, test_data, data_processor) if (test_data is not None) else ([(- 1.0)] * len(self.metrics)))
testing_time = self._check_time()
self.train_results.append(train_result)
self.valid_results.append(valid_result)
self.test_results.append(test_result)
logging.info((('Epoch %5d [%.1f s]\t train= %s validation= %s test= %s [%.1f s] ' % ((epoch + 1), training_time, utils.format_metric(train_result), utils.format_metric(valid_result), utils.format_metric(test_result), testing_time)) + ','.join(self.metrics)))
if (not self.args.search_loss):
print((('Epoch %5d [%.1f s]\t train= %s validation= %s test= %s [%.1f s] ' % ((epoch + 1), training_time, utils.format_metric(train_result), utils.format_metric(valid_result), utils.format_metric(test_result), testing_time)) + ','.join(self.metrics)))
if (utils.best_result(self.metrics[0], self.valid_results) == self.valid_results[(- 1)]):
model.save_model()
self.controller.save_model()
self.loss_formula.save_model()
if ((not self.args.search_loss) and self.eva_termination(model) and (self.early_stop == 1)):
logging.info(('Early stop at %d based on validation result.' % (epoch + 1)))
break
if (epoch < skip_eval):
logging.info(('Epoch %5d [%.1f s]' % ((epoch + 1), training_time)))
except KeyboardInterrupt:
logging.info('Early stop manually')
save_here = input('Save here? (1/0) (default 0):')
if str(save_here).lower().startswith('1'):
model.save_model()
self.controller.save_model()
self.loss_formula.save_model()
best_valid_score = utils.best_result(self.metrics[0], self.valid_results)
best_epoch = self.valid_results.index(best_valid_score)
logging.info((('Best Iter(validation)= %5d\t train= %s valid= %s test= %s [%.1f s] ' % ((best_epoch + 1), utils.format_metric(self.train_results[best_epoch]), utils.format_metric(self.valid_results[best_epoch]), utils.format_metric(self.test_results[best_epoch]), (self.time[1] - self.time[0]))) + ','.join(self.metrics)))
best_test_score = utils.best_result(self.metrics[0], self.test_results)
best_epoch = self.test_results.index(best_test_score)
logging.info((('Best Iter(test)= %5d\t train= %s valid= %s test= %s [%.1f s] ' % ((best_epoch + 1), utils.format_metric(self.train_results[best_epoch]), utils.format_metric(self.valid_results[best_epoch]), utils.format_metric(self.test_results[best_epoch]), (self.time[1] - self.time[0]))) + ','.join(self.metrics)))
model.load_model()
self.controller.load_model()
self.loss_formula.load_model()
def evaluate(self, model, data, data_processor, metrics=None):
'\n\t\tevaluate model performance\n\t\t:param model: model\n\t\t:param data: data dict, generated by DataProcessor self.get_*_data() and self.format_data_dict()\n\t\t:param data_processor: DataProcessor\n\t\t:param metrics: list of str\n\t\t:return: list of float, one per metric\n\t\t'
if (metrics is None):
metrics = self.metrics
predictions = self.predict(model, data, data_processor)
return model.evaluate_method(predictions, data, metrics=metrics)
|
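# Why predict() carries K_SAMPLE_ID through the batches: training data may be
# shuffled, so per-batch predictions are mapped back to the original row order
# before metrics are computed. A hypothetical mini example:
import numpy as np

batch_sample_ids = [np.array([2, 0]), np.array([1, 3])]   # order inside batches
batch_predictions = [np.array([0.7, 0.1]), np.array([0.4, 0.9])]

sample_ids = np.concatenate(batch_sample_ids)
predictions = np.concatenate(batch_predictions)
reorder_dict = dict(zip(sample_ids, predictions))
original_order = np.arange(4)                              # data[K_SAMPLE_ID]
print(np.array([reorder_dict[i] for i in original_order])) # [0.1 0.4 0.7 0.9]
|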
def subs(ss):
i = 0
return_s = ''
prev = 0
while (i < len(ss)):
if (ss[i] == '/'):
j = (i + 1)
count = 0
while (j < len(ss)):
if (ss[j] == '('):
count += 1
elif (ss[j] == ')'):
count -= 1
if (count == 0):
mid_s = subs(ss[(i + 2):j])
mid_s = (((((('(-1 if (' + mid_s) + ')<0 else 1) * (abs(') + mid_s) + ')+') + eps) + ')')
return_s += ((ss[prev:(i + 2)] + mid_s) + ss[j])
i = j
prev = (i + 1)
break
j += 1
elif ((ss[i] == 'l') and (ss[i:(i + 3)] == 'log')):
j = (i + 3)
count = 0
while (j < len(ss)):
if (ss[j] == '('):
count += 1
elif (ss[j] == ')'):
count -= 1
if (count == 0):
mid_s = subs(ss[(i + 4):j])
mid_s = (((((((('(' + mid_s) + '+') + eps) + ') if (') + mid_s) + ')>=0 else -(') + mid_s) + ')')
return_s += (((ss[prev:i] + 'np.log(') + mid_s) + ss[j])
i = j
prev = (i + 1)
break
j += 1
i += 1
return_s += ss[prev:]
return return_s
|
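# What subs() produces, assuming the module-level eps it reads is a string such
# as '1e-6' (it is defined elsewhere in the source): '/(...)' becomes a
# sign-preserving division with an epsilon-padded denominator, and 'log(...)'
# becomes an np.log over an epsilon-guarded, sign-adjusted argument.
eps = '1e-6'  # assumed value of the global read inside subs()
print(subs('1/(x)'))
# 1/((-1 if (x)<0 else 1) * (abs(x)+1e-6))
print(subs('log(x)'))
# np.log((x+1e-6) if (x)>=0 else -(x))
|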
def pos_f(input_x):
x = input_x
return eval(pos_s)
|
def neg_f(input_x):
x = input_x
return eval(neg_s)
|
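# Hypothetical end-to-end use of pos_f(): pos_s is a module-level string built
# by subs() from a searched formula (set by hand here), and pos_f evaluates it
# with x bound to a scalar input, so the rewritten formula can be inspected
# numerically (the ternaries in the rewritten string require scalar x, and the
# eval relies on np being imported at module level).
eps = '1e-6'            # assumed global read by subs()
pos_s = subs('log(x)')  # -> 'np.log((x+1e-6) if (x)>=0 else -(x))'
print(pos_f(0.5))       # ~ -0.6931, i.e. log(0.5 + 1e-6)
|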
def group_user_interactions_csv(in_csv, out_csv, label='label', sep='\t'):
print('group_user_interactions_csv', out_csv)
all_data = pd.read_csv(in_csv, sep=sep)
group_inters = group_user_interactions_df(in_df=all_data, label=label)
group_inters.to_csv(out_csv, sep=sep, index=False)
return group_inters
|
def group_user_interactions_df(in_df, label='label', seq_sep=','):
all_data = in_df
if (label in all_data.columns):
all_data = all_data[(all_data[label] > 0)]
(uids, inters) = ([], [])
for (name, group) in all_data.groupby('uid'):
uids.append(name)
inters.append(seq_sep.join(group['iid'].astype(str).tolist()))
group_inters = pd.DataFrame()
group_inters['uid'] = uids
group_inters['iids'] = inters
return group_inters
|
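# Quick usage example for group_user_interactions_df on toy data: positive
# interactions are grouped per user into a comma-separated iid string.
import pandas as pd

toy = pd.DataFrame({'uid': [1, 1, 2, 2], 'iid': [10, 11, 10, 12], 'label': [1, 0, 1, 1]})
print(group_user_interactions_df(toy))
#    uid   iids
# 0    1     10
# 1    2  10,12
|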
def parse_global_args(parser):
'\n Global command-line parameters\n :param parser:\n :return:\n '
parser.add_argument('--gpu', type=str, default='0', help='Set CUDA_VISIBLE_DEVICES')
parser.add_argument('--verbose', type=int, default=logging.INFO, help='Logging Level, 0, 10, ..., 50')
parser.add_argument('--log_file', type=str, default=os.path.join(LOG_DIR, 'log.txt'), help='Logging file path')
parser.add_argument('--result_file', type=str, default=os.path.join(RESULT_DIR, 'result.npy'), help='Result file path')
parser.add_argument('--random_seed', type=int, default=DEFAULT_SEED, help='Random seed of numpy and pytorch.')
parser.add_argument('--train', type=int, default=1, help='To train the model or not.')
return parser
|
def balance_data(data):
'\n Make the number of positive and negative examples close; use when the gap between positive and negative counts is too large\n :param data:\n :return:\n '
pos_indexes = np.where((data['Y'] == 1))[0]
copy_num = int(((len(data['Y']) - len(pos_indexes)) / len(pos_indexes)))
if (copy_num > 1):
copy_indexes = np.tile(pos_indexes, copy_num)
sample_index = np.concatenate([np.arange(0, len(data['Y'])), copy_indexes])
for k in data:
data[k] = data[k][sample_index]
return data
|
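# Toy run of balance_data: with 1 positive among 5 samples, copy_num is
# int((5 - 1) / 1) = 4, so the positive row is tiled four extra times and every
# field in the dict is re-indexed in unison (the dict is modified in place).
import numpy as np

data = {'Y': np.array([1, 0, 0, 0, 0]), 'X': np.arange(5)}
balanced = balance_data(data)
print(balanced['Y'])  # [1 0 0 0 0 1 1 1 1]
print(balanced['X'])  # [0 1 2 3 4 0 0 0 0]
|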
def input_data_is_list(data):
'\n If data is a list of dicts, merge them key-wise; used when testing multiple datasets, e.g. when validation and test are evaluated together\n :param data: dict or list\n :return:\n '
if ((type(data) is list) or (type(data) is tuple)):
print('input_data_is_list')
new_data = {}
for k in data[0]:
new_data[k] = np.concatenate([d[k] for d in data])
return new_data
return data
|
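# Toy run of input_data_is_list: a list of data dicts (e.g. validation and test
# evaluated together) is merged key-wise into a single dict.
import numpy as np

val = {'Y': np.array([1, 0])}
test = {'Y': np.array([0, 1, 1])}
print(input_data_is_list([val, test])['Y'])  # [1 0 0 1 1]
|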