text
stringlengths 1
93.6k
|
|---|
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
    r"""Construct a ResNeXt-101 32x8d model.

    Architecture described in `"Aggregated Residual Transformation for Deep
    Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # ResNeXt cardinality settings: 32 groups, 8 channels per group.
    kwargs.update(groups=32, width_per_group=8)
    stage_blocks = [3, 4, 23, 3]
    return _resnet('resnext101_32x8d', Bottleneck, stage_blocks,
                   pretrained, progress, **kwargs)
|
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
    r"""Construct a Wide ResNet-50-2 model.

    Architecture described in `"Wide Residual Networks"
    <https://arxiv.org/pdf/1605.07146.pdf>`_.

    Identical to ResNet-50 except that every bottleneck block uses twice as
    many internal channels; the outer 1x1 convolutions keep their widths
    (e.g. the last ResNet-50 block is 2048-512-2048, the Wide ResNet-50-2
    counterpart is 2048-1024-2048).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # Doubling width_per_group (64 -> 128) widens every bottleneck.
    kwargs.update(width_per_group=64 * 2)
    stage_blocks = [3, 4, 6, 3]
    return _resnet('wide_resnet50_2', Bottleneck, stage_blocks,
                   pretrained, progress, **kwargs)
|
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
    r"""Wide ResNet-101-2 model from
    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_

    The model is the same as ResNet except for the bottleneck number of channels
    which is twice larger in every block. The number of channels in outer 1x1
    convolutions is the same, e.g. last block in ResNet-101 has 2048-512-2048
    channels, and in Wide ResNet-101-2 has 2048-1024-2048.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # Fix: the docstring was copy-pasted from wide_resnet50_2 and referred to
    # "ResNet-50"/"Wide ResNet-50-2"; this builder produces the 101-layer
    # variant ([3, 4, 23, 3] stages). Code behavior is unchanged.
    kwargs['width_per_group'] = 64 * 2
    return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
                   pretrained, progress, **kwargs)
|
# <FILESEP>
|
import os
|
import time
|
import utils
|
import torch
|
import dataloader
|
import torchvision
|
from utils import *
|
from torch.nn import BCELoss
|
from torch.autograd import grad
|
import torchvision.utils as tvls
|
import torchvision.datasets as dsets
|
import torchvision.transforms as transforms
|
from discri import DGWGAN
|
from generator import Generator
|
def freeze(net):
    """Disable gradient tracking on every parameter of *net*."""
    for param in net.parameters():
        param.requires_grad = False
|
def unfreeze(net):
    """Re-enable gradient tracking on every parameter of *net*."""
    for param in net.parameters():
        param.requires_grad = True
|
def gradient_penalty(x, y):
    """WGAN-GP gradient penalty term: E[(||grad D(z)||_2 - 1)^2], evaluated
    at points z interpolated uniformly at random between the real batch *x*
    and the generated batch *y*.

    NOTE(review): relies on the module-level discriminator ``DG`` and on a
    CUDA device being available — confirm both before reuse elsewhere.
    """
    # One interpolation coefficient per sample, broadcast across all
    # remaining dimensions.
    alpha_shape = [x.size(0)] + [1] * (x.dim() - 1)
    alpha = torch.rand(alpha_shape).cuda()
    interp = x + alpha * (y - x)
    interp = interp.cuda()
    interp.requires_grad = True
    critic_out = DG(interp)
    grads = grad(critic_out, interp,
                 grad_outputs=torch.ones(critic_out.size()).cuda(),
                 create_graph=True)[0].view(interp.size(0), -1)
    return ((grads.norm(p=2, dim=1) - 1) ** 2).mean()
|
# --- Script configuration: output directories and logging -------------------
# Where generated sample images are written during training.
save_img_dir = "./binaryGAN/imgs_celeba_gan"
# Where model checkpoints are written.
save_model_dir= "./binaryGAN/"
os.makedirs(save_model_dir, exist_ok=True)
os.makedirs(save_img_dir, exist_ok=True)
dataset_name = "celeba"
# Training log location; file is opened in write mode (truncates any
# previous log).
log_path = "./attack_logs"
os.makedirs(log_path, exist_ok=True)
log_file = "binaryGAN_celeba.txt"
# NOTE(review): utils.Tee presumably duplicates stdout to the log file as a
# side effect of construction — the instance is not kept; confirm in utils.
utils.Tee(os.path.join(log_path, log_file), 'w')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.