# NOTE: removed dataset-export artifact ("code stringlengths ... |" table header)
# that was not part of the original source file.
def create_model(opt):
    """Create a model given the option.

    This function wraps the class returned by find_model_using_name and is the
    main interface between this package and 'train.py'/'test.py'.

    Example:
        >>> from models import create_model
        >>> model = create_model(opt)
    """
    model_class = find_model_using_name(opt.model)
    instance = model_class(opt)
    print('model [%s] was created' % type(instance).__name__)
    return instance
|
class BaseModel(ABC):
    """Abstract base class (ABC) for models.

    To create a subclass, you need to implement the following five functions:
        -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
        -- <set_input>: unpack data from dataset and apply preprocessing.
        -- <forward>: produce intermediate results.
        -- <optimize_parameters>: calculate losses, gradients, and update network weights.
        -- <modify_commandline_options>: (optionally) add model-specific options and set default options.
    """

    def __init__(self, opt):
        """Initialize the BaseModel class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be
                a subclass of BaseOptions

        When creating your custom class, you need to implement your own
        initialization. In this function, you should first call
        `BaseModel.__init__(self, opt)`. Then, you need to define four lists:
            -- self.loss_names (str list): training losses to plot and save
               (each name N must have a matching attribute self.loss_N).
            -- self.model_names (str list): networks used in training
               (each name N must have a matching attribute self.netN).
            -- self.visual_names (str list): images to display and save.
            -- self.optimizers (optimizer list): one optimizer per network; if
               two networks are updated at the same time, group their
               parameters with itertools.chain.
        """
        self.opt = opt
        self.gpu_ids = opt.gpu_ids
        self.isTrain = opt.isTrain
        # Run on the first configured GPU if any are given, else on the CPU.
        self.device = (torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu'))
        # Checkpoints for this experiment live under <checkpoints_dir>/<name>.
        self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)
        # cudnn benchmarking helps with fixed input sizes; 'scale_width'
        # preprocessing yields variable sizes, so it is skipped in that case.
        if (opt.preprocess != 'scale_width'):
            torch.backends.cudnn.benchmark = True
        self.loss_names = []
        self.model_names = []
        self.visual_names = []
        self.optimizers = []
        self.image_paths = []

    @staticmethod
    def modify_commandline_options(parser, is_train):
        """Add new model-specific options, and rewrite default values for existing options.

        Parameters:
            parser -- original option parser
            is_train (bool) -- whether training phase or test phase. You can use
                this flag to add training-specific or test-specific options.

        Returns:
            the modified parser.
        """
        return parser

    @abstractmethod
    def set_input(self, input):
        """Unpack input data from the dataloader and perform necessary pre-processing steps.

        Parameters:
            input (dict): includes the data itself and its metadata information.
        """
        pass

    @abstractmethod
    def forward(self):
        """Run forward pass; called by both functions <optimize_parameters> and <test>."""
        pass

    def is_train(self):
        """Check if the current batch is good for training (subclasses may override)."""
        return True

    @abstractmethod
    def optimize_parameters(self):
        """Calculate losses, gradients, and update network weights; called in every training iteration."""
        pass

    def setup(self, opt):
        """Load and print networks; create schedulers.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be
                a subclass of BaseOptions
        """
        if self.isTrain:
            self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers]
        # Load saved weights when testing, or when resuming training.
        if ((not self.isTrain) or opt.continue_train):
            self.load_networks(opt.epoch)
        self.print_networks(opt.verbose)

    def eval(self):
        """Make models eval mode during test time."""
        for name in self.model_names:
            if isinstance(name, str):
                net = getattr(self, ('net' + name))
                net.eval()

    def test(self):
        """Forward function used in test time.

        This function wraps <forward> in no_grad() so we don't save
        intermediate steps for backprop. It also calls <compute_visuals> to
        produce additional visualization results.
        """
        with torch.no_grad():
            self.forward()
            self.compute_visuals()

    def compute_visuals(self):
        """Calculate additional output images for visdom and HTML visualization."""
        pass

    def get_image_paths(self):
        """Return image paths that are used to load current data."""
        return self.image_paths

    def update_learning_rate(self):
        """Update learning rates for all the networks; called at the end of every epoch."""
        for scheduler in self.schedulers:
            scheduler.step()
        lr = self.optimizers[0].param_groups[0]['lr']
        print(('learning rate = %.7f' % lr))

    def get_current_visuals(self):
        """Return visualization images. train.py will display these images with visdom, and save the images to a HTML."""
        visual_ret = OrderedDict()
        for name in self.visual_names:
            if isinstance(name, str):
                visual_ret[name] = getattr(self, name)
        return visual_ret

    def get_current_losses(self):
        """Return training losses / errors. train.py will print out these errors on console, and save them to a file."""
        errors_ret = OrderedDict()
        for name in self.loss_names:
            if isinstance(name, str):
                # Each loss lives in attribute self.loss_<name>; cast to float
                # so 0-dim tensors serialize cleanly.
                errors_ret[name] = float(getattr(self, ('loss_' + name)))
        return errors_ret

    def save_networks(self, epoch):
        """Save all the networks to the disk.

        Parameters:
            epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
        """
        for name in self.model_names:
            if isinstance(name, str):
                save_filename = ('%s_net_%s.pth' % (epoch, name))
                save_path = os.path.join(self.save_dir, save_filename)
                net = getattr(self, ('net' + name))
                if ((len(self.gpu_ids) > 0) and torch.cuda.is_available()):
                    # Save CPU tensors from inside the DataParallel wrapper,
                    # then move the network back onto its GPU.
                    torch.save(net.module.cpu().state_dict(), save_path)
                    net.cuda(self.gpu_ids[0])
                else:
                    torch.save(net.cpu().state_dict(), save_path)

    def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):
        """Fix InstanceNorm checkpoints incompatibility (prior to PyTorch 0.4).

        Recursively walks the module tree along the dotted key path `keys`,
        popping stale InstanceNorm buffers from `state_dict` in place.
        """
        key = keys[i]
        if ((i + 1) == len(keys)):  # reached the parameter/buffer name
            # Old checkpoints stored running stats even when the module has none.
            if (module.__class__.__name__.startswith('InstanceNorm') and ((key == 'running_mean') or (key == 'running_var'))):
                if (getattr(module, key) is None):
                    state_dict.pop('.'.join(keys))
            if (module.__class__.__name__.startswith('InstanceNorm') and (key == 'num_batches_tracked')):
                state_dict.pop('.'.join(keys))
        else:
            self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, (i + 1))

    def load_networks(self, epoch):
        """Load all the networks from the disk.

        Parameters:
            epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
        """
        for name in self.model_names:
            if isinstance(name, str):
                load_filename = ('%s_net_%s.pth' % (epoch, name))
                load_path = os.path.join(self.save_dir, load_filename)
                net = getattr(self, ('net' + name))
                if isinstance(net, torch.nn.DataParallel):
                    net = net.module
                print(('loading the model from %s' % load_path))
                # Map the checkpoint directly onto this model's device.
                state_dict = torch.load(load_path, map_location=str(self.device))
                if hasattr(state_dict, '_metadata'):
                    del state_dict._metadata
                # Iterate over a copy of the keys since patching may pop entries.
                for key in list(state_dict.keys()):
                    self.__patch_instance_norm_state_dict(state_dict, net, key.split('.'))
                net.load_state_dict(state_dict)

    def print_networks(self, verbose):
        """Print the total number of parameters in the network and (if verbose) network architecture.

        Parameters:
            verbose (bool) -- if verbose: print the network architecture
        """
        print('---------- Networks initialized -------------')
        for name in self.model_names:
            if isinstance(name, str):
                net = getattr(self, ('net' + name))
                num_params = 0
                for param in net.parameters():
                    num_params += param.numel()
                if verbose:
                    print(net)
                print(('[Network %s] Total number of parameters : %.3f M' % (name, (num_params / 1000000.0))))
        print('-----------------------------------------------')

    def set_requires_grad(self, nets, requires_grad=False):
        """Set requires_grad=False for all the networks to avoid unnecessary computations.

        Parameters:
            nets (network list)  -- a list of networks (a single network is accepted too)
            requires_grad (bool) -- whether the networks require gradients or not
        """
        if (not isinstance(nets, list)):
            nets = [nets]
        for net in nets:
            if (net is not None):
                for param in net.parameters():
                    param.requires_grad = requires_grad
|
class DivCo2Model(BaseModel):
    """DivCo-style multimodal image-to-image translation model (cVAE-GAN +
    cLR-GAN) in which the latent-regression / contrastive features of the
    generated images come from a fixed VGG16 perceptual network instead of the
    encoder E.

    Each training batch is split in half: the first half drives the encoded
    (cVAE-GAN) path, the second half supplies real samples for the random
    (cLR-GAN) path's discriminator.
    """

    @staticmethod
    def modify_commandline_options(parser, is_train=True):
        """Add model-specific options (none) and return the parser unchanged."""
        return parser

    def __init__(self, opt):
        """Build generator, discriminators, encoder, losses and optimizers.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be
                a subclass of BaseOptions
        """
        if opt.isTrain:
            # forward() splits every batch into an encoded half and a random half.
            assert ((opt.batch_size % 2) == 0)
        BaseModel.__init__(self, opt)
        self.nz = 8  # dimensionality of the latent codes sampled for DivCo
        self.loss_names = ['G_GAN', 'D', 'G_GAN2', 'D2', 'G_L1', 'z_L1', 'kl', 'contra']
        self.visual_names = ['real_A_encoded', 'real_B_encoded', 'fake_B_random1a', 'fake_B_random1b', 'fake_B_random2', 'fake_B_encoded']
        # D scores the encoded path, D2 the random path (unless D is shared);
        # E is also needed at test time unless encoding is disabled.
        use_D = (opt.isTrain and (opt.lambda_GAN > 0.0))
        use_D2 = (opt.isTrain and (opt.lambda_GAN2 > 0.0) and (not opt.use_same_D))
        use_E = (opt.isTrain or (not opt.no_encode))
        use_vae = True
        self.model_names = ['G']
        self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.nz, opt.ngf, netG=opt.netG, norm=opt.norm, nl=opt.nl, use_dropout=opt.use_dropout, init_type=opt.init_type, init_gain=opt.init_gain, gpu_ids=self.gpu_ids, where_add=opt.where_add, upsample=opt.upsample)
        # Conditional discriminators see the input concatenated with the output.
        D_output_nc = ((opt.input_nc + opt.output_nc) if opt.conditional_D else opt.output_nc)
        if use_D:
            self.model_names += ['D']
            self.netD = networks.define_D(D_output_nc, opt.ndf, netD=opt.netD, norm=opt.norm, nl=opt.nl, init_type=opt.init_type, init_gain=opt.init_gain, num_Ds=opt.num_Ds, gpu_ids=self.gpu_ids)
        if use_D2:
            self.model_names += ['D2']
            self.netD2 = networks.define_D(D_output_nc, opt.ndf, netD=opt.netD2, norm=opt.norm, nl=opt.nl, init_type=opt.init_type, init_gain=opt.init_gain, num_Ds=opt.num_Ds, gpu_ids=self.gpu_ids)
        else:
            self.netD2 = None
        if use_E:
            self.model_names += ['E']
            self.netE = networks.define_E(opt.output_nc, opt.nz, opt.nef, netE=opt.netE, norm=opt.norm, nl=opt.nl, init_type=opt.init_type, init_gain=opt.init_gain, gpu_ids=self.gpu_ids, vaeLike=use_vae)
        if opt.isTrain:
            self.criterionGAN = networks.GANLoss(gan_mode=opt.gan_mode).to(self.device)
            self.criterionL1 = torch.nn.L1Loss()
            self.criterionZ = torch.nn.L1Loss()
            self.cross_entropy_loss = torch.nn.CrossEntropyLoss()
            self.optimizers = []
            self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizers.append(self.optimizer_G)
            if use_E:
                self.optimizer_E = torch.optim.Adam(self.netE.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
                self.optimizers.append(self.optimizer_E)
            if use_D:
                self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
                self.optimizers.append(self.optimizer_D)
            if use_D2:
                self.optimizer_D2 = torch.optim.Adam(self.netD2.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
                self.optimizers.append(self.optimizer_D2)
            # Frozen VGG16 feature extractor (first 23 layers of .features)
            # used as a fixed perceptual embedding of generated images.
            vgg16 = tv.models.vgg16(pretrained=True)
            self.perceptualnet = nn.Sequential(*list(vgg16.features.children())[:23])
            self.load_dict(vgg16)
            for param in self.perceptualnet.parameters():
                param.requires_grad = False
            print(self.perceptualnet)
            # NOTE(review): unconditional .cuda() — looks like training is
            # assumed GPU-only; would fail on a CPU-only setup. Confirm.
            self.perceptualnet.cuda()

    def load_dict(self, pretrained_net):
        """Copy the weights of `pretrained_net` whose keys exist in perceptualnet.

        NOTE(review): vgg16.state_dict() keys carry a 'features.' prefix while
        perceptualnet (a Sequential of its children) does not — verify that any
        entries actually match and transfer.
        """
        pretrained_dict = pretrained_net.state_dict()
        process_dict = self.perceptualnet.state_dict()
        # Keep only entries whose keys are present in perceptualnet.
        pretrained_dict = {k: v for (k, v) in pretrained_dict.items() if (k in process_dict)}
        process_dict.update(pretrained_dict)
        self.perceptualnet.load_state_dict(process_dict)

    def is_train(self):
        """Check if the current batch is good for training (must be a full batch)."""
        return (self.opt.isTrain and (self.real_A.size(0) == self.opt.batch_size))

    def set_input(self, input):
        """Unpack input data from the dataloader.

        Parameters:
            input (dict) -- images under 'A'/'B' and their paths under
                'A_paths'/'B_paths'; opt.direction selects which side is the source.
        """
        AtoB = (self.opt.direction == 'AtoB')
        self.real_A = input[('A' if AtoB else 'B')].to(self.device)
        self.real_B = input[('B' if AtoB else 'A')].to(self.device)
        self.image_paths = input[('A_paths' if AtoB else 'B_paths')]

    def get_z_random(self, batch_size, nz, random_type='gauss'):
        """Sample a (batch_size, nz) latent code, uniform in [-1, 1] ('uni') or
        standard normal ('gauss'), detached and on the model device."""
        if (random_type == 'uni'):
            z = ((torch.rand(batch_size, nz) * 2.0) - 1.0)
        elif (random_type == 'gauss'):
            z = torch.randn(batch_size, nz)
        return z.detach().to(self.device)

    def encode(self, input_image):
        """Encode an image into a latent code via the reparameterization trick.

        Returns:
            (z, mu, logvar) -- sampled code and the posterior parameters.
        """
        (mu, logvar) = self.netE.forward(input_image)
        std = logvar.mul(0.5).exp_()
        eps = self.get_z_random(std.size(0), std.size(1))
        z = eps.mul(std).add_(mu)
        return (z, mu, logvar)

    def test(self, z0=None, encode=False):
        """Generate fake_B from real_A at test time.

        Parameters:
            z0 (tensor or None) -- latent code; sampled randomly when None.
            encode (bool)       -- when True, obtain z0 from netE(real_B).

        Returns:
            (real_A, fake_B, real_B)
        """
        with torch.no_grad():
            if encode:
                (z0, _) = self.netE(self.real_B)
            if (z0 is None):
                z0 = self.get_z_random(self.real_A.size(0), self.opt.nz)
            self.fake_B = self.netG(self.real_A, z0)
            return (self.real_A, self.fake_B, self.real_B)

    def latent_augmented_sampling(self):
        """Sample a DivCo code set: an anchor (query), a positive within radius
        opt.radius of the query, and opt.num_negative negatives re-drawn until
        they lie outside that radius.

        NOTE(review): uses torch.cuda.FloatTensor directly — assumes CUDA.
        """
        query = self.get_z_random(self.real_A_encoded.size(0), self.nz, 'gauss')
        pos = torch.cuda.FloatTensor(query.shape).uniform_((- self.opt.radius), self.opt.radius).add_(query)
        negs = []
        for k in range(self.opt.num_negative):
            neg = self.get_z_random(self.real_A_encoded.size(0), self.nz, 'gauss')
            while ((neg - query).abs().min() < self.opt.radius):
                neg = self.get_z_random(self.real_A_encoded.size(0), self.nz, 'gauss')
            negs.append(neg)
        return (query, pos, negs)

    def forward(self):
        """Run the cVAE-GAN (encoded) and cLR-GAN (random) forward passes."""
        half_size = (self.opt.batch_size // 2)
        self.half_size = half_size
        # First half feeds the encoded path; second half supplies real samples
        # for the random path's discriminator.
        self.real_A_encoded = self.real_A[0:half_size]
        self.real_B_encoded = self.real_B[0:half_size]
        self.real_A_random = self.real_A[half_size:]
        self.real_B_random = self.real_B[half_size:]
        # cVAE-GAN: encode real_B, then reconstruct it through G.
        (self.z_encoded, self.mu, self.logvar) = self.encode(self.real_B_encoded)
        # DivCo: stack query / positive / negatives along the batch dimension.
        (query, pos, negs) = self.latent_augmented_sampling()
        self.z_random = torch.cat(([query, pos] + negs), 0)
        self.fake_B_encoded = self.netG(self.real_A_encoded, self.z_encoded)
        # Repeat real_A (num_negative + 2) times so every latent code gets an input.
        self.fake_B_random = self.netG(self.real_A_encoded.repeat(([(self.opt.num_negative + 2)] + list(np.ones(len(self.real_A_encoded.shape[1:]), dtype=np.uint8)))), self.z_random)
        if self.opt.conditional_D:
            # Conditional discriminators see (input, output) channel-concatenated pairs.
            self.fake_data_encoded = torch.cat([self.real_A_encoded, self.fake_B_encoded], 1)
            self.real_data_encoded = torch.cat([self.real_A_encoded, self.real_B_encoded], 1)
            self.fake_data_random = torch.cat([self.real_A_encoded, self.fake_B_random[:half_size]], 1)
            self.real_data_random = torch.cat([self.real_A_random, self.real_B_random], 1)
        else:
            self.fake_data_encoded = self.fake_B_encoded
            self.fake_data_random = self.fake_B_random[:half_size]
            self.real_data_encoded = self.real_B_encoded
            self.real_data_random = self.real_B_random
        # One sample per code group for visualization (query / positive / first negative).
        self.fake_B_random1a = self.fake_B_random[0:1]
        self.fake_B_random1b = self.fake_B_random[half_size:(half_size + 1)]
        self.fake_B_random2 = self.fake_B_random[(half_size * 2):((half_size * 2) + 1)]
        if ((self.opt.lambda_z > 0.0) or (self.opt.lambda_contra > 0.0)):
            # Pool the frozen VGG features of the generated images into one
            # vector per image, used for latent regression and contrast.
            self.mu2 = nn.AdaptiveAvgPool2d(1)(self.perceptualnet(self.fake_B_random)).view(self.fake_B_random.shape[0], (- 1))

    def backward_D(self, netD, real, fake):
        """Standard discriminator update: real scored as real, fake (detached) as fake."""
        pred_fake = netD(fake.detach())
        pred_real = netD(real)
        (loss_D_fake, _) = self.criterionGAN(pred_fake, False)
        (loss_D_real, _) = self.criterionGAN(pred_real, True)
        loss_D = (loss_D_fake + loss_D_real)
        loss_D.backward()
        return (loss_D, [loss_D_fake, loss_D_real])

    def backward_G_GAN(self, fake, netD=None, ll=0.0):
        """Generator GAN loss on `fake`, weighted by ll (skipped when ll == 0)."""
        if (ll > 0.0):
            pred_fake = netD(fake)
            (loss_G_GAN, _) = self.criterionGAN(pred_fake, True)
        else:
            loss_G_GAN = 0
        return (loss_G_GAN * ll)

    def backward_EG(self):
        """Backward pass for G and E: GAN losses, L1 reconstruction and KL."""
        self.loss_G_GAN = self.backward_G_GAN(self.fake_data_encoded, self.netD, self.opt.lambda_GAN)
        if self.opt.use_same_D:
            self.loss_G_GAN2 = self.backward_G_GAN(self.fake_data_random, self.netD, self.opt.lambda_GAN2)
        else:
            self.loss_G_GAN2 = self.backward_G_GAN(self.fake_data_random, self.netD2, self.opt.lambda_GAN2)
        if (self.opt.lambda_kl > 0.0):
            # KL divergence between the encoder posterior and N(0, I).
            self.loss_kl = (torch.sum((((1 + self.logvar) - self.mu.pow(2)) - self.logvar.exp())) * ((- 0.5) * self.opt.lambda_kl))
        else:
            self.loss_kl = 0
        if (self.opt.lambda_L1 > 0.0):
            self.loss_G_L1 = (self.criterionL1(self.fake_B_encoded, self.real_B_encoded) * self.opt.lambda_L1)
        else:
            self.loss_G_L1 = 0.0
        self.loss_G = (((self.loss_G_GAN + self.loss_G_GAN2) + self.loss_G_L1) + self.loss_kl)
        # The graph is reused by backward_G_alone afterwards.
        self.loss_G.backward(retain_graph=True)

    def update_D(self):
        """Update discriminators: D on the encoded path, D2 (or D again) on the random path."""
        self.set_requires_grad([self.netD, self.netD2], True)
        if (self.opt.lambda_GAN > 0.0):
            self.optimizer_D.zero_grad()
            (self.loss_D, self.losses_D) = self.backward_D(self.netD, self.real_data_encoded, self.fake_data_encoded)
            if self.opt.use_same_D:
                (self.loss_D2, self.losses_D2) = self.backward_D(self.netD, self.real_data_random, self.fake_data_random)
            self.optimizer_D.step()
        if ((self.opt.lambda_GAN2 > 0.0) and (not self.opt.use_same_D)):
            self.optimizer_D2.zero_grad()
            (self.loss_D2, self.losses_D2) = self.backward_D(self.netD2, self.real_data_random, self.fake_data_random)
            self.optimizer_D2.step()

    def backward_G_alone(self):
        """Backward pass for losses applied to G only: latent regression (z_L1)
        and the DivCo contrastive loss.

        NOTE(review): loss_contra is accumulated but .backward() is never
        called on it here — confirm it is applied elsewhere or intentionally
        logged only.
        """
        if (self.opt.lambda_z > 0.0):
            self.loss_z_L1 = (self.criterionZ(self.mu2, self.z_random) * self.opt.lambda_z)
            self.loss_z_L1.backward()
        else:
            self.loss_z_L1 = 0.0
        self.loss_contra = 0.0
        if (self.opt.lambda_contra > 0.0):
            for i in range(self.half_size):
                # Stride half_size selects the same image across the
                # query/positive/negative code groups.
                mu = self.mu2[i:self.mu2.shape[0]:self.half_size]
                if self.opt.featnorm:
                    mu /= torch.norm(mu, p=2, dim=1, keepdim=True)
                self.loss_contra += self.compute_contrastive_loss(mu[0:1], mu[1:])
            self.loss_contra = (self.loss_contra * self.opt.lambda_contra)

    def compute_contrastive_loss(self, feat_q, feat_k):
        """InfoNCE-style loss: the first key (index 0, the positive) should
        score highest against the query; tau is the softmax temperature."""
        out = (torch.mm(feat_q, feat_k.transpose(1, 0)) / self.opt.tau)
        loss = self.cross_entropy_loss(out, torch.zeros(out.size(0), dtype=torch.long, device=feat_q.device))
        return loss

    def update_G_and_E(self):
        """Update generator G and encoder E with the discriminators frozen."""
        self.set_requires_grad([self.netD, self.netD2], False)
        self.optimizer_E.zero_grad()
        self.optimizer_G.zero_grad()
        self.backward_EG()
        if ((self.opt.lambda_z > 0.0) or (self.opt.lambda_contra > 0.0)):
            # The z_L1 / contrastive terms must not update the encoder.
            self.set_requires_grad([self.netE], False)
            self.backward_G_alone()
            self.set_requires_grad([self.netE], True)
        self.optimizer_E.step()
        self.optimizer_G.step()

    def optimize_parameters(self):
        """One training iteration: forward, update G/E, then update D/D2."""
        self.forward()
        self.update_G_and_E()
        self.update_D()
|
class DivCoModel(BaseModel):
    """DivCo multimodal image-to-image translation model (cVAE-GAN + cLR-GAN).

    Mirrors DivCo2Model except that the latent codes of generated images are
    recovered with the encoder E (self.netE) rather than a fixed VGG16
    perceptual network.

    Each training batch is split in half: the first half drives the encoded
    (cVAE-GAN) path, the second half supplies real samples for the random
    (cLR-GAN) path's discriminator.
    """

    @staticmethod
    def modify_commandline_options(parser, is_train=True):
        """Add model-specific options (none) and return the parser unchanged."""
        return parser

    def __init__(self, opt):
        """Build generator, discriminators, encoder, losses and optimizers.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be
                a subclass of BaseOptions
        """
        if opt.isTrain:
            # forward() splits every batch into an encoded half and a random half.
            assert ((opt.batch_size % 2) == 0)
        BaseModel.__init__(self, opt)
        self.nz = 8  # dimensionality of the latent codes sampled for DivCo
        self.loss_names = ['G_GAN', 'D', 'G_GAN2', 'D2', 'G_L1', 'z_L1', 'kl', 'contra']
        self.visual_names = ['real_A_encoded', 'real_B_encoded', 'fake_B_random1a', 'fake_B_random1b', 'fake_B_random2', 'fake_B_encoded']
        # D scores the encoded path, D2 the random path (unless D is shared);
        # E is also needed at test time unless encoding is disabled.
        use_D = (opt.isTrain and (opt.lambda_GAN > 0.0))
        use_D2 = (opt.isTrain and (opt.lambda_GAN2 > 0.0) and (not opt.use_same_D))
        use_E = (opt.isTrain or (not opt.no_encode))
        use_vae = True
        self.model_names = ['G']
        self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.nz, opt.ngf, netG=opt.netG, norm=opt.norm, nl=opt.nl, use_dropout=opt.use_dropout, init_type=opt.init_type, init_gain=opt.init_gain, gpu_ids=self.gpu_ids, where_add=opt.where_add, upsample=opt.upsample)
        # Conditional discriminators see the input concatenated with the output.
        D_output_nc = ((opt.input_nc + opt.output_nc) if opt.conditional_D else opt.output_nc)
        if use_D:
            self.model_names += ['D']
            self.netD = networks.define_D(D_output_nc, opt.ndf, netD=opt.netD, norm=opt.norm, nl=opt.nl, init_type=opt.init_type, init_gain=opt.init_gain, num_Ds=opt.num_Ds, gpu_ids=self.gpu_ids)
        if use_D2:
            self.model_names += ['D2']
            self.netD2 = networks.define_D(D_output_nc, opt.ndf, netD=opt.netD2, norm=opt.norm, nl=opt.nl, init_type=opt.init_type, init_gain=opt.init_gain, num_Ds=opt.num_Ds, gpu_ids=self.gpu_ids)
        else:
            self.netD2 = None
        if use_E:
            self.model_names += ['E']
            self.netE = networks.define_E(opt.output_nc, opt.nz, opt.nef, netE=opt.netE, norm=opt.norm, nl=opt.nl, init_type=opt.init_type, init_gain=opt.init_gain, gpu_ids=self.gpu_ids, vaeLike=use_vae)
        if opt.isTrain:
            self.criterionGAN = networks.GANLoss(gan_mode=opt.gan_mode).to(self.device)
            self.criterionL1 = torch.nn.L1Loss()
            self.criterionZ = torch.nn.L1Loss()
            self.cross_entropy_loss = torch.nn.CrossEntropyLoss()
            self.optimizers = []
            self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizers.append(self.optimizer_G)
            if use_E:
                self.optimizer_E = torch.optim.Adam(self.netE.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
                self.optimizers.append(self.optimizer_E)
            if use_D:
                self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
                self.optimizers.append(self.optimizer_D)
            if use_D2:
                self.optimizer_D2 = torch.optim.Adam(self.netD2.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
                self.optimizers.append(self.optimizer_D2)

    def is_train(self):
        """Check if the current batch is good for training (must be a full batch)."""
        return (self.opt.isTrain and (self.real_A.size(0) == self.opt.batch_size))

    def set_input(self, input):
        """Unpack input data from the dataloader.

        Parameters:
            input (dict) -- images under 'A'/'B' and their paths under
                'A_paths'/'B_paths'; opt.direction selects which side is the source.
        """
        AtoB = (self.opt.direction == 'AtoB')
        self.real_A = input[('A' if AtoB else 'B')].to(self.device)
        self.real_B = input[('B' if AtoB else 'A')].to(self.device)
        self.image_paths = input[('A_paths' if AtoB else 'B_paths')]

    def get_z_random(self, batch_size, nz, random_type='gauss'):
        """Sample a (batch_size, nz) latent code, uniform in [-1, 1] ('uni') or
        standard normal ('gauss'), detached and on the model device."""
        if (random_type == 'uni'):
            z = ((torch.rand(batch_size, nz) * 2.0) - 1.0)
        elif (random_type == 'gauss'):
            z = torch.randn(batch_size, nz)
        return z.detach().to(self.device)

    def encode(self, input_image):
        """Encode an image into a latent code via the reparameterization trick.

        Returns:
            (z, mu, logvar) -- sampled code and the posterior parameters.
        """
        (mu, logvar) = self.netE.forward(input_image)
        std = logvar.mul(0.5).exp_()
        eps = self.get_z_random(std.size(0), std.size(1))
        z = eps.mul(std).add_(mu)
        return (z, mu, logvar)

    def test(self, z0=None, encode=False):
        """Generate fake_B from real_A at test time.

        Parameters:
            z0 (tensor or None) -- latent code; sampled randomly when None.
            encode (bool)       -- when True, obtain z0 from netE(real_B).

        Returns:
            (real_A, fake_B, real_B)
        """
        with torch.no_grad():
            if encode:
                (z0, _) = self.netE(self.real_B)
            if (z0 is None):
                z0 = self.get_z_random(self.real_A.size(0), self.opt.nz)
            self.fake_B = self.netG(self.real_A, z0)
            return (self.real_A, self.fake_B, self.real_B)

    def latent_augmented_sampling(self):
        """Sample a DivCo code set: an anchor (query), a positive within radius
        opt.radius of the query, and opt.num_negative negatives re-drawn until
        they lie outside that radius.

        NOTE(review): uses torch.cuda.FloatTensor directly — assumes CUDA.
        """
        query = self.get_z_random(self.real_A_encoded.size(0), self.nz, 'gauss')
        pos = torch.cuda.FloatTensor(query.shape).uniform_((- self.opt.radius), self.opt.radius).add_(query)
        negs = []
        for k in range(self.opt.num_negative):
            neg = self.get_z_random(self.real_A_encoded.size(0), self.nz, 'gauss')
            while ((neg - query).abs().min() < self.opt.radius):
                neg = self.get_z_random(self.real_A_encoded.size(0), self.nz, 'gauss')
            negs.append(neg)
        return (query, pos, negs)

    def forward(self):
        """Run the cVAE-GAN (encoded) and cLR-GAN (random) forward passes."""
        half_size = (self.opt.batch_size // 2)
        self.half_size = half_size
        # First half feeds the encoded path; second half supplies real samples
        # for the random path's discriminator.
        self.real_A_encoded = self.real_A[0:half_size]
        self.real_B_encoded = self.real_B[0:half_size]
        self.real_A_random = self.real_A[half_size:]
        self.real_B_random = self.real_B[half_size:]
        # cVAE-GAN: encode real_B, then reconstruct it through G.
        (self.z_encoded, self.mu, self.logvar) = self.encode(self.real_B_encoded)
        # DivCo: stack query / positive / negatives along the batch dimension.
        (query, pos, negs) = self.latent_augmented_sampling()
        self.z_random = torch.cat(([query, pos] + negs), 0)
        self.fake_B_encoded = self.netG(self.real_A_encoded, self.z_encoded)
        # Repeat real_A (num_negative + 2) times so every latent code gets an input.
        self.fake_B_random = self.netG(self.real_A_encoded.repeat(([(self.opt.num_negative + 2)] + list(np.ones(len(self.real_A_encoded.shape[1:]), dtype=np.uint8)))), self.z_random)
        if self.opt.conditional_D:
            # Conditional discriminators see (input, output) channel-concatenated pairs.
            self.fake_data_encoded = torch.cat([self.real_A_encoded, self.fake_B_encoded], 1)
            self.real_data_encoded = torch.cat([self.real_A_encoded, self.real_B_encoded], 1)
            self.fake_data_random = torch.cat([self.real_A_encoded, self.fake_B_random[:half_size]], 1)
            self.real_data_random = torch.cat([self.real_A_random, self.real_B_random], 1)
        else:
            self.fake_data_encoded = self.fake_B_encoded
            self.fake_data_random = self.fake_B_random[:half_size]
            self.real_data_encoded = self.real_B_encoded
            self.real_data_random = self.real_B_random
        # One sample per code group for visualization (query / positive / first negative).
        self.fake_B_random1a = self.fake_B_random[0:1]
        self.fake_B_random1b = self.fake_B_random[half_size:(half_size + 1)]
        self.fake_B_random2 = self.fake_B_random[(half_size * 2):((half_size * 2) + 1)]
        if ((self.opt.lambda_z > 0.0) or (self.opt.lambda_contra > 0.0)):
            # Recover the latent codes of the generated images with the encoder;
            # these are used for latent regression and the contrastive loss.
            (self.mu2, logvar2) = self.netE(self.fake_B_random)

    def backward_D(self, netD, real, fake):
        """Standard discriminator update: real scored as real, fake (detached) as fake."""
        pred_fake = netD(fake.detach())
        pred_real = netD(real)
        (loss_D_fake, _) = self.criterionGAN(pred_fake, False)
        (loss_D_real, _) = self.criterionGAN(pred_real, True)
        loss_D = (loss_D_fake + loss_D_real)
        loss_D.backward()
        return (loss_D, [loss_D_fake, loss_D_real])

    def backward_G_GAN(self, fake, netD=None, ll=0.0):
        """Generator GAN loss on `fake`, weighted by ll (skipped when ll == 0)."""
        if (ll > 0.0):
            pred_fake = netD(fake)
            (loss_G_GAN, _) = self.criterionGAN(pred_fake, True)
        else:
            loss_G_GAN = 0
        return (loss_G_GAN * ll)

    def backward_EG(self):
        """Backward pass for G and E: GAN losses, L1 reconstruction and KL."""
        self.loss_G_GAN = self.backward_G_GAN(self.fake_data_encoded, self.netD, self.opt.lambda_GAN)
        if self.opt.use_same_D:
            self.loss_G_GAN2 = self.backward_G_GAN(self.fake_data_random, self.netD, self.opt.lambda_GAN2)
        else:
            self.loss_G_GAN2 = self.backward_G_GAN(self.fake_data_random, self.netD2, self.opt.lambda_GAN2)
        if (self.opt.lambda_kl > 0.0):
            # KL divergence between the encoder posterior and N(0, I).
            self.loss_kl = (torch.sum((((1 + self.logvar) - self.mu.pow(2)) - self.logvar.exp())) * ((- 0.5) * self.opt.lambda_kl))
        else:
            self.loss_kl = 0
        if (self.opt.lambda_L1 > 0.0):
            self.loss_G_L1 = (self.criterionL1(self.fake_B_encoded, self.real_B_encoded) * self.opt.lambda_L1)
        else:
            self.loss_G_L1 = 0.0
        self.loss_G = (((self.loss_G_GAN + self.loss_G_GAN2) + self.loss_G_L1) + self.loss_kl)
        # The graph is reused by backward_G_alone afterwards.
        self.loss_G.backward(retain_graph=True)

    def update_D(self):
        """Update discriminators: D on the encoded path, D2 (or D again) on the random path."""
        self.set_requires_grad([self.netD, self.netD2], True)
        if (self.opt.lambda_GAN > 0.0):
            self.optimizer_D.zero_grad()
            (self.loss_D, self.losses_D) = self.backward_D(self.netD, self.real_data_encoded, self.fake_data_encoded)
            if self.opt.use_same_D:
                (self.loss_D2, self.losses_D2) = self.backward_D(self.netD, self.real_data_random, self.fake_data_random)
            self.optimizer_D.step()
        if ((self.opt.lambda_GAN2 > 0.0) and (not self.opt.use_same_D)):
            self.optimizer_D2.zero_grad()
            (self.loss_D2, self.losses_D2) = self.backward_D(self.netD2, self.real_data_random, self.fake_data_random)
            self.optimizer_D2.step()

    def backward_G_alone(self):
        """Backward pass for losses applied to G only: latent regression (z_L1)
        and the DivCo contrastive loss.

        NOTE(review): loss_contra is accumulated but .backward() is never
        called on it here — confirm it is applied elsewhere or intentionally
        logged only.
        """
        if (self.opt.lambda_z > 0.0):
            self.loss_z_L1 = (self.criterionZ(self.mu2, self.z_random) * self.opt.lambda_z)
            self.loss_z_L1.backward()
        else:
            self.loss_z_L1 = 0.0
        self.loss_contra = 0.0
        if (self.opt.lambda_contra > 0.0):
            for i in range(self.half_size):
                # Stride half_size selects the same image across the
                # query/positive/negative code groups.
                mu = self.mu2[i:self.mu2.shape[0]:self.half_size]
                if self.opt.featnorm:
                    mu /= torch.norm(mu, p=2, dim=1, keepdim=True)
                self.loss_contra += self.compute_contrastive_loss(mu[0:1], mu[1:])
            self.loss_contra = (self.loss_contra * self.opt.lambda_contra)

    def compute_contrastive_loss(self, feat_q, feat_k):
        """InfoNCE-style loss: the first key (index 0, the positive) should
        score highest against the query; tau is the softmax temperature."""
        out = (torch.mm(feat_q, feat_k.transpose(1, 0)) / self.opt.tau)
        loss = self.cross_entropy_loss(out, torch.zeros(out.size(0), dtype=torch.long, device=feat_q.device))
        return loss

    def update_G_and_E(self):
        """Update generator G and encoder E with the discriminators frozen."""
        self.set_requires_grad([self.netD, self.netD2], False)
        self.optimizer_E.zero_grad()
        self.optimizer_G.zero_grad()
        self.backward_EG()
        if ((self.opt.lambda_z > 0.0) or (self.opt.lambda_contra > 0.0)):
            # The z_L1 / contrastive terms must not update the encoder.
            self.set_requires_grad([self.netE], False)
            self.backward_G_alone()
            self.set_requires_grad([self.netE], True)
        self.optimizer_E.step()
        self.optimizer_G.step()

    def optimize_parameters(self):
        """One training iteration: forward, update G/E, then update D/D2."""
        self.forward()
        self.update_G_and_E()
        self.update_D()
|
def init_weights(net, init_type='normal', init_gain=0.02):
    """Initialize network weights.

    Parameters:
        net (network)     -- network to be initialized
        init_type (str)   -- the name of an initialization method: normal | xavier | kaiming | orthogonal
        init_gain (float) -- scaling factor for normal, xavier and orthogonal.

    'normal' is used in the original pix2pix and CycleGAN paper, but xavier
    and kaiming might work better for some applications. Feel free to try
    yourself.
    """
    def init_func(m):
        # Callback applied to every submodule by net.apply().
        cls_name = m.__class__.__name__
        has_affine_weight = hasattr(m, 'weight') and (cls_name.find('Conv') != -1 or cls_name.find('Linear') != -1)
        if has_affine_weight:
            if init_type == 'normal':
                init.normal_(m.weight.data, 0.0, init_gain)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=init_gain)
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=init_gain)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)
        elif cls_name.find('BatchNorm2d') != -1:
            # BatchNorm scale (gamma) starts near 1, shift (beta) at 0.
            init.normal_(m.weight.data, 1.0, init_gain)
            init.constant_(m.bias.data, 0.0)

    print('initialize network with %s' % init_type)
    net.apply(init_func)
|
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):
    """Initialize a network: 1. register CPU/GPU device (with multi-GPU
    support); 2. initialize the network weights.

    Parameters:
        net (network)      -- the network to be initialized
        init_type (str)    -- the name of an initialization method: normal | xavier | kaiming | orthogonal
        init_gain (float)  -- scaling factor for normal, xavier and orthogonal.
        gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2

    Returns an initialized network.
    """
    if gpu_ids:
        assert torch.cuda.is_available()
        net.to(gpu_ids[0])
        # Wrap in DataParallel so forward passes are replicated across GPUs.
        net = torch.nn.DataParallel(net, gpu_ids)
    init_weights(net, init_type, init_gain=init_gain)
    return net
|
def get_scheduler(optimizer, opt):
    """Return a learning rate scheduler.

    Parameters:
        optimizer          -- the optimizer of the network
        opt (option class) -- stores all the experiment flags; needs to be a
            subclass of BaseOptions. opt.lr_policy is the name of the learning
            rate policy: linear | step | plateau | cosine

    For 'linear', we keep the same learning rate for the first <opt.niter>
    epochs and linearly decay the rate to zero over the next <opt.niter_decay>
    epochs. For the other policies (step, plateau, cosine) we use the default
    PyTorch schedulers. See https://pytorch.org/docs/stable/optim.html.

    Raises:
        NotImplementedError -- if opt.lr_policy is not a supported name.
    """
    if (opt.lr_policy == 'linear'):
        def lambda_rule(epoch):
            # Multiplier stays 1.0 until epoch_count + epoch exceeds niter,
            # then decays linearly to 0 over niter_decay epochs.
            lr_l = (1.0 - (max(0, ((epoch + opt.epoch_count) - opt.niter)) / float((opt.niter_decay + 1))))
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif (opt.lr_policy == 'step'):
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif (opt.lr_policy == 'plateau'):
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    elif (opt.lr_policy == 'cosine'):
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.niter, eta_min=0)
    else:
        # BUG FIX: the error was previously *returned* instead of raised (and
        # used a two-argument '%s' form that never interpolated the policy),
        # so an unknown policy silently handed callers an exception object.
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler
|
def get_norm_layer(norm_type='instance'):
    """Return a normalization layer constructor.

    Parameters:
        norm_type (str) -- the name of the normalization layer: batch | instance | none

    For BatchNorm, we use learnable affine parameters and track running
    statistics (mean/stddev). For InstanceNorm, we do not use learnable affine
    parameters and do not track running statistics.
    """
    if norm_type == 'batch':
        return functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
    if norm_type == 'instance':
        return functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
    if norm_type == 'none':
        return None
    raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
|
def get_non_linearity(layer_type='relu'):
    """Return a constructor for the requested non-linearity.

    Parameters:
        layer_type (str) -- relu | lrelu | elu (lrelu uses negative_slope=0.2)
    """
    if layer_type == 'relu':
        return functools.partial(nn.ReLU, inplace=True)
    if layer_type == 'lrelu':
        return functools.partial(nn.LeakyReLU, negative_slope=0.2, inplace=True)
    if layer_type == 'elu':
        return functools.partial(nn.ELU, inplace=True)
    raise NotImplementedError('nonlinearity activitation [%s] is not found' % layer_type)
|
def define_G(input_nc, output_nc, nz, ngf, netG='unet_128', norm='batch', nl='relu', use_dropout=False, init_type='xavier', init_gain=0.02, gpu_ids=[], where_add='input', upsample='bilinear'):
    """Create and initialize a generator.

    Parameters:
        input_nc (int)     -- number of channels in input images
        output_nc (int)    -- number of channels in output images
        nz (int)           -- dimensionality of the latent code (0 forces injection at the input only)
        ngf (int)          -- number of filters in the first conv layer
        netG (str)         -- architecture name: unet_128 | unet_256
        norm (str)         -- normalization layer type: batch | instance | none
        nl (str)           -- non-linearity: relu | lrelu | elu
        use_dropout (bool) -- whether to use dropout layers
        init_type (str)    -- weight initialization method
        init_gain (float)  -- scaling factor for the initialization
        gpu_ids (int list) -- which GPUs the network runs on
        where_add (str)    -- where the latent code is injected: input | all
        upsample (str)     -- upsampling method passed to the U-Net blocks

    Returns an initialized generator network.
    """
    net = None
    norm_layer = get_norm_layer(norm_type=norm)
    nl_layer = get_non_linearity(layer_type=nl)
    if (nz == 0):
        # With no latent code there is nothing to inject at every layer.
        where_add = 'input'
    if ((netG == 'unet_128') and (where_add == 'input')):
        net = G_Unet_add_input(input_nc, output_nc, nz, 7, ngf, norm_layer=norm_layer, nl_layer=nl_layer, use_dropout=use_dropout, upsample=upsample)
    elif ((netG == 'unet_256') and (where_add == 'input')):
        net = G_Unet_add_input(input_nc, output_nc, nz, 8, ngf, norm_layer=norm_layer, nl_layer=nl_layer, use_dropout=use_dropout, upsample=upsample)
    elif ((netG == 'unet_128') and (where_add == 'all')):
        net = G_Unet_add_all(input_nc, output_nc, nz, 7, ngf, norm_layer=norm_layer, nl_layer=nl_layer, use_dropout=use_dropout, upsample=upsample)
    elif ((netG == 'unet_256') and (where_add == 'all')):
        net = G_Unet_add_all(input_nc, output_nc, nz, 8, ngf, norm_layer=norm_layer, nl_layer=nl_layer, use_dropout=use_dropout, upsample=upsample)
    else:
        # BUG FIX: the message previously interpolated `net` (always None on
        # this branch) instead of the requested architecture name.
        raise NotImplementedError('Generator model name [%s] is not recognized' % netG)
    return init_net(net, init_type, init_gain, gpu_ids)
|
def define_D(input_nc, ndf, netD, norm='batch', nl='lrelu', init_type='xavier', init_gain=0.02, num_Ds=1, gpu_ids=[]):
    """Create and initialize a discriminator network.

    Parameters:
        input_nc (int)    -- number of channels in the input images
        ndf (int)         -- number of filters in the first conv layer
        netD (str)        -- architecture: basic_128 | basic_256 | basic_128_multi | basic_256_multi
        norm (str)        -- normalization type: batch | instance | none
        nl (str)          -- non-linearity name (see note below: currently ignored)
        init_type (str)   -- weight initialization method
        init_gain (float) -- scaling factor for the initialization
        num_Ds (int)      -- number of scales for the *_multi variants
        gpu_ids (int list) -- GPUs to place the network on

    Returns the discriminator wrapped by init_net (initialized, on device).

    Raises:
        NotImplementedError -- if netD is not a recognized architecture name.
    """
    net = None
    norm_layer = get_norm_layer(norm_type=norm)
    # NOTE(review): the caller's `nl` value is overwritten here, so the
    # discriminator always uses LeakyReLU — presumably intentional; confirm.
    nl = 'lrelu'
    nl_layer = get_non_linearity(layer_type=nl)
    if (netD == 'basic_128'):
        net = D_NLayers(input_nc, ndf, n_layers=2, norm_layer=norm_layer, nl_layer=nl_layer)
    elif (netD == 'basic_256'):
        net = D_NLayers(input_nc, ndf, n_layers=3, norm_layer=norm_layer, nl_layer=nl_layer)
    elif (netD == 'basic_128_multi'):
        net = D_NLayersMulti(input_nc=input_nc, ndf=ndf, n_layers=2, norm_layer=norm_layer, num_D=num_Ds)
    elif (netD == 'basic_256_multi'):
        net = D_NLayersMulti(input_nc=input_nc, ndf=ndf, n_layers=3, norm_layer=norm_layer, num_D=num_Ds)
    else:
        # Bug fix: report the requested architecture name (netD), not the
        # `net` variable, which is still None on this branch.
        raise NotImplementedError(('Discriminator model name [%s] is not recognized' % netD))
    return init_net(net, init_type, init_gain, gpu_ids)
|
def define_E(input_nc, output_nc, ndf, netE, norm='batch', nl='lrelu', init_type='xavier', init_gain=0.02, gpu_ids=[], vaeLike=False):
    """Create and initialize an encoder network.

    Parameters:
        input_nc (int)    -- number of channels in the input images
        output_nc (int)   -- size of the produced latent code
        ndf (int)         -- number of filters in the first conv layer
        netE (str)        -- architecture: resnet_128 | resnet_256 | conv_128 | conv_256
        norm (str)        -- normalization type: batch | instance | none
        nl (str)          -- non-linearity name (see note below: currently ignored)
        init_type (str)   -- weight initialization method
        init_gain (float) -- scaling factor for the initialization
        gpu_ids (int list) -- GPUs to place the network on
        vaeLike (bool)    -- if True, the encoder outputs (mu, logvar) pairs

    Returns the encoder wrapped by init_net (initialized, on device).

    Raises:
        NotImplementedError -- if netE is not a recognized architecture name.
    """
    net = None
    norm_layer = get_norm_layer(norm_type=norm)
    # NOTE(review): the caller's `nl` value is overwritten here, so the
    # encoder always uses LeakyReLU — presumably intentional; confirm.
    nl = 'lrelu'
    nl_layer = get_non_linearity(layer_type=nl)
    if (netE == 'resnet_128'):
        net = E_ResNet(input_nc, output_nc, ndf, n_blocks=4, norm_layer=norm_layer, nl_layer=nl_layer, vaeLike=vaeLike)
    elif (netE == 'resnet_256'):
        net = E_ResNet(input_nc, output_nc, ndf, n_blocks=5, norm_layer=norm_layer, nl_layer=nl_layer, vaeLike=vaeLike)
    elif (netE == 'conv_128'):
        net = E_NLayers(input_nc, output_nc, ndf, n_layers=4, norm_layer=norm_layer, nl_layer=nl_layer, vaeLike=vaeLike)
    elif (netE == 'conv_256'):
        net = E_NLayers(input_nc, output_nc, ndf, n_layers=5, norm_layer=norm_layer, nl_layer=nl_layer, vaeLike=vaeLike)
    else:
        # Bug fix: report the requested architecture name (netE), not the
        # `net` variable, which is still None on this branch.
        raise NotImplementedError(('Encoder model name [%s] is not recognized' % netE))
    return init_net(net, init_type, init_gain, gpu_ids)
|
class D_NLayersMulti(nn.Module):
    """PatchGAN discriminator operating at one or several image scales.

    With num_D == 1 this is a plain PatchGAN. Otherwise one discriminator is
    registered per scale (model_0 ... model_{num_D-1}); each successive one
    sees a 2x-downsampled input and uses half as many base filters.
    """

    def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, num_D=1):
        super(D_NLayersMulti, self).__init__()
        self.num_D = num_D
        if num_D == 1:
            self.model = nn.Sequential(*self.get_layers(input_nc, ndf, n_layers, norm_layer))
        else:
            self.add_module('model_0', nn.Sequential(*self.get_layers(input_nc, ndf, n_layers, norm_layer)))
            # 2x downsampling applied between successive scales.
            self.down = nn.AvgPool2d(3, stride=2, padding=[1, 1], count_include_pad=False)
            for scale in range(1, num_D):
                filters = int(round(ndf / (2 ** scale)))
                self.add_module('model_%d' % scale, nn.Sequential(*self.get_layers(input_nc, filters, n_layers, norm_layer)))

    def get_layers(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):
        """Build the layer list of one single-scale PatchGAN discriminator."""
        kw, padw = 4, 1
        sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
                    nn.LeakyReLU(0.2, True)]
        mult = 1
        for n in range(1, n_layers):
            prev, mult = mult, min(2 ** n, 8)
            sequence += [nn.Conv2d(ndf * prev, ndf * mult, kernel_size=kw, stride=2, padding=padw),
                         norm_layer(ndf * mult),
                         nn.LeakyReLU(0.2, True)]
        prev, mult = mult, min(2 ** n_layers, 8)
        sequence += [nn.Conv2d(ndf * prev, ndf * mult, kernel_size=kw, stride=1, padding=padw),
                     norm_layer(ndf * mult),
                     nn.LeakyReLU(0.2, True)]
        # One-channel prediction map: each output pixel rates one patch.
        sequence += [nn.Conv2d(ndf * mult, 1, kernel_size=kw, stride=1, padding=padw)]
        return sequence

    def forward(self, input):
        if self.num_D == 1:
            return self.model(input)
        outputs = []
        scaled = input
        for i in range(self.num_D):
            outputs.append(getattr(self, 'model_%d' % i)(scaled))
            if i != self.num_D - 1:
                scaled = self.down(scaled)
        return outputs
|
class D_NLayers(nn.Module):
    """Defines a PatchGAN discriminator: classifies overlapping image patches
    as real/fake via a fully-convolutional stack."""

    def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):
        """Construct a PatchGAN discriminator.

        Parameters:
            input_nc (int) -- number of channels in input images
            ndf (int)      -- number of filters in the first conv layer
            n_layers (int) -- number of stride-2 conv layers
            norm_layer     -- normalization layer (class or functools.partial)
        """
        super(D_NLayers, self).__init__()
        # BatchNorm layers have their own affine bias, so a conv bias
        # directly before them is redundant.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func != nn.BatchNorm2d
        else:
            use_bias = norm_layer != nn.BatchNorm2d
        kw, padw = 4, 1
        layers = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
                  nn.LeakyReLU(0.2, True)]
        mult = 1
        for n in range(1, n_layers):
            prev, mult = mult, min(2 ** n, 8)
            layers += [nn.Conv2d(ndf * prev, ndf * mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
                       norm_layer(ndf * mult),
                       nn.LeakyReLU(0.2, True)]
        prev, mult = mult, min(2 ** n_layers, 8)
        layers += [nn.Conv2d(ndf * prev, ndf * mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
                   norm_layer(ndf * mult),
                   nn.LeakyReLU(0.2, True)]
        # One-channel prediction map: each output pixel rates one patch.
        layers += [nn.Conv2d(ndf * mult, 1, kernel_size=kw, stride=1, padding=padw)]
        self.model = nn.Sequential(*layers)

    def forward(self, input):
        """Standard forward."""
        return self.model(input)
|
class RecLoss(nn.Module):
    """Reconstruction loss: mean squared (default) or absolute difference."""

    def __init__(self, use_L2=True):
        super(RecLoss, self).__init__()
        self.use_L2 = use_L2  # True: squared error; False: absolute error

    def __call__(self, input, target, batch_mean=True):
        """Compute the reconstruction error.

        Parameters:
            input (tensor)    -- predicted tensor, shape (N, ...)
            target (tensor)   -- reference tensor, same shape as input
            batch_mean (bool) -- if True, return a scalar averaged over all
                                 elements; otherwise a per-sample mean of
                                 shape (N,)

        Returns the mean error (scalar, or one value per batch element).
        """
        if self.use_L2:
            diff = ((input - target) ** 2)
        else:
            diff = torch.abs((input - target))
        if batch_mean:
            return torch.mean(diff)
        # Bug fix: the original chained means over dims 1, 2 and 3, but each
        # reduction removes a dimension, so `dim=3` no longer exists by the
        # third call (error on 4-D input). Average over all non-batch
        # dimensions instead; this also generalizes to any input rank.
        return diff.reshape(diff.size(0), -1).mean(dim=1)
|
class GANLoss(nn.Module):
    """Define different GAN objectives.

    Abstracts away creating a target-label tensor the same size as the
    discriminator prediction, and sums losses over a list of predictions
    (to support multi-scale discriminators).
    """

    def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
        """Initialize the GANLoss class.

        Parameters:
            gan_mode (str)            -- GAN objective: vanilla | lsgan | wgangp
            target_real_label (float) -- label for a real image
            target_fake_label (float) -- label for a fake image

        Note: do not use sigmoid as the last layer of the discriminator;
        LSGAN needs no sigmoid and vanilla uses BCEWithLogitsLoss.
        """
        super(GANLoss, self).__init__()
        # Buffers so the labels move with the module across devices.
        self.register_buffer('real_label', torch.tensor(target_real_label))
        self.register_buffer('fake_label', torch.tensor(target_fake_label))
        self.gan_mode = gan_mode
        if gan_mode == 'lsgan':
            self.loss = nn.MSELoss()
        elif gan_mode == 'vanilla':
            self.loss = nn.BCEWithLogitsLoss()
        elif gan_mode in ['wgangp']:
            self.loss = None
        else:
            raise NotImplementedError(('gan mode %s not implemented' % gan_mode))

    def get_target_tensor(self, prediction, target_is_real):
        """Return a label tensor with the same size as `prediction`.

        Parameters:
            prediction (tensor)   -- typically a discriminator output
            target_is_real (bool) -- whether the ground truth is "real"
        """
        label = self.real_label if target_is_real else self.fake_label
        return label.expand_as(prediction)

    def __call__(self, predictions, target_is_real):
        """Compute the loss for a list of discriminator outputs.

        Parameters:
            predictions (tensor list) -- discriminator outputs (multi-D support)
            target_is_real (bool)     -- whether the ground truth is "real"

        Returns (total_loss, per_prediction_losses).
        """
        per_d_losses = []
        for pred in predictions:
            if self.gan_mode in ['lsgan', 'vanilla']:
                per_d_losses.append(self.loss(pred, self.get_target_tensor(pred, target_is_real)))
            elif self.gan_mode == 'wgangp':
                # Wasserstein critic: maximize score on real, minimize on fake.
                sign = -1.0 if target_is_real else 1.0
                per_d_losses.append(sign * pred.mean())
        return (sum(per_d_losses), per_d_losses)
|
def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0):
    """Calculate the WGAN-GP gradient penalty (https://arxiv.org/abs/1704.00028).

    Parameters:
        netD (network)      -- discriminator network
        real_data (tensor)  -- real images
        fake_data (tensor)  -- generated images from the generator
        device (torch.device) -- device the penalty is computed on
        type (str)          -- where gradients are evaluated: real | fake | mixed
        constant (float)    -- the c in (||gradient||_2 - c)^2
        lambda_gp (float)   -- weight for this loss; <= 0 disables it

    Returns (penalty, flattened_gradients), or (0.0, None) when disabled.
    """
    if not (lambda_gp > 0.0):
        return (0.0, None)
    if type == 'real':
        interpolatesv = real_data
    elif type == 'fake':
        interpolatesv = fake_data
    elif type == 'mixed':
        batch = real_data.shape[0]
        # One mixing coefficient per sample, broadcast to the full shape.
        alpha = torch.rand(batch, 1)
        alpha = alpha.expand(batch, real_data.nelement() // batch).contiguous().view(*real_data.shape)
        alpha = alpha.to(device)
        interpolatesv = alpha * real_data + (1 - alpha) * fake_data
    else:
        raise NotImplementedError('{} not implemented'.format(type))
    interpolatesv.requires_grad_(True)
    disc_out = netD(interpolatesv)
    grads = torch.autograd.grad(outputs=disc_out, inputs=interpolatesv,
                                grad_outputs=torch.ones(disc_out.size()).to(device),
                                create_graph=True, retain_graph=True, only_inputs=True)
    flat = grads[0].view(real_data.size(0), -1)
    # Small epsilon keeps the norm differentiable at exactly zero gradient.
    penalty = ((flat + 1e-16).norm(2, dim=1) - constant).pow(2).mean() * lambda_gp
    return (penalty, flat)
|
class G_Unet_add_input(nn.Module):
    """U-Net generator that concatenates the latent code z to the input image.

    z is spatially replicated and appended as extra channels before being
    fed to the outermost U-Net block.
    """

    def __init__(self, input_nc, output_nc, nz, num_downs, ngf=64, norm_layer=None, nl_layer=None, use_dropout=False, upsample='basic'):
        super(G_Unet_add_input, self).__init__()
        self.nz = nz
        max_nchn = 8
        # Assemble the U-Net from the innermost block outward.
        block = UnetBlock(ngf * max_nchn, ngf * max_nchn, ngf * max_nchn, innermost=True, norm_layer=norm_layer, nl_layer=nl_layer, upsample=upsample)
        for _ in range(num_downs - 5):
            block = UnetBlock(ngf * max_nchn, ngf * max_nchn, ngf * max_nchn, block, norm_layer=norm_layer, nl_layer=nl_layer, use_dropout=use_dropout, upsample=upsample)
        for mult in (4, 2, 1):
            block = UnetBlock(ngf * mult, ngf * mult, ngf * mult * 2, block, norm_layer=norm_layer, nl_layer=nl_layer, upsample=upsample)
        block = UnetBlock(input_nc + nz, output_nc, ngf, block, outermost=True, norm_layer=norm_layer, nl_layer=nl_layer, upsample=upsample)
        self.model = block

    def forward(self, x, z=None):
        if self.nz > 0:
            # Broadcast z over the spatial dims, then stack onto the image channels.
            spatial_z = z.view(z.size(0), z.size(1), 1, 1).expand(z.size(0), z.size(1), x.size(2), x.size(3))
            net_in = torch.cat([x, spatial_z], 1)
        else:
            net_in = x
        return self.model(net_in)
|
def upsampleLayer(inplanes, outplanes, upsample='basic', padding_type='zero'):
    """Return a list of layers that upsample spatially by 2x.

    'basic' uses a stride-2 transposed convolution; 'bilinear' interpolates
    first and then applies a 3x3 convolution. The padding_type argument is
    accepted but currently unused.

    Raises:
        NotImplementedError -- for any other upsample mode.
    """
    if upsample == 'basic':
        return [nn.ConvTranspose2d(inplanes, outplanes, kernel_size=4, stride=2, padding=1)]
    if upsample == 'bilinear':
        return [nn.Upsample(scale_factor=2, mode='bilinear'),
                nn.ReflectionPad2d(1),
                nn.Conv2d(inplanes, outplanes, kernel_size=3, stride=1, padding=0)]
    raise NotImplementedError(('upsample layer [%s] not implemented' % upsample))
|
class UnetBlock(nn.Module):
    """One level of a recursively-built U-Net.

    A block wraps its `submodule` (the next-deeper level) with one stride-2
    downsampling convolution on the way in and one 2x upsampling layer on the
    way out. Except for the outermost block, the input is concatenated to the
    output along the channel dimension, forming the U-Net skip connection.
    """

    def __init__(self, input_nc, outer_nc, inner_nc, submodule=None, outermost=False, innermost=False, norm_layer=None, nl_layer=None, use_dropout=False, upsample='basic', padding_type='zero'):
        """Build one U-Net level.

        Parameters:
            input_nc (int)   -- channels entering this block
            outer_nc (int)   -- channels this block returns to its parent
            inner_nc (int)   -- channels passed down to the submodule
            submodule        -- the next-inner UnetBlock (None when innermost)
            outermost (bool) -- this is the top-level block (Tanh output, no skip)
            innermost (bool) -- this is the bottleneck block (no submodule)
            norm_layer       -- normalization-layer constructor, or None
            nl_layer         -- activation-layer constructor for the up path
            use_dropout (bool) -- add Dropout(0.5) in intermediate blocks
            upsample (str)   -- upsampling style, see upsampleLayer
            padding_type (str) -- zero | reflect | replicate padding for the
                                  downsampling convolution
        """
        super(UnetBlock, self).__init__()
        self.outermost = outermost
        p = 0
        downconv = []
        if (padding_type == 'reflect'):
            downconv += [nn.ReflectionPad2d(1)]
        elif (padding_type == 'replicate'):
            downconv += [nn.ReplicationPad2d(1)]
        elif (padding_type == 'zero'):
            p = 1
        else:
            raise NotImplementedError(('padding [%s] is not implemented' % padding_type))
        # Stride-2 convolution halves the spatial resolution on the way down.
        downconv += [nn.Conv2d(input_nc, inner_nc, kernel_size=4, stride=2, padding=p)]
        downrelu = nn.LeakyReLU(0.2, True)
        downnorm = (norm_layer(inner_nc) if (norm_layer is not None) else None)
        uprelu = nl_layer()
        upnorm = (norm_layer(outer_nc) if (norm_layer is not None) else None)
        if outermost:
            # Outermost: no skip concat on the output; ends with Tanh.
            upconv = upsampleLayer((inner_nc * 2), outer_nc, upsample=upsample, padding_type=padding_type)
            down = downconv
            up = (([uprelu] + upconv) + [nn.Tanh()])
            model = ((down + [submodule]) + up)
        elif innermost:
            # Innermost: no submodule; upsamples straight from inner_nc.
            upconv = upsampleLayer(inner_nc, outer_nc, upsample=upsample, padding_type=padding_type)
            down = ([downrelu] + downconv)
            up = ([uprelu] + upconv)
            if (upnorm is not None):
                up += [upnorm]
            model = (down + up)
        else:
            # Intermediate: the submodule doubles the channel count via its
            # skip concat, hence inner_nc * 2 on the up path.
            upconv = upsampleLayer((inner_nc * 2), outer_nc, upsample=upsample, padding_type=padding_type)
            down = ([downrelu] + downconv)
            if (downnorm is not None):
                down += [downnorm]
            up = ([uprelu] + upconv)
            if (upnorm is not None):
                up += [upnorm]
            if use_dropout:
                model = (((down + [submodule]) + up) + [nn.Dropout(0.5)])
            else:
                model = ((down + [submodule]) + up)
        self.model = nn.Sequential(*model)

    def forward(self, x):
        if self.outermost:
            return self.model(x)
        else:
            # U-Net skip connection: concatenate the block output with its input.
            return torch.cat([self.model(x), x], 1)
|
def conv3x3(in_planes, out_planes):
    """3x3 convolution with stride 1 and padding 1 (shape-preserving), with bias."""
    conv = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=True)
    return conv
|
def upsampleConv(inplanes, outplanes, kw, padw):
    """Nearest-neighbor 2x upsampling followed by a stride-1 convolution."""
    return nn.Sequential(
        nn.Upsample(scale_factor=2, mode='nearest'),
        nn.Conv2d(inplanes, outplanes, kernel_size=kw, stride=1, padding=padw, bias=True),
    )
|
def meanpoolConv(inplanes, outplanes):
    """2x average-pool downsampling followed by a 1x1 convolution."""
    return nn.Sequential(
        nn.AvgPool2d(kernel_size=2, stride=2),
        nn.Conv2d(inplanes, outplanes, kernel_size=1, stride=1, padding=0, bias=True),
    )
|
def convMeanpool(inplanes, outplanes):
    """3x3 shape-preserving convolution followed by 2x average pooling."""
    # The conv3x3 helper is inlined here: kernel 3, stride 1, padding 1, bias.
    return nn.Sequential(
        nn.Conv2d(inplanes, outplanes, kernel_size=3, stride=1, padding=1, bias=True),
        nn.AvgPool2d(kernel_size=2, stride=2),
    )
|
class BasicBlockUp(nn.Module):
    """Residual block that doubles the spatial resolution.

    Main path: [norm] -> activation -> 2x upsample-conv -> [norm] -> 3x3 conv.
    Shortcut: 1x1 upsample-conv. The output is their sum.
    """

    def __init__(self, inplanes, outplanes, norm_layer=None, nl_layer=None):
        super(BasicBlockUp, self).__init__()
        path = []
        if norm_layer is not None:
            path.append(norm_layer(inplanes))
        path.append(nl_layer())
        path.append(upsampleConv(inplanes, outplanes, kw=3, padw=1))
        if norm_layer is not None:
            path.append(norm_layer(outplanes))
        path.append(conv3x3(outplanes, outplanes))
        self.conv = nn.Sequential(*path)
        self.shortcut = upsampleConv(inplanes, outplanes, kw=1, padw=0)

    def forward(self, x):
        return self.conv(x) + self.shortcut(x)
|
class BasicBlock(nn.Module):
    """Residual block that halves the spatial resolution.

    Main path: [norm] -> act -> 3x3 conv -> [norm] -> act -> conv + meanpool.
    Shortcut: meanpool + 1x1 conv. The output is their sum.
    """

    def __init__(self, inplanes, outplanes, norm_layer=None, nl_layer=None):
        super(BasicBlock, self).__init__()
        path = []
        if norm_layer is not None:
            path.append(norm_layer(inplanes))
        path.append(nl_layer())
        path.append(conv3x3(inplanes, inplanes))
        if norm_layer is not None:
            path.append(norm_layer(inplanes))
        path.append(nl_layer())
        path.append(convMeanpool(inplanes, outplanes))
        self.conv = nn.Sequential(*path)
        self.shortcut = meanpoolConv(inplanes, outplanes)

    def forward(self, x):
        return self.conv(x) + self.shortcut(x)
|
class E_ResNet(nn.Module):
    """ResNet-style encoder mapping an image to a latent code.

    With vaeLike=True, forward returns a (mu, logvar) pair for VAE-style
    sampling; otherwise a single code of size output_nc.
    """

    def __init__(self, input_nc=3, output_nc=1, ndf=64, n_blocks=4, norm_layer=None, nl_layer=None, vaeLike=False):
        super(E_ResNet, self).__init__()
        self.vaeLike = vaeLike
        max_ndf = 4  # channel multiplier is capped at ndf * 4
        conv_layers = [nn.Conv2d(input_nc, ndf, kernel_size=4, stride=2, padding=1, bias=True)]
        # Robustness fix: the original left output_ndf undefined when
        # n_blocks < 2 (NameError below); initialize it for that case.
        output_ndf = ndf
        for n in range(1, n_blocks):
            input_ndf = (ndf * min(max_ndf, n))
            output_ndf = (ndf * min(max_ndf, (n + 1)))
            conv_layers += [BasicBlock(input_ndf, output_ndf, norm_layer, nl_layer)]
        # Final activation and global 8x8 average pooling; the spatial size
        # reaching here is assumed to be 8x8 — TODO confirm against callers.
        conv_layers += [nl_layer(), nn.AvgPool2d(8)]
        if vaeLike:
            self.fc = nn.Sequential(nn.Linear(output_ndf, output_nc))
            self.fcVar = nn.Sequential(nn.Linear(output_ndf, output_nc))
        else:
            self.fc = nn.Sequential(nn.Linear(output_ndf, output_nc))
        self.conv = nn.Sequential(*conv_layers)

    def forward(self, x):
        x_conv = self.conv(x)
        conv_flat = x_conv.view(x.size(0), -1)
        output = self.fc(conv_flat)
        if self.vaeLike:
            # Second head predicts the log-variance of the latent distribution.
            return (output, self.fcVar(conv_flat))
        # (The original had an unreachable duplicate `return output` here.)
        return output
|
class G_Unet_add_all(nn.Module):
    """U-Net generator that injects the latent code z into every U-Net block."""

    def __init__(self, input_nc, output_nc, nz, num_downs, ngf=64, norm_layer=None, nl_layer=None, use_dropout=False, upsample='basic'):
        super(G_Unet_add_all, self).__init__()
        self.nz = nz
        # Assemble the U-Net from the innermost block outward.
        block = UnetBlock_with_z(ngf * 8, ngf * 8, ngf * 8, nz, None, innermost=True, norm_layer=norm_layer, nl_layer=nl_layer, upsample=upsample)
        block = UnetBlock_with_z(ngf * 8, ngf * 8, ngf * 8, nz, block, norm_layer=norm_layer, nl_layer=nl_layer, use_dropout=use_dropout, upsample=upsample)
        for _ in range(num_downs - 6):
            block = UnetBlock_with_z(ngf * 8, ngf * 8, ngf * 8, nz, block, norm_layer=norm_layer, nl_layer=nl_layer, use_dropout=use_dropout, upsample=upsample)
        for mult in (4, 2, 1):
            block = UnetBlock_with_z(ngf * mult, ngf * mult, ngf * mult * 2, nz, block, norm_layer=norm_layer, nl_layer=nl_layer, upsample=upsample)
        self.model = UnetBlock_with_z(input_nc, output_nc, ngf, nz, block, outermost=True, norm_layer=norm_layer, nl_layer=nl_layer, upsample=upsample)

    def forward(self, x, z):
        return self.model(x, z)
|
class UnetBlock_with_z(nn.Module):
    """U-Net level that concatenates the latent code z to its own input.

    Same recursive structure as UnetBlock, but every level widens its input
    by nz channels (z spatially replicated), so the latent code influences
    all scales of the generator.
    """

    def __init__(self, input_nc, outer_nc, inner_nc, nz=0, submodule=None, outermost=False, innermost=False, norm_layer=None, nl_layer=None, use_dropout=False, upsample='basic', padding_type='zero'):
        super(UnetBlock_with_z, self).__init__()
        p = 0
        downconv = []
        if (padding_type == 'reflect'):
            downconv += [nn.ReflectionPad2d(1)]
        elif (padding_type == 'replicate'):
            downconv += [nn.ReplicationPad2d(1)]
        elif (padding_type == 'zero'):
            p = 1
        else:
            raise NotImplementedError(('padding [%s] is not implemented' % padding_type))
        self.outermost = outermost
        self.innermost = innermost
        self.nz = nz
        # The downsampling conv must also absorb the nz channels carrying z.
        input_nc = (input_nc + nz)
        downconv += [nn.Conv2d(input_nc, inner_nc, kernel_size=4, stride=2, padding=p)]
        downrelu = nn.LeakyReLU(0.2, True)
        uprelu = nl_layer()
        if outermost:
            # Outermost: submodule output is concat-widened (inner_nc * 2);
            # final Tanh maps to image range, and no skip concat on the output.
            upconv = upsampleLayer((inner_nc * 2), outer_nc, upsample=upsample, padding_type=padding_type)
            down = downconv
            up = (([uprelu] + upconv) + [nn.Tanh()])
        elif innermost:
            # Innermost: no submodule; upsamples straight from inner_nc.
            upconv = upsampleLayer(inner_nc, outer_nc, upsample=upsample, padding_type=padding_type)
            down = ([downrelu] + downconv)
            up = ([uprelu] + upconv)
            if (norm_layer is not None):
                up += [norm_layer(outer_nc)]
        else:
            # Intermediate: submodule output is concat-widened (inner_nc * 2).
            upconv = upsampleLayer((inner_nc * 2), outer_nc, upsample=upsample, padding_type=padding_type)
            down = ([downrelu] + downconv)
            if (norm_layer is not None):
                down += [norm_layer(inner_nc)]
            up = ([uprelu] + upconv)
            if (norm_layer is not None):
                up += [norm_layer(outer_nc)]
            if use_dropout:
                up += [nn.Dropout(0.5)]
        # Unlike UnetBlock, the down and up paths are kept as separate modules
        # because z must be re-injected between the recursive calls in forward().
        self.down = nn.Sequential(*down)
        self.submodule = submodule
        self.up = nn.Sequential(*up)

    def forward(self, x, z):
        if (self.nz > 0):
            # Replicate z over the spatial dims and append it as extra channels.
            z_img = z.view(z.size(0), z.size(1), 1, 1).expand(z.size(0), z.size(1), x.size(2), x.size(3))
            x_and_z = torch.cat([x, z_img], 1)
        else:
            x_and_z = x
        if self.outermost:
            x1 = self.down(x_and_z)
            x2 = self.submodule(x1, z)
            return self.up(x2)
        elif self.innermost:
            x1 = self.up(self.down(x_and_z))
            # U-Net skip connection with the (z-free) input.
            return torch.cat([x1, x], 1)
        else:
            x1 = self.down(x_and_z)
            x2 = self.submodule(x1, z)
            # U-Net skip connection with the (z-free) input.
            return torch.cat([self.up(x2), x], 1)
|
class E_NLayers(nn.Module):
    """Plain convolutional encoder mapping an image to a latent code.

    With vaeLike=True, forward returns a (mu, logvar) pair; otherwise a
    single code of size output_nc.
    """

    def __init__(self, input_nc, output_nc=1, ndf=64, n_layers=3, norm_layer=None, nl_layer=None, vaeLike=False):
        super(E_NLayers, self).__init__()
        self.vaeLike = vaeLike
        kw, padw = 4, 1
        layers = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nl_layer()]
        mult = 1
        for n in range(1, n_layers):
            prev, mult = mult, min(2 ** n, 4)
            layers.append(nn.Conv2d(ndf * prev, ndf * mult, kernel_size=kw, stride=2, padding=padw))
            if norm_layer is not None:
                layers.append(norm_layer(ndf * mult))
            layers.append(nl_layer())
        # Global 8x8 average pooling collapses the remaining spatial extent.
        layers.append(nn.AvgPool2d(8))
        self.conv = nn.Sequential(*layers)
        self.fc = nn.Sequential(nn.Linear(ndf * mult, output_nc))
        if vaeLike:
            self.fcVar = nn.Sequential(nn.Linear(ndf * mult, output_nc))

    def forward(self, x):
        features = self.conv(x).view(x.size(0), -1)
        output = self.fc(features)
        if self.vaeLike:
            # Second head predicts the log-variance of the latent distribution.
            return (output, self.fcVar(features))
        return output
|
class BaseOptions():
    """Defines options used during both training and test time.

    Also implements helper functions for parsing, printing, and saving the
    options, and gathers additional options defined by the
    <modify_commandline_options> functions of the chosen model and dataset
    classes. Subclasses (TrainOptions / TestOptions) add phase-specific flags
    and must set `self.isTrain` in their initialize().
    """

    def __init__(self):
        # True once initialize() has registered the base options on a parser.
        self.initialized = False

    def initialize(self, parser):
        """Register the command-line options shared by train and test on
        `parser` and return it."""
        parser.add_argument('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
        parser.add_argument('--batch_size', type=int, default=2, help='input batch size')
        parser.add_argument('--load_size', type=int, default=286, help='scale images to this size')
        parser.add_argument('--crop_size', type=int, default=256, help='then crop to this size')
        parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels')
        parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels')
        parser.add_argument('--nz', type=int, default=8, help='#latent vector')
        parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2, -1 for CPU mode')
        parser.add_argument('--name', type=str, default='', help='name of the experiment. It decides where to store samples and models')
        parser.add_argument('--preprocess', type=str, default='resize_and_crop', help='not implemented')
        parser.add_argument('--dataset_mode', type=str, default='aligned', help='aligned,single')
        parser.add_argument('--model', type=str, default='divco', help='chooses which model to use. bicycle,, ...')
        parser.add_argument('--direction', type=str, default='BtoA', help='AtoB or BtoA')
        parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
        parser.add_argument('--num_threads', default=4, type=int, help='# sthreads for loading data')
        parser.add_argument('--checkpoints_dir', type=str, default='./results', help='models are saved here')
        parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
        parser.add_argument('--use_dropout', action='store_true', help='use dropout for the generator')
        parser.add_argument('--max_dataset_size', type=int, default=float('inf'), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
        parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data argumentation')
        parser.add_argument('--num_Ds', type=int, default=2, help='number of Discrminators')
        parser.add_argument('--netD', type=str, default='basic_256_multi', help='selects model to use for netD')
        parser.add_argument('--netD2', type=str, default='basic_256_multi', help='selects model to use for netD2')
        parser.add_argument('--netG', type=str, default='unet_256', help='selects model to use for netG')
        parser.add_argument('--netE', type=str, default='resnet_256', help='selects model to use for netE')
        parser.add_argument('--nef', type=int, default=64, help='# of encoder filters in the first conv layer')
        parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')
        parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')
        parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization')
        parser.add_argument('--upsample', type=str, default='basic', help='basic | bilinear')
        parser.add_argument('--nl', type=str, default='relu', help='non-linearity activation: relu | lrelu | elu')
        parser.add_argument('--where_add', type=str, default='all', help='input|all|middle; where to add z in the network G')
        parser.add_argument('--conditional_D', action='store_true', help='if use conditional GAN for D')
        parser.add_argument('--init_type', type=str, default='xavier', help='network initialization [normal | xavier | kaiming | orthogonal]')
        parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
        parser.add_argument('--center_crop', action='store_true', help='if apply for center cropping for the test')
        parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')
        parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')
        parser.add_argument('--display_winsize', type=int, default=256, help='display window size')
        self.initialized = True
        return parser

    def gather_options(self):
        """Initialize our parser with basic options (only once), then add the
        model-specific and dataset-specific options declared by the chosen
        model and dataset classes via <modify_commandline_options>."""
        # NOTE(review): if initialize() was already called, `parser` below is
        # referenced before assignment (NameError) — the method assumes it is
        # invoked only once per instance; confirm.
        if (not self.initialized):
            parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
            parser = self.initialize(parser)
        # Parse known args first so --model / --dataset_mode can extend the parser.
        (opt, _) = parser.parse_known_args()
        model_name = opt.model
        model_option_setter = models.get_option_setter(model_name)
        # `self.isTrain` is expected to be set by the subclass's initialize().
        parser = model_option_setter(parser, self.isTrain)
        (opt, _) = parser.parse_known_args()
        dataset_name = opt.dataset_mode
        dataset_option_setter = data.get_option_setter(dataset_name)
        parser = dataset_option_setter(parser, self.isTrain)
        self.parser = parser
        return parser.parse_args()

    def print_options(self, opt):
        """Print and save options.

        Prints both the current options and their default values (when
        different) and saves everything to [checkpoints_dir]/[name]/opt.txt.
        """
        message = ''
        message += '----------------- Options ---------------\n'
        for (k, v) in sorted(vars(opt).items()):
            comment = ''
            default = self.parser.get_default(k)
            if (v != default):
                comment = ('\t[default: %s]' % str(default))
            message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
        message += '----------------- End -------------------'
        print(message)
        # Persist the resolved options next to the experiment checkpoints.
        expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
        util.mkdirs(expr_dir)
        file_name = os.path.join(expr_dir, 'opt.txt')
        with open(file_name, 'wt') as opt_file:
            opt_file.write(message)
            opt_file.write('\n')

    def parse(self):
        """Parse our options, create checkpoints directory suffix, and set up
        the GPU device. Returns the final options object."""
        opt = self.gather_options()
        opt.isTrain = self.isTrain
        if opt.suffix:
            # --suffix may contain format fields, e.g. {model}_{netG}, which
            # are expanded from the other option values.
            suffix = (('_' + opt.suffix.format(**vars(opt))) if (opt.suffix != '') else '')
            opt.name = (opt.name + suffix)
        self.print_options(opt)
        # Convert the comma-separated gpu id string into a list of ints;
        # negative ids are dropped (CPU mode).
        str_ids = opt.gpu_ids.split(',')
        opt.gpu_ids = []
        for str_id in str_ids:
            id = int(str_id)
            if (id >= 0):
                opt.gpu_ids.append(id)
        if (len(opt.gpu_ids) > 0):
            torch.cuda.set_device(opt.gpu_ids[0])
        self.opt = opt
        return self.opt
|
class TestOptions(BaseOptions):
    """Test-time options: everything in BaseOptions plus evaluation flags.

    Also sets isTrain = False so the rest of the code knows this is a
    test/validation run.
    """

    def initialize(self, parser):
        # Register the options shared with training first.
        BaseOptions.initialize(self, parser)
        parser.add_argument('--results_dir', type=str, default='../results/', help='saves results here.')
        parser.add_argument('--phase', type=str, default='val', help='train, val, test, etc')
        parser.add_argument('--num_test', type=int, default=50, help='how many test images to run')
        parser.add_argument('--n_samples', type=int, default=5, help='#samples')
        parser.add_argument('--no_encode', action='store_true', help='do not produce encoded image')
        parser.add_argument('--sync', action='store_true', help='use the same latent code for different input images')
        parser.add_argument('--aspect_ratio', type=float, default=1.0, help='aspect ratio for the results')
        parser.add_argument('--eval', action='store_true', help='use eval mode during test time.')
        # Consumed by BaseOptions.parse() / gather_options().
        self.isTrain = False
        return parser
|
class TrainOptions(BaseOptions):
    """Training-time options: everything in BaseOptions plus display,
    checkpointing, optimizer, and loss-weight flags.

    Also sets isTrain = True so the rest of the code knows this is a
    training run.
    """

    def initialize(self, parser):
        # Register the options shared with testing first.
        BaseOptions.initialize(self, parser)
        # --- visdom / HTML display options ---
        parser.add_argument('--display_freq', type=int, default=400, help='frequency of showing training results on screen')
        parser.add_argument('--display_ncols', type=int, default=4, help='if positive, display all images in a single visdom web panel with certain number of images per row.')
        parser.add_argument('--display_id', type=int, default=1, help='window id of the web display')
        parser.add_argument('--display_port', type=int, default=8097, help='visdom display port')
        parser.add_argument('--display_env', type=str, default='main', help='visdom display environment name (default is "main")')
        parser.add_argument('--display_server', type=str, default='http://localhost', help='visdom server of the web display')
        parser.add_argument('--update_html_freq', type=int, default=4000, help='frequency of saving training results to html')
        parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console')
        # --- checkpointing options ---
        parser.add_argument('--save_latest_freq', type=int, default=10000, help='frequency of saving the latest results')
        parser.add_argument('--save_epoch_freq', type=int, default=5, help='frequency of saving checkpoints at the end of epochs')
        parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
        parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')
        # --- objective / schedule options ---
        parser.add_argument('--gan_mode', type=str, default='lsgan', help='the type of GAN objective. [vanilla | lsgan | wgangp]. vanilla GAN loss is the cross-entropy objective used in the original GAN paper.')
        parser.add_argument('--niter', type=int, default=100, help='# of iter at starting learning rate')
        parser.add_argument('--niter_decay', type=int, default=100, help='# of iter to linearly decay learning rate to zero')
        parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')
        parser.add_argument('--epoch_count', type=int, default=1, help='the starting epoch count, we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>, ...')
        parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam')
        parser.add_argument('--lr_policy', type=str, default='linear', help='learning rate policy: linear | step | plateau | cosine')
        parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam')
        parser.add_argument('--lr_decay_iters', type=int, default=100, help='multiply by a gamma every lr_decay_iters iterations')
        # --- loss weights ---
        parser.add_argument('--lambda_L1', type=float, default=10.0, help='weight for |B-G(A, E(B))|')
        parser.add_argument('--lambda_GAN', type=float, default=1.0, help='weight on D loss. D(G(A, E(B)))')
        parser.add_argument('--lambda_GAN2', type=float, default=1.0, help='weight on D2 loss, D(G(A, random_z))')
        parser.add_argument('--lambda_z', type=float, default=0.0, help='weight for ||E(G(random_z)) - random_z||')
        parser.add_argument('--lambda_kl', type=float, default=0.01, help='weight for KL loss')
        parser.add_argument('--use_same_D', action='store_true', help='if two Ds share the weights or not')
        # --- contrastive-loss options ---
        parser.add_argument('--lambda_contra', type=float, default=0.3, help='weight for contrastive loss')
        parser.add_argument('--num_negative', type=int, default=10, help='number of latent negative samples')
        parser.add_argument('--radius', type=float, default=0.01, help='positive sample - distance threshold')
        parser.add_argument('--tau', type=float, default=1.0, help='temperature')
        parser.add_argument('--featnorm', action='store_true', help='whether featnorm')
        # Consumed by BaseOptions.parse() / gather_options().
        self.isTrain = True
        return parser
|
class HTML():
    """This HTML class allows us to save images and write texts into a single HTML file.

    It consists of functions such as <add_header> (add a text header to the HTML file),
    <add_images> (add a row of images to the HTML file), and <save> (save the HTML to disk).
    It is based on 'dominate', a Python library for creating and manipulating
    HTML documents using a DOM API.
    """

    def __init__(self, web_dir, title, refresh=0):
        """Initialize the HTML class.

        Parameters:
            web_dir (str) -- directory that stores the webpage; the HTML file is
                             created at <web_dir>/index.html and images are saved
                             under <web_dir>/images/
            title (str)   -- the webpage name
            refresh (int) -- how often the website refreshes itself; 0 disables
                             refreshing (fixes the original docstring, which
                             misnamed this parameter 'reflect')
        """
        self.title = title
        self.web_dir = web_dir
        self.img_dir = os.path.join(self.web_dir, 'images')
        # exist_ok avoids the check-then-create race of the original code.
        os.makedirs(self.web_dir, exist_ok=True)
        os.makedirs(self.img_dir, exist_ok=True)
        self.doc = dominate.document(title=title)
        if (refresh > 0):
            with self.doc.head:
                meta(http_equiv='refresh', content=str(refresh))

    def get_image_dir(self):
        """Return the directory that stores images."""
        return self.img_dir

    def add_header(self, text):
        """Insert a header into the HTML file.

        Parameters:
            text (str) -- the header text
        """
        with self.doc:
            h3(text)

    def add_images(self, ims, txts, links, width=400):
        """Add a row of images to the HTML file.

        Parameters:
            ims (str list)   -- a list of image paths
            txts (str list)  -- a list of image names shown on the website
            links (str list) -- a list of hyperlink targets; clicking an image
                                redirects to the corresponding page
            width (int)      -- displayed image width in pixels
        """
        self.t = table(border=1, style='table-layout: fixed;')
        self.doc.add(self.t)
        with self.t:
            with tr():
                for (im, txt, link) in zip(ims, txts, links):
                    with td(style='word-wrap: break-word;', halign='center', valign='top'):
                        with p():
                            with a(href=os.path.join('images', link)):
                                img(style=('width:%dpx' % width), src=os.path.join('images', im))
                            br()
                            p(txt)

    def save(self):
        """Save the current content to the HTML file."""
        html_file = ('%s/index.html' % self.web_dir)
        # Bug fix: use a context manager so the file handle is closed even if
        # rendering or writing raises (the original leaked it on error).
        with open(html_file, 'wt') as f:
            f.write(self.doc.render())
|
def save_images(webpage, images, names, image_path, aspect_ratio=1.0, width=256):
    """Save images to the disk and register them on an HTML page.

    Parameters:
        webpage (the HTML class)    -- the HTML webpage class that stores these images (see html.py for more details)
        images (numpy array list)   -- a list of numpy arrays that store images
        names (str list)            -- the names of the images above
        image_path (str)            -- used to derive the on-disk image filenames
        aspect_ratio (float)        -- the aspect ratio of saved images
        width (int)                 -- the images will be resized to width x width

    This function saves the images in 'images' to the HTML file specified by 'webpage'.
    """
    out_dir = webpage.get_image_dir()
    base = ntpath.basename(image_path)
    webpage.add_header(base)
    ims, txts, links = [], [], []
    for label, tensor in zip(names, images):
        arr = util.tensor2im(tensor)
        fname = ('%s_%s.png' % (base, label))
        util.save_image(arr, os.path.join(out_dir, fname), aspect_ratio=aspect_ratio)
        ims.append(fname)
        txts.append(label)
        links.append(fname)
    webpage.add_images(ims, txts, links, width=width)
|
class Visualizer():
    """This class includes several functions that can display/save images and print/save logging information.

    It uses a Python library 'visdom' for display, and a Python library 'dominate'
    (wrapped in 'HTML') for creating HTML files with images.
    """

    def __init__(self, opt):
        """Initialize the Visualizer class

        Parameters:
            opt -- stores all the experiment flags; needs to be a subclass of BaseOptions
        Step 1: Cache the training/test options
        Step 2: connect to a visdom server
        Step 3: create an HTML object for saving HTML filters
        Step 4: create a logging file to store training losses
        """
        self.opt = opt
        self.display_id = opt.display_id
        self.use_html = (opt.isTrain and (not opt.no_html))
        self.win_size = opt.display_winsize
        self.name = opt.name
        self.port = opt.display_port
        self.saved = False  # whether the current epoch's results were already written to HTML
        if (self.display_id > 0):
            # deferred import: visdom is only required when display is enabled
            import visdom
            self.ncols = opt.display_ncols
            self.vis = visdom.Visdom(server=opt.display_server, port=opt.display_port, env=opt.display_env)
            if (not self.vis.check_connection()):
                self.create_visdom_connections()
        if self.use_html:
            self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')
            self.img_dir = os.path.join(self.web_dir, 'images')
            print(('create web directory %s...' % self.web_dir))
            util.mkdirs([self.web_dir, self.img_dir])
        self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
        with open(self.log_name, 'a') as log_file:
            now = time.strftime('%c')
            log_file.write(('================ Training Loss (%s) ================\n' % now))

    def reset(self):
        """Reset the self.saved status"""
        self.saved = False

    def create_visdom_connections(self):
        """If the program could not connect to Visdom server, this function will start a new server at port <self.port>"""
        cmd = (sys.executable + (' -m visdom.server -p %d &>/dev/null &' % self.port))
        print('\n\nCould not connect to Visdom server. \n Trying to start a server....')
        print(('Command: %s' % cmd))
        Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)

    def display_current_results(self, visuals, epoch, save_result):
        """Display current results on visdom; save current results to an HTML file.

        Parameters:
            visuals (OrderedDict) - - dictionary of images to display or save
            epoch (int) - - the current epoch
            save_result (bool) - - if save the current results to an HTML file
        """
        if (self.display_id > 0):  # show results in the browser using visdom
            ncols = self.ncols
            if (ncols > 0):  # show all images in one visdom panel laid out as a table
                ncols = min(ncols, len(visuals))
                (h, w) = next(iter(visuals.values())).shape[:2]
                table_css = ('<style>\n table {border-collapse: separate; border-spacing: 4px; white-space: nowrap; text-align: center}\n table td {width: % dpx; height: % dpx; padding: 4px; outline: 4px solid black}\n </style>' % (w, h))
                title = self.name
                label_html = ''
                label_html_row = ''
                images = []
                idx = 0
                for (label, image) in visuals.items():
                    image_numpy = util.tensor2im(image)
                    label_html_row += ('<td>%s</td>' % label)
                    images.append(image_numpy.transpose([2, 0, 1]))
                    idx += 1
                    if ((idx % ncols) == 0):
                        label_html += ('<tr>%s</tr>' % label_html_row)
                        label_html_row = ''
                # pad the last row with white images so the grid is rectangular
                white_image = (np.ones_like(image_numpy.transpose([2, 0, 1])) * 255)
                while ((idx % ncols) != 0):
                    images.append(white_image)
                    label_html_row += '<td></td>'
                    idx += 1
                if (label_html_row != ''):
                    label_html += ('<tr>%s</tr>' % label_html_row)
                try:
                    self.vis.images(images, nrow=ncols, win=(self.display_id + 1), padding=2, opts=dict(title=(title + ' images')))
                    label_html = ('<table>%s</table>' % label_html)
                    self.vis.text((table_css + label_html), win=(self.display_id + 2), opts=dict(title=(title + ' labels')))
                except VisdomExceptionBase:
                    self.create_visdom_connections()
            else:  # show each image in a separate visdom panel
                idx = 1
                try:
                    for (label, image) in visuals.items():
                        image_numpy = util.tensor2im(image)
                        self.vis.image(image_numpy.transpose([2, 0, 1]), opts=dict(title=label), win=(self.display_id + idx))
                        idx += 1
                except VisdomExceptionBase:
                    self.create_visdom_connections()
        if (self.use_html and (save_result or (not self.saved))):
            self.saved = True
            # save each current image to disk
            for (label, image) in visuals.items():
                image_numpy = util.tensor2im(image)
                img_path = os.path.join(self.img_dir, ('epoch%.3d_%s.png' % (epoch, label)))
                util.save_image(image_numpy, img_path)
            # rebuild the HTML index linking the saved images of every epoch so far
            webpage = html.HTML(self.web_dir, ('Experiment name = %s' % self.name), refresh=1)
            for n in range(epoch, 0, (- 1)):
                webpage.add_header(('epoch [%d]' % n))
                (ims, txts, links) = ([], [], [])
                for label in visuals:
                    # BUG FIX: the original called util.tensor2im(image) here on a stale
                    # loop variable left over from the save loop above; its result was
                    # never used — only the label is needed to build the file names.
                    img_path = ('epoch%.3d_%s.png' % (n, label))
                    ims.append(img_path)
                    txts.append(label)
                    links.append(img_path)
                webpage.add_images(ims, txts, links, width=self.win_size)
            webpage.save()

    def plot_current_losses(self, epoch, counter_ratio, losses):
        """display the current losses on visdom display: dictionary of error labels and values

        Parameters:
            epoch (int)           -- current epoch
            counter_ratio (float) -- progress (percentage) in the current epoch, between 0 to 1
            losses (OrderedDict)  -- training losses stored in the format of (name, float) pairs
        """
        if (not hasattr(self, 'plot_data')):
            self.plot_data = {'X': [], 'Y': [], 'legend': list(losses.keys())}
        self.plot_data['X'].append((epoch + counter_ratio))
        self.plot_data['Y'].append([losses[k] for k in self.plot_data['legend']])
        try:
            self.vis.line(X=np.stack(([np.array(self.plot_data['X'])] * len(self.plot_data['legend'])), 1), Y=np.array(self.plot_data['Y']), opts={'title': (self.name + ' loss over time'), 'legend': self.plot_data['legend'], 'xlabel': 'epoch', 'ylabel': 'loss'}, win=self.display_id)
        except VisdomExceptionBase:
            self.create_visdom_connections()

    def print_current_losses(self, epoch, iters, losses, t_comp, t_data):
        """print current losses on console; also save the losses to the disk

        Parameters:
            epoch (int)          -- current epoch
            iters (int)          -- current training iteration during this epoch (reset to 0 at the end of every epoch)
            losses (OrderedDict) -- training losses stored in the format of (name, float) pairs
            t_comp (float)       -- computational time per data point (normalized by batch_size)
            t_data (float)       -- data loading time per data point (normalized by batch_size)
        """
        message = ('(epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (epoch, iters, t_comp, t_data))
        for (k, v) in losses.items():
            message += ('%s: %.3f ' % (k, v))
        print(message)
        with open(self.log_name, 'a') as log_file:
            log_file.write(('%s\n' % message))
|
class generator(nn.Module):
    """Conditional DCGAN generator.

    Maps a latent code plus a one-hot class map (B, class_num, 1, 1) to an image
    in [-1, 1] via four transposed convolutions (1x1 -> 4 -> 8 -> 16 -> 32).
    Attribute names are kept stable so existing checkpoints load unchanged.
    """

    def __init__(self, opts, d=128):
        super(generator, self).__init__()
        # stage 1: (nz + class_num) x 1 x 1 -> 4d x 4 x 4
        self.deconv1 = nn.ConvTranspose2d(opts.nz + opts.class_num, d * 4, 4, 1, 0)
        self.deconv1_bn = nn.BatchNorm2d(d * 4)
        self.relu1 = nn.ReLU()
        # stage 2: 4d x 4 x 4 -> 2d x 8 x 8
        self.deconv2 = nn.ConvTranspose2d(d * 4, d * 2, 4, 2, 1)
        self.deconv2_bn = nn.BatchNorm2d(d * 2)
        self.relu2 = nn.ReLU()
        # stage 3: 2d x 8 x 8 -> d x 16 x 16
        self.deconv3 = nn.ConvTranspose2d(d * 2, d, 4, 2, 1)
        self.deconv3_bn = nn.BatchNorm2d(d)
        self.relu3 = nn.ReLU()
        # stage 4: d x 16 x 16 -> 3 x 32 x 32, squashed to [-1, 1]
        self.deconv4 = nn.ConvTranspose2d(d, 3, 4, 2, 1)
        self.tanh = nn.Tanh()

    def weight_init(self):
        """Re-initialize all conv weights via gaussian_weights_init."""
        for name in self._modules:
            gaussian_weights_init(self._modules[name])

    def forward(self, input, label):
        # lift z to a 1x1 spatial map and concatenate the class map along channels
        h = torch.cat([input.unsqueeze(2).unsqueeze(3), label], 1)
        h = self.relu1(self.deconv1_bn(self.deconv1(h)))
        h = self.relu2(self.deconv2_bn(self.deconv2(h)))
        h = self.relu3(self.deconv3_bn(self.deconv3(h)))
        return self.tanh(self.deconv4(h))
|
class discriminator(nn.Module):
    """Conditional DCGAN discriminator.

    Takes an image concatenated with a broadcast class map and outputs a
    sigmoid real/fake score; can additionally return an encoded feature map.
    Attribute names are kept stable so existing checkpoints load unchanged.
    """

    def __init__(self, opts, d=128):
        super(discriminator, self).__init__()
        # (3 + class_num) x 32 x 32 -> d x 16 x 16
        self.conv1 = nn.Conv2d(3 + opts.class_num, d, 4, 2, 1)
        self.lrelu1 = nn.LeakyReLU(0.2)
        # d x 16 x 16 -> 2d x 8 x 8
        self.conv2 = nn.Conv2d(d, d * 2, 4, 2, 1)
        self.conv2_bn = nn.BatchNorm2d(d * 2)
        self.lrelu2 = nn.LeakyReLU(0.2)
        # 2d x 8 x 8 -> 4d x 4 x 4
        self.conv3 = nn.Conv2d(d * 2, d * 4, 4, 2, 1)
        self.conv3_bn = nn.BatchNorm2d(d * 4)
        self.lrelu3 = nn.LeakyReLU(0.2)
        # 4d x 4 x 4 -> 1 x 1 x 1 score
        self.conv4 = nn.Conv2d(d * 4, 1, 4, 1, 0)
        self.sigmoid = nn.Sigmoid()
        # projection head used for the contrastive feature
        self.enc_feat = nn.Conv2d(d * 4, d, 4, 1, 0)

    def weight_init(self):
        """Re-initialize all conv weights via gaussian_weights_init."""
        for name in self._modules:
            gaussian_weights_init(self._modules[name])

    def forward(self, input, label, enc_feat=False):
        # broadcast the (B, C, 1, 1) class map to the image's spatial size
        label = label.expand(label.shape[0], label.shape[1], input.shape[2], input.shape[3])
        h = torch.cat([input, label], 1)
        h = self.lrelu1(self.conv1(h))
        h = self.lrelu2(self.conv2_bn(self.conv2(h)))
        h = self.lrelu3(self.conv3_bn(self.conv3(h)))
        score = self.sigmoid(self.conv4(h))
        if enc_feat:
            return (score, self.enc_feat(h))
        return score
|
def gaussian_weights_init(m):
    """Initialize conv-layer weights in place with N(0, 0.02).

    Applies to any module whose class name starts with 'Conv' (Conv2d,
    ConvTranspose2d, ...); all other modules are left untouched.
    The original condition `find('Conv') != -1 and find('Conv') == 0`
    was redundant — the second test implies the first.
    """
    if m.__class__.__name__.startswith('Conv'):
        m.weight.data.normal_(0.0, 0.02)
|
class BaseOptions():
    """Command-line options shared by training and testing."""

    def __init__(self):
        self.parser = argparse.ArgumentParser()
        add = self.parser.add_argument
        add('--dataroot', type=str, required=True, help='path of data')
        add('--img_size', type=int, default=32, help='resized image size for training')
        add('--nz', type=int, default=100, help='dimensions of z')
        add('--class_num', type=int, default=10, help='class number of the dataset')
|
class TrainOptions(BaseOptions):
    """Command-line options used during training (extends BaseOptions)."""

    def __init__(self):
        super(TrainOptions, self).__init__()
        add = self.parser.add_argument
        # data loading
        add('--phase', type=str, default='train', help='phase for dataloading')
        add('--batch_size', type=int, default=32, help='batch size')
        add('--nThreads', type=int, default=0, help='# of threads for data loader')
        # output / logging
        add('--name', type=str, default='trial', help='folder name to save outputs')
        add('--display_dir', type=str, default='./logs', help='path for saving display results')
        add('--result_dir', type=str, default='./results', help='path for saving result images and models')
        add('--display_freq', type=int, default=1, help='freq (iteration) of display')
        add('--img_save_freq', type=int, default=1, help='freq (epoch) of saving images')
        add('--model_save_freq', type=int, default=50, help='freq (epoch) of saving models')
        add('--no_display_img', action='store_true', help='specified if no dispaly')
        # training schedule
        add('--n_ep', type=int, default=200, help='number of epochs')
        add('--resume', type=str, default=None, help='specified the dir of saved models for resume the training')
        add('--gpu', type=int, default=0, help='gpu')
        # DivCo contrastive loss
        add('--lambda_contra', type=float, default=1.0, help='weight for contrastive loss')
        add('--num_negative', type=int, default=10, help='number of latent negative samples')
        add('--radius', type=float, default=0.001, help='positive sample - distance threshold')
        add('--tau', type=float, default=1, help='temprature')
        add('--featnorm', action='store_true', help='whether featnorm')

    def parse(self):
        """Parse sys.argv, echo every option, and return the namespace."""
        self.opt = self.parser.parse_args()
        print('\n--- load options ---')
        for key, value in sorted(vars(self.opt).items()):
            print(('%s: %s' % (str(key), str(value))))
        return self.opt
|
class TestOptions(BaseOptions):
    """Command-line options used at test time (extends BaseOptions)."""

    def __init__(self):
        super(TestOptions, self).__init__()
        add = self.parser.add_argument
        add('--phase', type=str, default='test', help='phase for dataloading')
        add('--num', type=int, default=5, help='number of outputs per image')
        add('--name', type=str, default='CIFAR10', help='folder name to save outputs')
        add('--result_dir', type=str, default='./results', help='path for saving result images and models')
        add('--resume', type=str, required=True, help='specified the dir of saved models for resume the training')
        add('--gpu', type=int, default=0, help='gpu')

    def parse(self):
        """Parse sys.argv, echo every option, inject fixed discriminator settings, return the namespace."""
        self.opt = self.parser.parse_args()
        print('\n--- load options ---')
        for key, value in sorted(vars(self.opt).items()):
            print(('%s: %s' % (str(key), str(value))))
        # fixed discriminator settings expected by the model at test time
        self.opt.dis_scale = 3
        self.opt.dis_norm = 'None'
        self.opt.dis_spectral_norm = False
        return self.opt
|
def tensor2img(img):
    """Convert the first image of a (B, C, H, W) tensor in [-1, 1] to an HWC uint8 array.

    Single-channel inputs are tiled to three channels before conversion.
    """
    arr = img[0].cpu().float().numpy()
    if arr.shape[0] == 1:
        arr = np.tile(arr, (3, 1, 1))
    # map [-1, 1] -> [0, 255] and move channels last
    arr = (np.transpose(arr, (1, 2, 0)) + 1) / 2.0 * 255.0
    return arr.astype(np.uint8)
|
def save_img(img, name, path):
    """Save one image tensor as <path>/<name>.png.

    Uses os.makedirs(exist_ok=True): the original os.mkdir failed when the
    parent directory was missing and raced with concurrent creation.
    """
    os.makedirs(path, exist_ok=True)
    img = tensor2img(img)
    img = Image.fromarray(img)
    img.save(os.path.join(path, (name + '.png')))
|
def save_imgs(imgs, names, path):
    """Save a list of image tensors as <path>/<name>.png, pairing imgs with names.

    Uses os.makedirs(exist_ok=True): the original os.mkdir failed when the
    parent directory was missing and raced with concurrent creation.
    """
    os.makedirs(path, exist_ok=True)
    for (img, name) in zip(imgs, names):
        img = tensor2img(img)
        img = Image.fromarray(img)
        img.save(os.path.join(path, (name + '.png')))
|
class Saver():
    """Write tensorboard summaries, sample image grids, and model checkpoints."""

    def __init__(self, opts):
        self.display_dir = os.path.join(opts.display_dir, opts.name)
        self.model_dir = os.path.join(opts.result_dir, opts.name)
        self.image_dir = os.path.join(self.model_dir, 'images')
        self.display_freq = opts.display_freq
        self.img_save_freq = opts.img_save_freq
        self.model_save_freq = opts.model_save_freq
        # create all output folders (parents included, no race on re-check)
        os.makedirs(self.display_dir, exist_ok=True)
        os.makedirs(self.model_dir, exist_ok=True)
        os.makedirs(self.image_dir, exist_ok=True)
        self.writer = SummaryWriter(log_dir=self.display_dir)

    def write_display(self, total_it, model):
        """Every display_freq iterations, log all 'loss'-named model attributes and the image grid."""
        if (((total_it + 1) % self.display_freq) == 0):
            members = [attr for attr in dir(model) if ((not callable(getattr(model, attr))) and (not attr.startswith('__')) and ('loss' in attr))]
            for m in members:
                self.writer.add_scalar(m, getattr(model, m), total_it)
            # de-normalize from [-1, 1] to [0, 1] for display
            image_dis = ((torchvision.utils.make_grid(model.image_display, nrow=(model.image_display.size(0) // 2)) / 2) + 0.5)
            self.writer.add_image('Image', image_dis, total_it)

    def write_img(self, ep, model):
        """Save the assembled output grid every img_save_freq epochs; ep == -1 means final save.

        BUG FIX: the original tested the frequency first, so the ep == -1 branch was
        unreachable ((ep + 1) % freq == 0 holds for ep == -1) and its format string
        '%s/gen_last.jpg' % (self.image_dir, ep) would have raised TypeError anyway.
        """
        if (ep == (- 1)):
            assembled_images = model.assemble_outputs()
            img_filename = ('%s/gen_last.jpg' % self.image_dir)
            torchvision.utils.save_image(((assembled_images / 2) + 0.5), img_filename, nrow=8)
        elif (((ep + 1) % self.img_save_freq) == 0):
            assembled_images = model.assemble_outputs()
            img_filename = ('%s/gen_%05d.jpg' % (self.image_dir, ep))
            torchvision.utils.save_image(((assembled_images / 2) + 0.5), img_filename, nrow=8)

    def write_model(self, ep, total_it, model):
        """Checkpoint every model_save_freq epochs; ep == -1 saves 'last.pth'.

        The ep == -1 test runs first for the same reason as in write_img.
        """
        if (ep == (- 1)):
            model.save(('%s/last.pth' % self.model_dir), ep, total_it)
        elif (((ep + 1) % self.model_save_freq) == 0):
            print(('--- save the model @ ep %d ---' % ep))
            model.save(('%s/%05d.pth' % (self.model_dir, ep)), ep, total_it)
|
def main():
    """Generate opts.num samples for every CIFAR-10 test image and save them per class."""
    parser = TestOptions()
    opts = parser.parse()
    print('\n--- load dataset ---')
    transform = transforms.Compose([
        transforms.Resize(opts.img_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    dataset = torchvision.datasets.CIFAR10(opts.dataroot, train=False, download=True, transform=transform)
    loader = torch.utils.data.DataLoader(dataset, batch_size=1, num_workers=0)
    print('\n--- load model ---')
    model = DivCo_DCGAN(opts)
    model.eval()
    model.setgpu(opts.gpu)
    model.resume(opts.resume, train=False)
    result_dir = os.path.join(opts.result_dir, opts.name)
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)
    print('\n--- testing ---')
    # per-class counters: how many test images seen / samples generated per label
    test_class_images = [0] * 10
    generate_class_images = [0] * 10
    for idx1, (img1, label) in enumerate(loader):
        print('{}/{}'.format(idx1, len(loader)))
        imgs = []
        names = []
        label_id = label[0].numpy()
        test_class_images[label_id] += 1
        for _ in range(opts.num):
            generate_class_images[label_id] += 1
            with torch.no_grad():
                imgs.append(model.test_forward(label))
            names.append('img_{}'.format(generate_class_images[label_id]))
        # NOTE(review): label_names is expected to be a module-level list of
        # class-name strings — not visible in this chunk, confirm it exists.
        save_imgs(imgs, names, os.path.join(result_dir, '{}'.format(label_names[label_id])))
    return
|
def main():
    """Train DivCo_DCGAN on CIFAR-10, logging/saving via Saver."""
    opts = TrainOptions().parse()
    print('\n--- load dataset ---')
    os.makedirs(opts.dataroot, exist_ok=True)
    transform = transforms.Compose([
        transforms.Resize(opts.img_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    dataset = torchvision.datasets.CIFAR10(opts.dataroot, train=True, download=True, transform=transform)
    train_loader = torch.utils.data.DataLoader(dataset, batch_size=opts.batch_size, shuffle=True, num_workers=opts.nThreads)
    print('\n--- load model ---')
    model = DivCo_DCGAN(opts)
    model.setgpu(opts.gpu)
    if opts.resume is None:
        model.initialize()
        ep0 = -1
        total_it = 0
    else:
        ep0, total_it = model.resume(opts.resume)
    ep0 += 1  # first epoch to run (0 for a fresh start)
    print('start the training at epoch %d' % ep0)
    saver = Saver(opts)
    print('\n--- train ---')
    for ep in range(ep0, opts.n_ep):
        for it, (images, label) in enumerate(train_loader):
            # skip the final incomplete batch
            if images.size(0) != opts.batch_size:
                continue
            images = images.cuda(opts.gpu).detach()
            model.update(images, label)
            if not opts.no_display_img:
                saver.write_display(total_it, model)
            print('total_it: %d (ep %d, it %d), lr %08f' % (total_it, ep, it, model.gen_opt.param_groups[0]['lr']))
            total_it += 1
        saver.write_img(ep, model)
        saver.write_model(ep, total_it, model)
    return
|
class dataset_single(data.Dataset):
    """Single-domain dataset: loads images from <dataroot>/<phase><setname>.

    Returns (transformed image tensor, file basename) pairs.
    """

    def __init__(self, opts, setname, input_dim):
        self.dataroot = opts.dataroot
        folder = os.path.join(self.dataroot, (opts.phase + setname))
        # sorted() makes the sample order deterministic across filesystems
        # (os.listdir returns entries in arbitrary order)
        self.img = [os.path.join(folder, x) for x in sorted(os.listdir(folder))]
        self.size = len(self.img)
        self.input_dim = input_dim
        transforms = [Resize((opts.resize_size, opts.resize_size), Image.BICUBIC)]
        transforms.append(CenterCrop(opts.crop_size))
        transforms.append(ToTensor())
        transforms.append(Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]))
        self.transforms = Compose(transforms)
        print(('%s: %d images' % (setname, self.size)))
        return

    def __getitem__(self, index):
        data = self.load_img(self.img[index], self.input_dim)
        # BUG FIX: os.path.basename replaces split('/')[-1], which broke on
        # Windows-style path separators
        return (data, os.path.basename(self.img[index]))

    def load_img(self, img_name, input_dim):
        """Load one image, apply the transform pipeline, optionally collapse to 1 channel."""
        img = Image.open(img_name).convert('RGB')
        img = self.transforms(img)
        if (input_dim == 1):
            # ITU-R BT.601 luma weights for RGB -> grayscale
            img = (((img[(0, ...)] * 0.299) + (img[(1, ...)] * 0.587)) + (img[(2, ...)] * 0.114))
            img = img.unsqueeze(0)
        return img

    def __len__(self):
        return self.size
|
class dataset_unpair(data.Dataset):
    """Unpaired two-domain dataset: domain A from <phase>A, domain B from <phase>B.

    The longer domain is indexed directly; the shorter one is sampled uniformly
    at random, so every __getitem__ yields one (A, B) pair.
    """

    def __init__(self, opts):
        self.dataroot = opts.dataroot
        dir_A = os.path.join(self.dataroot, (opts.phase + 'A'))
        dir_B = os.path.join(self.dataroot, (opts.phase + 'B'))
        self.A = [os.path.join(dir_A, name) for name in os.listdir(dir_A)]
        self.B = [os.path.join(dir_B, name) for name in os.listdir(dir_B)]
        self.A_size = len(self.A)
        self.B_size = len(self.B)
        self.dataset_size = max(self.A_size, self.B_size)
        self.input_dim_A = opts.input_dim_a
        self.input_dim_B = opts.input_dim_b
        # build the transform pipeline: resize, crop (random only for training),
        # optional flip, tensor conversion, [-1, 1] normalization
        ops = [Resize((opts.resize_size, opts.resize_size), Image.BICUBIC)]
        if opts.phase == 'train':
            ops.append(RandomCrop(opts.crop_size))
        else:
            ops.append(CenterCrop(opts.crop_size))
        if not opts.no_flip:
            ops.append(RandomHorizontalFlip())
        ops.append(ToTensor())
        ops.append(Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]))
        self.transforms = Compose(ops)
        print(('A: %d, B: %d images' % (self.A_size, self.B_size)))
        return

    def __getitem__(self, index):
        if self.dataset_size == self.A_size:
            data_A = self.load_img(self.A[index], self.input_dim_A)
            data_B = self.load_img(self.B[random.randint(0, (self.B_size - 1))], self.input_dim_B)
        else:
            data_A = self.load_img(self.A[random.randint(0, (self.A_size - 1))], self.input_dim_A)
            data_B = self.load_img(self.B[index], self.input_dim_B)
        return (data_A, data_B)

    def load_img(self, img_name, input_dim):
        """Load one image, apply the transform pipeline, optionally collapse to 1 channel."""
        img = Image.open(img_name).convert('RGB')
        img = self.transforms(img)
        if input_dim == 1:
            # ITU-R BT.601 luma weights for RGB -> grayscale
            img = (img[(0, ...)] * 0.299) + (img[(1, ...)] * 0.587) + (img[(2, ...)] * 0.114)
            img = img.unsqueeze(0)
        return img

    def __len__(self):
        return self.dataset_size
|
class Dis_content(nn.Module):
    """Discriminator over 256-channel content features (for adversarial content alignment)."""

    def __init__(self):
        super(Dis_content, self).__init__()
        layers = []
        # three stride-2 downsampling stages
        for _ in range(3):
            layers.append(LeakyReLUConv2d(256, 256, kernel_size=7, stride=2, padding=1, norm='Instance'))
        layers.append(LeakyReLUConv2d(256, 256, kernel_size=4, stride=1, padding=0))
        layers.append(nn.Conv2d(256, 1, kernel_size=1, stride=1, padding=0))
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        # flatten the prediction map; returned as a one-element list
        pred = self.model(x).view(-1)
        return [pred]
|
class MultiScaleDis(nn.Module):
    """Multi-scale PatchGAN-style discriminator: one sub-net per scale, input halved between scales."""

    def __init__(self, input_dim, n_scale=3, n_layer=4, norm='None', sn=False):
        super(MultiScaleDis, self).__init__()
        ch = 64
        self.downsample = nn.AvgPool2d(3, stride=2, padding=1, count_include_pad=False)
        self.Diss = nn.ModuleList(
            [self._make_net(ch, input_dim, n_layer, norm, sn) for _ in range(n_scale)])

    def _make_net(self, ch, input_dim, n_layer, norm, sn):
        """Build one per-scale discriminator: n_layer stride-2 convs then a 1x1 head."""
        layers = [LeakyReLUConv2d(input_dim, ch, 4, 2, 1, norm, sn)]
        tch = ch
        for _ in range(1, n_layer):
            layers.append(LeakyReLUConv2d(tch, tch * 2, 4, 2, 1, norm, sn))
            tch *= 2
        head = nn.Conv2d(tch, 1, 1, 1, 0)
        layers.append(spectral_norm(head) if sn else head)
        return nn.Sequential(*layers)

    def forward(self, x):
        outs = []
        for Dis in self.Diss:
            outs.append(Dis(x))
            # halve the resolution for the next scale
            x = self.downsample(x)
        return outs
|
class Dis(nn.Module):
    """Single-scale image discriminator: six stride-2 conv stages and a 1x1 head."""

    def __init__(self, input_dim, norm='None', sn=False):
        super(Dis, self).__init__()
        self.model = self._make_net(64, input_dim, 6, norm, sn)

    def _make_net(self, ch, input_dim, n_layer, norm, sn):
        """Stack stride-2 conv blocks, doubling channels; the last block is unnormalized."""
        layers = [LeakyReLUConv2d(input_dim, ch, kernel_size=3, stride=2, padding=1, norm=norm, sn=sn)]
        tch = ch
        for _ in range(1, n_layer - 1):
            layers.append(LeakyReLUConv2d(tch, tch * 2, kernel_size=3, stride=2, padding=1, norm=norm, sn=sn))
            tch *= 2
        layers.append(LeakyReLUConv2d(tch, tch * 2, kernel_size=3, stride=2, padding=1, norm='None', sn=sn))
        tch *= 2
        head = nn.Conv2d(tch, 1, kernel_size=1, stride=1, padding=0)
        layers.append(spectral_norm(head) if sn else head)
        return nn.Sequential(*layers)

    def cuda(self, gpu):
        self.model.cuda(gpu)

    def forward(self, x_A):
        # flatten the prediction map; returned as a one-element list
        pred = self.model(x_A).view(-1)
        return [pred]
|
class E_content(nn.Module):
    """Content encoder with one branch per domain and a shared noisy tail.

    Each branch: 7x7 conv, two stride-2 downsamplings (64 -> 128 -> 256 channels),
    then three residual blocks; the shared tail adds one residual block and
    Gaussian noise.
    """

    def __init__(self, input_dim_a, input_dim_b):
        super(E_content, self).__init__()
        # build branch A, then branch B, then the shared tail — this matches the
        # original module construction order (keeps weight-init RNG consumption identical)
        branch_a = self._branch(input_dim_a)
        branch_b = self._branch(input_dim_b)
        share = [INSResBlock(256, 256), GaussianNoiseLayer()]
        self.conv_share = nn.Sequential(*share)
        self.convA = nn.Sequential(*branch_a)
        self.convB = nn.Sequential(*branch_b)

    def _branch(self, input_dim):
        """Build one domain branch as a list of modules ending at 256 channels."""
        tch = 64
        layers = [LeakyReLUConv2d(input_dim, tch, kernel_size=7, stride=1, padding=3)]
        for _ in range(2):
            layers.append(ReLUINSConv2d(tch, tch * 2, kernel_size=3, stride=2, padding=1))
            tch *= 2
        for _ in range(3):
            layers.append(INSResBlock(tch, tch))
        return layers

    def forward(self, xa, xb):
        return (self.forward_a(xa), self.forward_b(xb))

    def forward_a(self, xa):
        return self.conv_share(self.convA(xa))

    def forward_b(self, xb):
        return self.conv_share(self.convB(xb))
|
class E_attr(nn.Module):
    """Attribute (style) encoder: one conv branch per domain, global-pooled to a flat code."""

    def __init__(self, input_dim_a, input_dim_b, output_nc=8):
        super(E_attr, self).__init__()
        dim = 64
        # branch A is built before branch B, matching the original construction order
        self.model_a = self._make_encoder(input_dim_a, dim, output_nc)
        self.model_b = self._make_encoder(input_dim_b, dim, output_nc)
        return

    @staticmethod
    def _make_encoder(input_dim, dim, output_nc):
        """7x7 stem, four stride-2 convs, global average pool, 1x1 projection.

        Module order is identical to the original inline Sequential, so
        state-dict keys are unchanged.
        """
        layers = [
            nn.ReflectionPad2d(3),
            nn.Conv2d(input_dim, dim, 7, 1),
            nn.ReLU(inplace=True),
        ]
        for in_ch, out_ch in ((dim, dim * 2), (dim * 2, dim * 4),
                              (dim * 4, dim * 4), (dim * 4, dim * 4)):
            layers += [nn.ReflectionPad2d(1), nn.Conv2d(in_ch, out_ch, 4, 2), nn.ReLU(inplace=True)]
        layers += [nn.AdaptiveAvgPool2d(1), nn.Conv2d(dim * 4, output_nc, 1, 1, 0)]
        return nn.Sequential(*layers)

    def forward(self, xa, xb):
        feat_a = self.model_a(xa)
        feat_b = self.model_b(xb)
        return (feat_a.view(feat_a.size(0), -1), feat_b.view(feat_b.size(0), -1))

    def forward_a(self, xa):
        feat = self.model_a(xa)
        return feat.view(feat.size(0), -1)

    def forward_b(self, xb):
        feat = self.model_b(xb)
        return feat.view(feat.size(0), -1)
|
class E_attr_concat(nn.Module):
    """Variational attribute encoder: per-domain conv branch plus mean (fc) and log-variance (fcVar) heads."""

    def __init__(self, input_dim_a, input_dim_b, output_nc=8, norm_layer=None, nl_layer=None):
        super(E_attr_concat, self).__init__()
        ndf = 64
        n_blocks = 4
        max_ndf = 4
        # attribute registration order (fc, fcVar, conv per branch) matches the original
        conv_a, self.fc_A, self.fcVar_A = self._make_branch(
            input_dim_a, ndf, n_blocks, max_ndf, output_nc, norm_layer, nl_layer)
        self.conv_A = conv_a
        conv_b, self.fc_B, self.fcVar_B = self._make_branch(
            input_dim_b, ndf, n_blocks, max_ndf, output_nc, norm_layer, nl_layer)
        self.conv_B = conv_b

    @staticmethod
    def _make_branch(input_dim, ndf, n_blocks, max_ndf, output_nc, norm_layer, nl_layer):
        """Build (conv trunk, mean head, log-variance head) for one domain."""
        layers = [nn.ReflectionPad2d(1),
                  nn.Conv2d(input_dim, ndf, kernel_size=4, stride=2, padding=0, bias=True)]
        out_ch = ndf
        for n in range(1, n_blocks):
            in_ch = ndf * min(max_ndf, n)
            out_ch = ndf * min(max_ndf, n + 1)
            layers.append(BasicBlock(in_ch, out_ch, norm_layer, nl_layer))
        layers += [nl_layer(), nn.AdaptiveAvgPool2d(1)]
        fc = nn.Sequential(*[nn.Linear(out_ch, output_nc)])
        fcVar = nn.Sequential(*[nn.Linear(out_ch, output_nc)])
        return nn.Sequential(*layers), fc, fcVar

    def forward(self, xa, xb):
        mu_a, logvar_a = self.forward_a(xa)
        mu_b, logvar_b = self.forward_b(xb)
        return (mu_a, logvar_a, mu_b, logvar_b)

    def forward_a(self, xa):
        flat = self.conv_A(xa).view(xa.size(0), -1)
        return (self.fc_A(flat), self.fcVar_A(flat))

    def forward_b(self, xb):
        flat = self.conv_B(xb).view(xb.size(0), -1)
        return (self.fc_B(flat), self.fcVar_B(flat))
|
class G(nn.Module):
    """Decoder/generator with per-domain residual blocks modulated by an attribute code.

    An 8-dim attribute code is expanded by an MLP into four chunks, each injected
    into one MisINSResBlock; two transposed-conv stages then upsample to the image.
    Attribute names are kept stable so existing checkpoints load unchanged.
    """

    def __init__(self, output_dim_a, output_dim_b, nz):
        super(G, self).__init__()
        self.nz = nz
        ini_tch = 256
        self.tch_add = ini_tch
        # --- domain A decoder ---
        tch = ini_tch
        self.decA1 = MisINSResBlock(tch, self.tch_add)
        self.decA2 = MisINSResBlock(tch, self.tch_add)
        self.decA3 = MisINSResBlock(tch, self.tch_add)
        self.decA4 = MisINSResBlock(tch, self.tch_add)
        up_a = []
        up_a.append(ReLUINSConvTranspose2d(tch, tch // 2, kernel_size=3, stride=2, padding=1, output_padding=1))
        tch //= 2
        up_a.append(ReLUINSConvTranspose2d(tch, tch // 2, kernel_size=3, stride=2, padding=1, output_padding=1))
        tch //= 2
        up_a.append(nn.ConvTranspose2d(tch, output_dim_a, kernel_size=1, stride=1, padding=0))
        up_a.append(nn.Tanh())
        self.decA5 = nn.Sequential(*up_a)
        # --- domain B decoder ---
        tch = ini_tch
        self.decB1 = MisINSResBlock(tch, self.tch_add)
        self.decB2 = MisINSResBlock(tch, self.tch_add)
        self.decB3 = MisINSResBlock(tch, self.tch_add)
        self.decB4 = MisINSResBlock(tch, self.tch_add)
        up_b = []
        up_b.append(ReLUINSConvTranspose2d(tch, tch // 2, kernel_size=3, stride=2, padding=1, output_padding=1))
        tch //= 2
        up_b.append(ReLUINSConvTranspose2d(tch, tch // 2, kernel_size=3, stride=2, padding=1, output_padding=1))
        tch //= 2
        up_b.append(nn.ConvTranspose2d(tch, output_dim_b, kernel_size=1, stride=1, padding=0))
        up_b.append(nn.Tanh())
        self.decB5 = nn.Sequential(*up_b)
        # MLPs expanding the 8-dim attribute code to 4 * tch_add modulation channels
        self.mlpA = nn.Sequential(
            nn.Linear(8, 256), nn.ReLU(inplace=True),
            nn.Linear(256, 256), nn.ReLU(inplace=True),
            nn.Linear(256, self.tch_add * 4))
        self.mlpB = nn.Sequential(
            nn.Linear(8, 256), nn.ReLU(inplace=True),
            nn.Linear(256, 256), nn.ReLU(inplace=True),
            nn.Linear(256, self.tch_add * 4))
        return

    def forward(self, x, z, for_a):
        return self.forward_a(x, z) if for_a else self.forward_b(x, z)

    def forward_a(self, x, z):
        """Decode content x with attribute code z into a domain-A image."""
        chunks = torch.split(self.mlpA(z), self.tch_add, dim=1)
        out = x
        for dec, zi in zip((self.decA1, self.decA2, self.decA3, self.decA4), chunks):
            out = dec(out, zi.contiguous())
        return self.decA5(out)

    def forward_b(self, x, z):
        """Decode content x with attribute code z into a domain-B image."""
        chunks = torch.split(self.mlpB(z), self.tch_add, dim=1)
        out = x
        for dec, zi in zip((self.decB1, self.decB2, self.decB3, self.decB4), chunks):
            out = dec(out, zi.contiguous())
        return self.decB5(out)
|
class G_concat(nn.Module):
    """Generator that injects the attribute code z by channel-wise concatenation.

    A shared residual stage is followed by two symmetric decoders (domain A
    and domain B); before every decoder stage, z is tiled spatially and
    concatenated to the features, so each stage's input width grows by nz.

    The original duplicated the A/B construction and forward paths verbatim;
    they are factored into `_build_decoder` and `_cat_z` here. Attribute names
    (decA1..decA4, decB1..decB4, dec_share) and the layer layout inside each
    Sequential are preserved, so state_dict keys are unchanged.
    """

    def __init__(self, output_dim_a, output_dim_b, nz):
        """Args:
            output_dim_a: output channels for domain-A images.
            output_dim_b: output channels for domain-B images.
            nz: dimensionality of the attribute code z.
        """
        super(G_concat, self).__init__()
        self.nz = nz
        self.dec_share = nn.Sequential(INSResBlock(256, 256))
        (self.decA1, self.decA2, self.decA3, self.decA4) = self._build_decoder(output_dim_a)
        (self.decB1, self.decB2, self.decB3, self.decB4) = self._build_decoder(output_dim_b)

    def _build_decoder(self, output_dim):
        """Build the four z-concatenating stages of one domain decoder.

        Channel bookkeeping matches the original: each stage consumes the
        previous stage's output plus nz concatenated channels.
        """
        tch = 256 + self.nz
        stage1 = []
        for _ in range(3):
            stage1 += [INSResBlock(tch, tch)]
        tch = tch + self.nz
        stage2 = ReLUINSConvTranspose2d(tch, tch // 2, kernel_size=3, stride=2, padding=1, output_padding=1)
        tch = tch // 2 + self.nz
        stage3 = ReLUINSConvTranspose2d(tch, tch // 2, kernel_size=3, stride=2, padding=1, output_padding=1)
        tch = tch // 2 + self.nz
        stage4 = [nn.ConvTranspose2d(tch, output_dim, kernel_size=1, stride=1, padding=0), nn.Tanh()]
        return (nn.Sequential(*stage1), nn.Sequential(stage2), nn.Sequential(stage3), nn.Sequential(*stage4))

    @staticmethod
    def _cat_z(h, z):
        """Tile z over h's spatial dims and concatenate along channels."""
        z_img = z.view(z.size(0), z.size(1), 1, 1).expand(z.size(0), z.size(1), h.size(2), h.size(3))
        return torch.cat([h, z_img], 1)

    def forward(self, x, z, for_a):
        """Dispatch to the domain-A or domain-B decoder."""
        if for_a:
            return self.forward_a(x, z)
        return self.forward_b(x, z)

    def forward_a(self, x, z):
        """Decode content x into a domain-A image conditioned on z."""
        h = self.dec_share(x)
        for stage in (self.decA1, self.decA2, self.decA3, self.decA4):
            h = stage(self._cat_z(h, z))
        return h

    def forward_b(self, x, z):
        """Decode content x into a domain-B image conditioned on z."""
        h = self.dec_share(x)
        for stage in (self.decB1, self.decB2, self.decB3, self.decB4):
            h = stage(self._cat_z(h, z))
        return h
|
def get_scheduler(optimizer, opts, cur_ep=-1):
    """Create a learning-rate scheduler for `optimizer`.

    Args:
        optimizer: the torch optimizer to schedule.
        opts: options object with `lr_policy` ('lambda' or 'step'),
            `n_ep` (total epochs) and `n_ep_decay` (epoch where decay starts).
        cur_ep: last finished epoch (-1 for a fresh run).

    Returns:
        A torch.optim.lr_scheduler instance.

    Raises:
        NotImplementedError: for an unknown lr policy. (BUG fix: the original
        *returned* the exception object instead of raising it, so callers got
        an exception instance where a scheduler was expected.)
    """
    if opts.lr_policy == 'lambda':
        def lambda_rule(ep):
            # Constant at 1.0 until n_ep_decay, then linear decay to ~0 at n_ep.
            return 1.0 - max(0, ep - opts.n_ep_decay) / float(opts.n_ep - opts.n_ep_decay + 1)
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule, last_epoch=cur_ep)
    elif opts.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opts.n_ep_decay, gamma=0.1, last_epoch=cur_ep)
    else:
        raise NotImplementedError('no such learn rate policy')
    return scheduler
|
def meanpoolConv(inplanes, outplanes):
    """2x2 average pooling followed by a 1x1 convolution."""
    return nn.Sequential(
        nn.AvgPool2d(kernel_size=2, stride=2),
        nn.Conv2d(inplanes, outplanes, kernel_size=1, stride=1, padding=0, bias=True),
    )
|
def convMeanpool(inplanes, outplanes):
    """3x3 reflection-padded convolution followed by 2x2 average pooling."""
    layers = conv3x3(inplanes, outplanes)
    layers.append(nn.AvgPool2d(kernel_size=2, stride=2))
    return nn.Sequential(*layers)
|
def get_norm_layer(layer_type='instance'):
    """Return a constructor for the requested 2D normalization layer.

    'batch' -> affine BatchNorm2d, 'instance' -> non-affine InstanceNorm2d,
    'none' -> None. Any other name raises NotImplementedError.
    """
    factories = {
        'batch': functools.partial(nn.BatchNorm2d, affine=True),
        'instance': functools.partial(nn.InstanceNorm2d, affine=False),
        'none': None,
    }
    if layer_type not in factories:
        raise NotImplementedError('normalization layer [%s] is not found' % layer_type)
    return factories[layer_type]
|
def get_non_linearity(layer_type='relu'):
    """Return a constructor for the requested activation ('relu', 'lrelu', 'elu')."""
    table = {
        'relu': functools.partial(nn.ReLU, inplace=True),
        'lrelu': functools.partial(nn.LeakyReLU, negative_slope=0.2, inplace=False),
        'elu': functools.partial(nn.ELU, inplace=True),
    }
    if layer_type not in table:
        raise NotImplementedError('nonlinearity activitation [%s] is not found' % layer_type)
    return table[layer_type]
|
def conv3x3(in_planes, out_planes):
    """Reflection-padded 3x3 convolution, returned as a 2-element layer list."""
    pad = nn.ReflectionPad2d(1)
    conv = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=1, padding=0, bias=True)
    return [pad, conv]
|
def gaussian_weights_init(m):
    """Initialize conv-layer weights in place from N(0, 0.02).

    Intended for use with `nn.Module.apply`; modules whose class name does not
    start with 'Conv' (Linear, norm layers, ...) are left untouched.

    The original tested both `find('Conv') != -1` and `find('Conv') == 0`;
    the second condition implies the first, so a prefix check is equivalent.
    """
    if m.__class__.__name__.startswith('Conv'):
        m.weight.data.normal_(0.0, 0.02)
|
class LayerNorm(nn.Module):
    """Layer normalization over every non-batch dimension, with optional
    per-channel affine parameters broadcast across the spatial dims.

    Note: `eps` is accepted for API compatibility but (as in the original)
    F.layer_norm's default eps is what is actually used.
    """

    def __init__(self, n_out, eps=1e-05, affine=True):
        super(LayerNorm, self).__init__()
        self.n_out = n_out
        self.affine = affine
        if affine:
            self.weight = nn.Parameter(torch.ones(n_out, 1, 1))
            self.bias = nn.Parameter(torch.zeros(n_out, 1, 1))

    def forward(self, x):
        # Normalize over all dims except batch; expand the (C,1,1) affine
        # parameters to the full normalized shape.
        shape = x.size()[1:]
        if not self.affine:
            return F.layer_norm(x, shape)
        return F.layer_norm(x, shape, self.weight.expand(shape), self.bias.expand(shape))
|
class BasicBlock(nn.Module):
    """Residual downsampling block: (norm -> act -> conv3x3) x2 with a final
    mean-pool, plus a pooled 1x1 shortcut path."""

    def __init__(self, inplanes, outplanes, norm_layer=None, nl_layer=None):
        super(BasicBlock, self).__init__()
        layers = []
        if norm_layer is not None:
            layers.append(norm_layer(inplanes))
        layers.append(nl_layer())
        layers.extend(conv3x3(inplanes, inplanes))
        if norm_layer is not None:
            layers.append(norm_layer(inplanes))
        layers.append(nl_layer())
        layers.append(convMeanpool(inplanes, outplanes))
        self.conv = nn.Sequential(*layers)
        self.shortcut = meanpoolConv(inplanes, outplanes)

    def forward(self, x):
        """Residual sum of the conv path and the pooled shortcut."""
        return self.conv(x) + self.shortcut(x)
|
class LeakyReLUConv2d(nn.Module):
    """Reflection-padded convolution + LeakyReLU, with optional spectral
    normalization and optional InstanceNorm.

    BUG fix: the original tested `'norm' == 'Instance'` — a comparison between
    two string literals that is always False — so the `norm` argument was
    silently ignored and InstanceNorm was never added. The parameter itself
    is now compared (default 'None' keeps the old behavior).
    """

    def __init__(self, n_in, n_out, kernel_size, stride, padding=0, norm='None', sn=False):
        super(LeakyReLUConv2d, self).__init__()
        model = [nn.ReflectionPad2d(padding)]
        conv = nn.Conv2d(n_in, n_out, kernel_size=kernel_size, stride=stride, padding=0, bias=True)
        if sn:
            # file-local spectral_norm helper
            conv = spectral_norm(conv)
        model += [conv]
        if norm == 'Instance':
            model += [nn.InstanceNorm2d(n_out, affine=False)]
        model += [nn.LeakyReLU(inplace=True)]
        self.model = nn.Sequential(*model)
        self.model.apply(gaussian_weights_init)

    def forward(self, x):
        return self.model(x)
|
class ReLUINSConv2d(nn.Module):
    """Reflection-padded conv -> InstanceNorm -> ReLU."""

    def __init__(self, n_in, n_out, kernel_size, stride, padding=0):
        super(ReLUINSConv2d, self).__init__()
        self.model = nn.Sequential(
            nn.ReflectionPad2d(padding),
            nn.Conv2d(n_in, n_out, kernel_size=kernel_size, stride=stride, padding=0, bias=True),
            nn.InstanceNorm2d(n_out, affine=False),
            nn.ReLU(inplace=True),
        )
        self.model.apply(gaussian_weights_init)

    def forward(self, x):
        return self.model(x)
|
class INSResBlock(nn.Module):
    """Two-conv residual block with InstanceNorm and an optional dropout tail."""

    def conv3x3(self, inplanes, out_planes, stride=1):
        # reflection-padded 3x3 conv (kept as a method to preserve the API)
        return [nn.ReflectionPad2d(1), nn.Conv2d(inplanes, out_planes, kernel_size=3, stride=stride)]

    def __init__(self, inplanes, planes, stride=1, dropout=0.0):
        super(INSResBlock, self).__init__()
        layers = self.conv3x3(inplanes, planes, stride)
        layers += [nn.InstanceNorm2d(planes), nn.ReLU(inplace=True)]
        layers += self.conv3x3(planes, planes)
        layers += [nn.InstanceNorm2d(planes)]
        if dropout > 0:
            layers += [nn.Dropout(p=dropout)]
        self.model = nn.Sequential(*layers)
        self.model.apply(gaussian_weights_init)

    def forward(self, x):
        """Residual sum x + f(x)."""
        return x + self.model(x)
|
class MisINSResBlock(nn.Module):
    """Residual block that mixes an extra attribute code z into the features.

    Each of the two conv+InstanceNorm stages is followed by a pair of 1x1
    convolutions (blk1/blk2) applied to the concatenation of the features
    with a spatially tiled copy of z, mixing (dim + dim_extra) channels back
    down to dim. The input is added back as a residual at the end.
    """
    def conv3x3(self, dim_in, dim_out, stride=1):
        # reflection-padded 3x3 convolution
        return nn.Sequential(nn.ReflectionPad2d(1), nn.Conv2d(dim_in, dim_out, kernel_size=3, stride=stride))
    def conv1x1(self, dim_in, dim_out):
        # channel-mixing 1x1 convolution
        return nn.Conv2d(dim_in, dim_out, kernel_size=1, stride=1, padding=0)
    def __init__(self, dim, dim_extra, stride=1, dropout=0.0):
        """Args:
            dim: feature channel count (input and output).
            dim_extra: channel count of the tiled attribute code z.
            stride: stride of the 3x3 convs.
            dropout: dropout probability for self.model (see note below).
        """
        super(MisINSResBlock, self).__init__()
        self.conv1 = nn.Sequential(self.conv3x3(dim, dim, stride), nn.InstanceNorm2d(dim))
        self.conv2 = nn.Sequential(self.conv3x3(dim, dim, stride), nn.InstanceNorm2d(dim))
        # blk1/blk2 fold the concatenated z back down to `dim` channels
        self.blk1 = nn.Sequential(self.conv1x1((dim + dim_extra), (dim + dim_extra)), nn.ReLU(inplace=False), self.conv1x1((dim + dim_extra), dim), nn.ReLU(inplace=False))
        self.blk2 = nn.Sequential(self.conv1x1((dim + dim_extra), (dim + dim_extra)), nn.ReLU(inplace=False), self.conv1x1((dim + dim_extra), dim), nn.ReLU(inplace=False))
        model = []
        if (dropout > 0):
            model += [nn.Dropout(p=dropout)]
        # NOTE(review): self.model (the optional dropout) is never applied in
        # forward() below, so the dropout argument currently has no effect --
        # confirm whether that is intentional.
        self.model = nn.Sequential(*model)
        self.model.apply(gaussian_weights_init)
        self.conv1.apply(gaussian_weights_init)
        self.conv2.apply(gaussian_weights_init)
        self.blk1.apply(gaussian_weights_init)
        self.blk2.apply(gaussian_weights_init)
    def forward(self, x, z):
        """Compute x + blk2(cat(conv2(blk1(cat(conv1(x), z))), z))."""
        residual = x
        # tile z over the spatial dimensions of x
        z_expand = z.view(z.size(0), z.size(1), 1, 1).expand(z.size(0), z.size(1), x.size(2), x.size(3))
        o1 = self.conv1(x)
        o2 = self.blk1(torch.cat([o1, z_expand], dim=1))
        o3 = self.conv2(o2)
        out = self.blk2(torch.cat([o3, z_expand], dim=1))
        out += residual
        return out
|
class GaussianNoiseLayer(nn.Module):
    """Adds standard-normal noise to the input while training; identity in eval mode."""

    def __init__(self):
        super(GaussianNoiseLayer, self).__init__()

    def forward(self, x):
        if not self.training:
            return x
        # randn_like keeps x's device and dtype, so this also works on CPU.
        # (The original wrapped torch.randn(...).cuda(...) in the deprecated
        # Variable API and crashed on CPU tensors.)
        return x + torch.randn_like(x)
|
class ReLUINSConvTranspose2d(nn.Module):
    """Transposed conv -> LayerNorm -> ReLU.

    Note: despite the 'INS' in the name, this uses the file-local LayerNorm,
    not InstanceNorm.
    """

    def __init__(self, n_in, n_out, kernel_size, stride, padding, output_padding):
        super(ReLUINSConvTranspose2d, self).__init__()
        self.model = nn.Sequential(
            nn.ConvTranspose2d(n_in, n_out, kernel_size=kernel_size, stride=stride,
                               padding=padding, output_padding=output_padding, bias=True),
            LayerNorm(n_out),
            nn.ReLU(inplace=True),
        )
        self.model.apply(gaussian_weights_init)

    def forward(self, x):
        return self.model(x)
|
class SpectralNorm(object):
    """Spectral normalization implemented as a forward-pre-hook, mirroring the
    internals of torch.nn.utils.spectral_norm.

    The unnormalized weight lives in `<name>_orig` and the running estimate of
    the left singular vector in `<name>_u`; on every training-mode forward the
    hook re-estimates the largest singular value by power iteration and stores
    weight / sigma under `<name>`.
    """
    def __init__(self, name='weight', n_power_iterations=1, dim=0, eps=1e-12):
        self.name = name
        self.dim = dim
        if (n_power_iterations <= 0):
            raise ValueError('Expected n_power_iterations to be positive, but got n_power_iterations={}'.format(n_power_iterations))
        self.n_power_iterations = n_power_iterations
        self.eps = eps
    def compute_weight(self, module):
        """Return (weight / sigma, u): the spectrally normalized weight and the
        updated left-singular-vector estimate."""
        weight = getattr(module, (self.name + '_orig'))
        u = getattr(module, (self.name + '_u'))
        weight_mat = weight
        if (self.dim != 0):
            # move the normalization dim to the front before flattening to 2D
            weight_mat = weight_mat.permute(self.dim, *[d for d in range(weight_mat.dim()) if (d != self.dim)])
        height = weight_mat.size(0)
        weight_mat = weight_mat.reshape(height, (- 1))
        with torch.no_grad():
            # power iteration: v ~ right singular vector, u ~ left singular vector
            for _ in range(self.n_power_iterations):
                v = F.normalize(torch.matmul(weight_mat.t(), u), dim=0, eps=self.eps)
                u = F.normalize(torch.matmul(weight_mat, v), dim=0, eps=self.eps)
        # sigma approximates the largest singular value of weight_mat
        sigma = torch.dot(u, torch.matmul(weight_mat, v))
        weight = (weight / sigma)
        return (weight, u)
    def remove(self, module):
        """Strip the hook state and restore a plain `<name>` Parameter."""
        weight = getattr(module, self.name)
        delattr(module, self.name)
        delattr(module, (self.name + '_u'))
        delattr(module, (self.name + '_orig'))
        module.register_parameter(self.name, torch.nn.Parameter(weight))
    def __call__(self, module, inputs):
        # forward-pre-hook: refresh the normalized weight each training-mode
        # forward; in eval mode detach the cached weight in place instead
        if module.training:
            (weight, u) = self.compute_weight(module)
            setattr(module, self.name, weight)
            setattr(module, (self.name + '_u'), u)
        else:
            r_g = getattr(module, (self.name + '_orig')).requires_grad
            getattr(module, self.name).detach_().requires_grad_(r_g)
    @staticmethod
    def apply(module, name, n_power_iterations, dim, eps):
        """Install spectral normalization on module.<name> and return the hook."""
        fn = SpectralNorm(name, n_power_iterations, dim, eps)
        weight = module._parameters[name]
        height = weight.size(dim)
        # random initial estimate of the left singular vector
        u = F.normalize(weight.new_empty(height).normal_(0, 1), dim=0, eps=fn.eps)
        delattr(module, fn.name)
        module.register_parameter((fn.name + '_orig'), weight)
        # plain buffer so module.<name> exists before the first forward pass
        module.register_buffer(fn.name, weight.data)
        module.register_buffer((fn.name + '_u'), u)
        module.register_forward_pre_hook(fn)
        return fn
|
def spectral_norm(module, name='weight', n_power_iterations=1, eps=1e-12, dim=None):
    """Attach spectral normalization to `module.<name>` and return the module.

    When dim is None it defaults to 1 for transposed convolutions (whose
    output-channel axis is dim 1) and 0 otherwise.
    """
    if dim is None:
        transposed = (torch.nn.ConvTranspose1d, torch.nn.ConvTranspose2d, torch.nn.ConvTranspose3d)
        dim = 1 if isinstance(module, transposed) else 0
    SpectralNorm.apply(module, name, n_power_iterations, dim, eps)
    return module
|
def remove_spectral_norm(module, name='weight'):
    """Find and detach the SpectralNorm hook named `name`; raise if absent."""
    for key, hook in module._forward_pre_hooks.items():
        if isinstance(hook, SpectralNorm) and hook.name == name:
            hook.remove(module)
            del module._forward_pre_hooks[key]
            return module
    raise ValueError("spectral_norm of '{}' not found in {}".format(name, module))
|
class TrainOptions():
    """Command-line options for training.

    Fixes two help-text defects from the original: --gpu_ids described itself
    as 'path of data' (copy-paste error) and --no_display_img contained the
    typo 'dispaly'. All defaults and option names are unchanged.
    """

    def __init__(self):
        self.parser = argparse.ArgumentParser()
        # data loader related
        self.parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids')
        self.parser.add_argument('--dataroot', type=str, required=True, help='path of data')
        self.parser.add_argument('--phase', type=str, default='train', help='phase for dataloading')
        self.parser.add_argument('--batch_size', type=int, default=2, help='batch size')
        self.parser.add_argument('--resize_size', type=int, default=256, help='resized image size for training')
        self.parser.add_argument('--crop_size', type=int, default=216, help='cropped image size for training')
        self.parser.add_argument('--input_dim_a', type=int, default=3, help='# of input channels for domain A')
        self.parser.add_argument('--input_dim_b', type=int, default=3, help='# of input channels for domain B')
        self.parser.add_argument('--nThreads', type=int, default=2, help='# of threads for data loader')
        self.parser.add_argument('--no_flip', action='store_true', help='specified if no flipping')
        # output related
        self.parser.add_argument('--name', type=str, default='trial', help='folder name to save outputs')
        self.parser.add_argument('--display_dir', type=str, default='./logs', help='path for saving display results')
        self.parser.add_argument('--result_dir', type=str, default='./results', help='path for saving result images and models')
        self.parser.add_argument('--display_freq', type=int, default=100, help='freq (iteration) of display')
        self.parser.add_argument('--img_save_freq', type=int, default=1, help='freq (epoch) of saving images')
        self.parser.add_argument('--model_save_freq', type=int, default=10, help='freq (epoch) of saving models')
        self.parser.add_argument('--no_display_img', action='store_true', help='specified if no display')
        # model related
        self.parser.add_argument('--concat', type=int, default=1, help='concatenate attribute features for translation, set 0 for using feature-wise transform')
        self.parser.add_argument('--dis_scale', type=int, default=3, help='scale of discriminator')
        self.parser.add_argument('--dis_norm', type=str, default='None', help='normalization layer in discriminator [None, Instance]')
        self.parser.add_argument('--dis_spectral_norm', action='store_true', help='use spectral normalization in discriminator')
        # training related
        self.parser.add_argument('--lr_policy', type=str, default='lambda', help='type of learn rate decay')
        self.parser.add_argument('--n_ep', type=int, default=1200, help='number of epochs')
        self.parser.add_argument('--n_ep_decay', type=int, default=600, help='epoch start decay learning rate, set -1 if no decay')
        self.parser.add_argument('--resume', type=str, default=None, help='specified the dir of saved models for resume the training')
        self.parser.add_argument('--d_iter', type=int, default=3, help='# of iterations for updating content discriminator')
        self.parser.add_argument('--gpu', type=int, default=0, help='gpu')
        # DivCo / contrastive related
        self.parser.add_argument('--with_latent_reg', action='store_true', help='whether use latent regression regularization term')
        self.parser.add_argument('--featnorm', action='store_true', help='whether use feature normalization')
        self.parser.add_argument('--lambda_contra', type=float, default=1.0, help='# weight for latent-augmented contrastive loss')
        self.parser.add_argument('--tau', type=float, default=1.0, help='# temperature scaling parameter')
        self.parser.add_argument('--radius', type=float, default=0.01, help='positive sample - distance threshold')
        self.parser.add_argument('--num_negative', type=int, default=10, help='# of negative samples of z')

    def parse(self):
        """Parse sys.argv, echo all options sorted by name, and return them."""
        self.opt = self.parser.parse_args()
        args = vars(self.opt)
        print('\n--- load options ---')
        for (name, value) in sorted(args.items()):
            print(('%s: %s' % (str(name), str(value))))
        return self.opt
|
class TestOptions():
    """Command-line options for test-time translation."""
    def __init__(self):
        self.parser = argparse.ArgumentParser()
        # data loader related
        self.parser.add_argument('--dataroot', type=str, required=True, help='path of data')
        self.parser.add_argument('--phase', type=str, default='test', help='phase for dataloading')
        self.parser.add_argument('--resize_size', type=int, default=256, help='resized image size for training')
        self.parser.add_argument('--crop_size', type=int, default=216, help='cropped image size for training')
        self.parser.add_argument('--nThreads', type=int, default=4, help='for data loader')
        self.parser.add_argument('--input_dim_a', type=int, default=3, help='# of input channels for domain A')
        self.parser.add_argument('--input_dim_b', type=int, default=3, help='# of input channels for domain B')
        self.parser.add_argument('--a2b', type=int, default=1, help='translation direction, 1 for a2b, 0 for b2a')
        # output related
        self.parser.add_argument('--num', type=int, default=5, help='number of outputs per image')
        self.parser.add_argument('--name', type=str, default='trial', help='folder name to save outputs')
        self.parser.add_argument('--result_dir', type=str, default='results', help='path for saving result images and models')
        # model related
        self.parser.add_argument('--concat', type=int, default=1, help='concatenate attribute features for translation, set 0 for using feature-wise transform')
        self.parser.add_argument('--resume', type=str, required=True, help='specified the dir of saved models for resume the training')
        self.parser.add_argument('--gpu', type=int, default=0, help='gpu')
        self.parser.add_argument('--random', action='store_true', help='random gen')
    def parse(self):
        """Parse sys.argv, echo all options, and fill in the fixed
        discriminator settings checkpoints expect at load time."""
        self.opt = self.parser.parse_args()
        args = vars(self.opt)
        print('\n--- load options ---')
        for (name, value) in sorted(args.items()):
            print(('%s: %s' % (str(name), str(value))))
        # hard-coded discriminator settings -- presumably these must match the
        # training configuration for checkpoint loading; confirm against
        # TrainOptions defaults.
        self.opt.dis_scale = 3
        self.opt.dis_norm = 'None'
        self.opt.dis_spectral_norm = False
        return self.opt
|
def tensor2img(img):
    """Convert the first image of a batched tensor in [-1, 1] to HxWx3 uint8."""
    arr = img[0].cpu().float().numpy()
    if arr.shape[0] == 1:
        # replicate a single channel to RGB
        arr = np.tile(arr, (3, 1, 1))
    # CHW -> HWC, then map [-1, 1] to [0, 255]
    arr = (np.transpose(arr, (1, 2, 0)) + 1) / 2.0 * 255.0
    return arr.astype(np.uint8)
|
def save_imgs(imgs, names, path):
    """Save each tensor in `imgs` under `path` as '<name>.png'."""
    if not os.path.exists(path):
        os.makedirs(path)
    for img, name in zip(imgs, names):
        Image.fromarray(tensor2img(img)).save(os.path.join(path, name + '.png'))
|
class Saver():
    """Handles tensorboard logging, image dumps and model checkpoints.

    BUG fixes versus the original `write_img`:
    - the `ep == -1` branch was unreachable, because `(ep + 1) % freq` is 0
      when ep == -1, so the first branch always fired and wrote
      'gen_-0001.jpg' instead of 'gen_last.jpg';
    - that branch's format string `'%s/gen_last.jpg' % (self.image_dir, ep)`
      had a stray extra argument and would have raised TypeError anyway.
    A dead bare-string statement in `write_display` was also removed.
    """

    def __init__(self, opts):
        self.display_dir = os.path.join(opts.display_dir, opts.name)
        self.model_dir = os.path.join(opts.result_dir, opts.name)
        self.image_dir = os.path.join(self.model_dir, 'images')
        self.display_freq = opts.display_freq
        self.img_save_freq = opts.img_save_freq
        self.model_save_freq = opts.model_save_freq
        # make sure every output directory exists
        for d in (self.display_dir, self.model_dir, self.image_dir):
            if not os.path.exists(d):
                os.makedirs(d)
        self.writer = SummaryWriter(log_dir=self.display_dir)

    def write_display(self, total_it, model):
        """Log scalar losses and the current image grid every display_freq iters."""
        if ((total_it + 1) % self.display_freq) != 0:
            return
        # every non-callable, non-dunder attribute with 'loss' in its name
        members = [attr for attr in dir(model) if ((not callable(getattr(model, attr))) and (not attr.startswith('__')) and ('loss' in attr))]
        for m in members:
            self.writer.add_scalar(m, getattr(model, m), total_it)
        # map images from [-1, 1] to [0, 1] for display
        image_dis = ((torchvision.utils.make_grid(model.image_display, nrow=(model.image_display.size(0) // 2)) / 2) + 0.5)
        self.writer.add_image('Image', image_dis, total_it)

    def write_img(self, ep, model):
        """Save assembled result images every img_save_freq epochs;
        ep == -1 writes the final 'gen_last.jpg'."""
        if ep == -1:
            assembled_images = model.assemble_outputs()
            img_filename = '%s/gen_last.jpg' % self.image_dir
            torchvision.utils.save_image((assembled_images / 2) + 0.5, img_filename, nrow=1)
        elif ((ep + 1) % self.img_save_freq) == 0:
            assembled_images = model.assemble_outputs()
            img_filename = '%s/gen_%05d.jpg' % (self.image_dir, ep)
            torchvision.utils.save_image((assembled_images / 2) + 0.5, img_filename, nrow=1)

    def write_model(self, ep, total_it, model):
        """Checkpoint every model_save_freq epochs; otherwise overwrite last.pth."""
        if ((ep + 1) % self.model_save_freq) == 0:
            print('--- save the model @ ep %d ---' % ep)
            model.save('%s/%05d.pth' % (self.model_dir, ep), ep, total_it)
        else:
            model.save('%s/last.pth' % self.model_dir, ep, total_it)
|
def main():
    """Test-time entry point: translate every image of the chosen source
    domain, writing opts.num translated outputs per input image."""
    parser = TestOptions()
    opts = parser.parse()
    print('\n--- load dataset ---')
    # a2b selects which source domain to read (A when translating A->B)
    if opts.a2b:
        dataset = dataset_single(opts, 'A', opts.input_dim_a)
    else:
        dataset = dataset_single(opts, 'B', opts.input_dim_b)
    loader = torch.utils.data.DataLoader(dataset, batch_size=1, num_workers=opts.nThreads)
    print('\n--- load model ---')
    model = DivCo_DRIT(opts)
    model.setgpu(opts.gpu)
    model.resume(opts.resume, train=False)
    model.eval()
    result_dir = os.path.join(opts.result_dir, opts.name)
    if (not os.path.exists(result_dir)):
        os.mkdir(result_dir)
    print('\n--- testing ---')
    # epoch tag taken from the checkpoint filename, e.g. '00099' from '00099.pth'
    ep_name = opts.resume.split('/')[(- 1)].split('.')[0]
    for (idx1, (img1, name1)) in enumerate(loader):
        print('{}/{}'.format(idx1, len(loader)))
        img1 = img1.cuda()
        imgs = [img1]
        names = ['input']
        for idx2 in range(opts.num):
            with torch.no_grad():
                if opts.random:
                    # let the model sample a random attribute code
                    img = model.test_forward(img1, a2b=opts.a2b)
                else:
                    # sweep the latent evenly over [-1, 1] across opts.num outputs
                    latent = (((float(idx2) / (opts.num - 1)) * 2) - 1)
                    img = model.test_givenz_forward(img1, latent, a2b=opts.a2b)
            imgs.append(img)
            names.append('output_{}'.format(idx2))
        # one sub-folder per input image, tagged with the run configuration
        save_imgs(imgs, names, os.path.join(result_dir, 'test_ep{}_num{}_a2b{}_random{}'.format(ep_name, opts.num, opts.a2b, int(opts.random)), name1[0].split('.')[0]))
    return
|
def main():
    """Training entry point for DivCo-DRIT.

    NOTE(review): this second definition of main() shadows the test-time
    main() defined earlier in this file -- the file looks like a
    concatenation of separate train/test scripts; confirm intent.
    """
    parser = TrainOptions()
    opts = parser.parse()
    print('\n--- load dataset ---')
    dataset = dataset_unpair(opts)
    train_loader = torch.utils.data.DataLoader(dataset, batch_size=opts.batch_size, shuffle=True, num_workers=opts.nThreads)
    print('\n--- load model ---')
    model = DivCo_DRIT(opts)
    if (opts.resume is None):
        model.initialize()
        ep0 = (- 1)
        total_it = 0
    else:
        # resume returns the epoch and iteration to continue from
        (ep0, total_it) = model.resume(opts.resume)
    model.setgpu(opts.gpu)
    model.set_scheduler(opts, last_ep=ep0)
    ep0 += 1
    print(('start the training at epoch %d' % ep0))
    saver = Saver(opts)
    print('\n--- train ---')
    max_it = 500000
    for ep in range(ep0, opts.n_ep):
        for (it, (images_a, images_b)) in enumerate(train_loader):
            # skip ragged final batches so batch-size-dependent losses stay valid
            if ((images_a.size(0) != opts.batch_size) or (images_b.size(0) != opts.batch_size)):
                continue
            images_a = images_a.cuda(opts.gpu).detach()
            images_b = images_b.cuda(opts.gpu).detach()
            # update the content discriminator alone for d_iter-1 out of every
            # d_iter iterations (and near the end of the epoch)
            if ((((it + 1) % opts.d_iter) != 0) and (it < (len(train_loader) - 2))):
                model.update_D_content(images_a, images_b)
                continue
            else:
                model.update_D(images_a, images_b)
                model.update_EG()
            if (not opts.no_display_img):
                saver.write_display(total_it, model)
            print(('total_it: %d (ep %d, it %d), lr %08f' % (total_it, ep, it, model.gen_opt.param_groups[0]['lr'])))
            total_it += 1
            if (total_it >= max_it):
                # hard iteration cap: save final outputs and stop this epoch
                saver.write_img((- 1), model)
                saver.write_model((- 1), max_it, model)
                break
        # decay the learning rate if decay is configured
        if (opts.n_ep_decay > (- 1)):
            model.update_lr()
        saver.write_img(ep, model)
        saver.write_model(ep, total_it, model)
    return
|
def create_model(args, maxlen, vocab):
    """Build the ABAE (Attention-Based Aspect Extraction) Keras model.

    The model reconstructs an attention-weighted sentence embedding (z_s)
    from a softmax mixture over learned aspect embeddings (r_s) and outputs a
    max-margin loss against averaged negative samples (z_n).

    NOTE(review): written against the Keras 1.x API (Model(input=..., output=...),
    layer.W.set_value/get_value) -- confirm the pinned Keras/Theano versions.
    """
    def ortho_reg(weight_matrix):
        # orthogonality regularizer on the row-normalized aspect matrix:
        # penalizes || T_n . T_n^T - I ||^2, scaled by args.ortho_reg
        w_n = (weight_matrix / K.cast((K.epsilon() + K.sqrt(K.sum(K.square(weight_matrix), axis=(- 1), keepdims=True))), K.floatx()))
        reg = K.sum(K.square((K.dot(w_n, K.transpose(w_n)) - K.eye(w_n.shape[0].eval()))))
        return (args.ortho_reg * reg)
    vocab_size = len(vocab)
    # positive sentence and args.neg_size negative sentences share one embedding
    sentence_input = Input(shape=(maxlen,), dtype='int32', name='sentence_input')
    neg_input = Input(shape=(args.neg_size, maxlen), dtype='int32', name='neg_input')
    word_emb = Embedding(vocab_size, args.emb_dim, mask_zero=True, name='word_emb')
    e_w = word_emb(sentence_input)
    # y_s: plain average embedding used as the attention context
    y_s = Average()(e_w)
    att_weights = Attention(name='att_weights')([e_w, y_s])
    # z_s: attention-weighted sentence embedding; z_n: averaged negatives
    z_s = WeightedSum()([e_w, att_weights])
    e_neg = word_emb(neg_input)
    z_n = Average()(e_neg)
    # p_t: aspect probabilities; r_s: reconstruction from aspect embeddings
    p_t = Dense(args.aspect_size)(z_s)
    p_t = Activation('softmax', name='p_t')(p_t)
    r_s = WeightedAspectEmb(args.aspect_size, args.emb_dim, name='aspect_emb', W_regularizer=ortho_reg)(p_t)
    # the hinge loss itself is the model output
    loss = MaxMargin(name='max_margin')([z_s, z_n, r_s])
    model = Model(input=[sentence_input, neg_input], output=loss)
    if args.emb_path:
        from w2vEmbReader import W2VEmbReader as EmbReader
        emb_reader = EmbReader(args.emb_path, emb_dim=args.emb_dim)
        logger.info('Initializing word embedding matrix')
        # overwrite rows of the randomly initialized matrix with pretrained vectors
        model.get_layer('word_emb').W.set_value(emb_reader.get_emb_matrix_given_vocab(vocab, model.get_layer('word_emb').W.get_value()))
        logger.info('Initializing aspect embedding matrix as centroid of kmean clusters')
        model.get_layer('aspect_emb').W.set_value(emb_reader.get_aspect_matrix(args.aspect_size))
    return model
|
class Attention(Layer):
    def __init__(self, W_regularizer=None, b_regularizer=None, W_constraint=None, b_constraint=None, bias=True, **kwargs):
        """Keras layer implementing content attention over a word sequence.

        Inputs: [x (batch, steps, d_x), y (batch, d_y)]; output: normalized
        attention weights of shape (batch, steps). Supports masking.
        NOTE(review): uses the Keras 1.x Layer API (initializations module,
        get_output_shape_for, dim= kwarg of K.expand_dims).
        """
        self.supports_masking = True
        self.init = initializations.get('glorot_uniform')
        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.bias = bias
        super(Attention, self).__init__(**kwargs)
    def build(self, input_shape):
        # expects exactly two inputs: the sequence x and the context vector y
        assert (type(input_shape) == list)
        assert (len(input_shape) == 2)
        self.steps = input_shape[0][1]
        # W maps the context vector into the word-embedding space
        self.W = self.add_weight((input_shape[0][(- 1)], input_shape[1][(- 1)]), initializer=self.init, name='{}_W'.format(self.name), regularizer=self.W_regularizer, constraint=self.W_constraint)
        if self.bias:
            # single scalar bias shared across all timesteps
            self.b = self.add_weight((1,), initializer='zero', name='{}_b'.format(self.name), regularizer=self.b_regularizer, constraint=self.b_constraint)
        self.built = True
    def compute_mask(self, input_tensor, mask=None):
        # attention weights propagate no mask downstream
        return None
    def call(self, input_tensor, mask=None):
        """Return masked-softmax attention weights for x given context y."""
        x = input_tensor[0]
        y = input_tensor[1]
        # only the sequence input's mask is relevant
        mask = mask[0]
        # project y through W and broadcast it across all timesteps
        y = K.transpose(K.dot(self.W, K.transpose(y)))
        y = K.expand_dims(y, dim=(- 2))
        y = K.repeat_elements(y, self.steps, axis=1)
        # unnormalized scores: per-timestep dot product, optional bias, tanh
        eij = K.sum((x * y), axis=(- 1))
        if self.bias:
            b = K.repeat_elements(self.b, self.steps, axis=0)
            eij += b
        eij = K.tanh(eij)
        # masked softmax over the timestep axis
        a = K.exp(eij)
        if (mask is not None):
            a *= K.cast(mask, K.floatx())
        a /= K.cast((K.sum(a, axis=1, keepdims=True) + K.epsilon()), K.floatx())
        return a
    def get_output_shape_for(self, input_shape):
        # (batch, steps)
        return (input_shape[0][0], input_shape[0][1])
|
class WeightedSum(Layer):
    """Collapse a sequence by its attention weights: sum_i a_i * x_i.

    Inputs: [x (batch, steps, dim), a (batch, steps)]; output (batch, dim).
    """

    def __init__(self, **kwargs):
        self.supports_masking = True
        super(WeightedSum, self).__init__(**kwargs)

    def call(self, input_tensor, mask=None):
        assert (type(input_tensor) == list)
        assert (type(mask) == list)
        x, a = input_tensor[0], input_tensor[1]
        # broadcast the weights over the embedding dimension and sum out steps
        weighted = x * K.expand_dims(a)
        return K.sum(weighted, axis=1)

    def get_output_shape_for(self, input_shape):
        return (input_shape[0][0], input_shape[0][(- 1)])

    def compute_mask(self, x, mask=None):
        # the collapsed vector carries no mask
        return None
|
class WeightedAspectEmb(Layer):
    """Reconstruct a sentence embedding from aspect probabilities:
    r_s = p_t . T, where T is the learned (input_dim x output_dim) aspect
    embedding matrix.

    NOTE(review): written against the Keras 1.x Layer API.
    """
    def __init__(self, input_dim, output_dim, init='uniform', input_length=None, W_regularizer=None, activity_regularizer=None, W_constraint=None, weights=None, dropout=0.0, **kwargs):
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.init = initializations.get(init)
        self.input_length = input_length
        self.dropout = dropout
        self.W_constraint = constraints.get(W_constraint)
        self.W_regularizer = regularizers.get(W_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        if (0.0 < self.dropout < 1.0):
            self.uses_learning_phase = True
        self.initial_weights = weights
        kwargs['input_shape'] = (self.input_length,)
        kwargs['input_dtype'] = K.floatx()
        super(WeightedAspectEmb, self).__init__(**kwargs)
    def build(self, input_shape):
        # the aspect embedding matrix; its regularizer is the caller-supplied
        # orthogonality penalty (see create_model's ortho_reg)
        self.W = self.add_weight((self.input_dim, self.output_dim), initializer=self.init, name='{}_W'.format(self.name), regularizer=self.W_regularizer, constraint=self.W_constraint)
        if (self.initial_weights is not None):
            self.set_weights(self.initial_weights)
        self.built = True
    def compute_mask(self, x, mask=None):
        return None
    def get_output_shape_for(self, input_shape):
        # (batch, emb_dim)
        return (input_shape[0], self.output_dim)
    def call(self, x, mask=None):
        # NOTE(review): self.dropout is stored but never applied here --
        # confirm whether dropout was intended to act on x or W.
        return K.dot(x, self.W)
|
class Average(Layer):
    """Masked mean over the timestep axis: sum(x * mask) / sum(mask)."""
    def __init__(self, **kwargs):
        self.supports_masking = True
        super(Average, self).__init__(**kwargs)
    def call(self, x, mask=None):
        """Average embeddings over axis -2, honouring the input mask.

        NOTE(review): the return expression divides by K.sum(mask, ...) even
        when mask is None, which would fail; presumably every caller feeds
        this from an Embedding with mask_zero=True so a mask is always
        present -- confirm against create_model.
        """
        if (mask is not None):
            mask = K.cast(mask, K.floatx())
            mask = K.expand_dims(mask)
            x = (x * mask)
        return (K.sum(x, axis=(- 2)) / K.sum(mask, axis=(- 2)))
    def get_output_shape_for(self, input_shape):
        # drop the averaged (timestep) axis
        return (input_shape[0:(- 2)] + input_shape[(- 1):])
    def compute_mask(self, x, mask=None):
        return None
|
class MaxMargin(Layer):
    """Hinge (max-margin) loss layer: sum_j max(0, 1 - z_s.r_s + z_n_j.r_s).

    All three inputs are L2-normalized first, so the dot products are cosine
    similarities. NOTE(review): uses T.maximum, i.e. this layer is tied to
    the Theano backend.
    """
    def __init__(self, **kwargs):
        super(MaxMargin, self).__init__(**kwargs)
    def call(self, input_tensor, mask=None):
        # z_s: (batch, dim) sentence embedding; z_n: (batch, neg, dim)
        # negative samples; r_s: (batch, dim) aspect reconstruction
        z_s = input_tensor[0]
        z_n = input_tensor[1]
        r_s = input_tensor[2]
        # L2-normalize each input (epsilon guards against zero vectors)
        z_s = (z_s / K.cast((K.epsilon() + K.sqrt(K.sum(K.square(z_s), axis=(- 1), keepdims=True))), K.floatx()))
        z_n = (z_n / K.cast((K.epsilon() + K.sqrt(K.sum(K.square(z_n), axis=(- 1), keepdims=True))), K.floatx()))
        r_s = (r_s / K.cast((K.epsilon() + K.sqrt(K.sum(K.square(r_s), axis=(- 1), keepdims=True))), K.floatx()))
        steps = z_n.shape[1]
        # pos: cosine(z_s, r_s), repeated once per negative sample
        pos = K.sum((z_s * r_s), axis=(- 1), keepdims=True)
        pos = K.repeat_elements(pos, steps, axis=(- 1))
        # broadcast r_s against every negative sample
        r_s = K.expand_dims(r_s, dim=(- 2))
        r_s = K.repeat_elements(r_s, steps, axis=1)
        neg = K.sum((z_n * r_s), axis=(- 1))
        # hinge: margin 1 between positive and each negative similarity
        loss = K.cast(K.sum(T.maximum(0.0, ((1.0 - pos) + neg)), axis=(- 1), keepdims=True), K.floatx())
        return loss
    def compute_mask(self, input_tensor, mask=None):
        return None
    def get_output_shape_for(self, input_shape):
        # one scalar loss per example
        return (input_shape[0][0], 1)
|
def get_optimizer(args):
    """Build a Keras optimizer named by args.algorithm, with fixed clipping.

    Returns:
        A configured optimizer from the `opt` (keras.optimizers) module.

    Raises:
        ValueError: for an unknown algorithm name. (BUG fix: the original had
        no else branch, so an unknown name crashed with UnboundLocalError on
        the final `return optimizer` instead of a clear error.)
    """
    clipvalue = 0
    clipnorm = 10
    if args.algorithm == 'rmsprop':
        optimizer = opt.RMSprop(lr=0.001, rho=0.9, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
    elif args.algorithm == 'sgd':
        optimizer = opt.SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False, clipnorm=clipnorm, clipvalue=clipvalue)
    elif args.algorithm == 'adagrad':
        optimizer = opt.Adagrad(lr=0.01, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
    elif args.algorithm == 'adadelta':
        optimizer = opt.Adadelta(lr=1.0, rho=0.95, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
    elif args.algorithm == 'adam':
        optimizer = opt.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=clipnorm, clipvalue=clipvalue)
    elif args.algorithm == 'adamax':
        optimizer = opt.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=clipnorm, clipvalue=clipvalue)
    else:
        raise ValueError('unknown optimization algorithm: %s' % args.algorithm)
    return optimizer
|
class W2VEmbReader():
    """Loads a trained gensim Word2Vec model and exposes its vectors for
    initializing the ABAE word- and aspect-embedding matrices.

    BUG fix: `get_emb_matrix_given_vocab` used `dict.iteritems()`, which only
    exists on Python 2; `.items()` works on both.
    """

    def __init__(self, emb_path, emb_dim=None):
        """Load the model at emb_path; if emb_dim is given, sanity-check it
        against the loaded vectors."""
        logger.info('Loading embeddings from: ' + emb_path)
        self.embeddings = {}
        emb_matrix = []
        # NOTE(review): model.vocab / model[word] is the pre-4.0 gensim API
        # (newer gensim exposes model.wv) -- confirm the pinned version.
        model = gensim.models.Word2Vec.load(emb_path)
        self.emb_dim = emb_dim
        for word in model.vocab:
            self.embeddings[word] = list(model[word])
            emb_matrix.append(list(model[word]))
        if emb_dim is not None:
            assert self.emb_dim == len(self.embeddings['nice'])
        self.vector_size = len(self.embeddings)
        self.emb_matrix = np.asarray(emb_matrix)
        logger.info(' #vectors: %i, #dimensions: %i' % (self.vector_size, self.emb_dim))

    def get_emb_given_word(self, word):
        """Return the embedding list for `word`, or None if unseen."""
        return self.embeddings.get(word)

    def get_emb_matrix_given_vocab(self, vocab, emb_matrix):
        """Overwrite rows of emb_matrix with pretrained vectors where the
        vocab word is known, then L2-normalize every row."""
        counter = 0.0
        for word, index in vocab.items():
            try:
                emb_matrix[index] = self.embeddings[word]
                counter += 1
            except KeyError:
                # word not in the pretrained model: keep its random row
                pass
        logger.info('%i/%i word vectors initialized (hit rate: %.2f%%)' % (counter, len(vocab), (100 * counter) / len(vocab)))
        norm_emb_matrix = emb_matrix / np.linalg.norm(emb_matrix, axis=-1, keepdims=True)
        return norm_emb_matrix

    def get_aspect_matrix(self, n_clusters):
        """K-means centroids of all word vectors, L2-normalized, as float32."""
        km = KMeans(n_clusters=n_clusters)
        km.fit(self.emb_matrix)
        clusters = km.cluster_centers_
        norm_aspect_matrix = clusters / np.linalg.norm(clusters, axis=-1, keepdims=True)
        return norm_aspect_matrix.astype(np.float32)

    def get_emb_dim(self):
        return self.emb_dim
|
class CheckpointIO(object):
    """Saves and restores the state dicts of a set of named modules.

    ``fname_template`` must contain a ``{}`` placeholder that is filled with
    the step number, e.g. ``'expr/checkpoints/{}_nets.ckpt'``.
    """

    def __init__(self, fname_template, **kwargs):
        # Ensure the checkpoint directory exists up front.
        os.makedirs(os.path.dirname(fname_template), exist_ok=True)
        self.fname_template = fname_template
        self.module_dict = kwargs

    def register(self, **kwargs):
        """Add more named modules to be checkpointed."""
        self.module_dict.update(kwargs)

    def save(self, step):
        """Write every registered module's state dict to a single file."""
        fname = self.fname_template.format(step)
        print(('Saving checkpoint into %s...' % fname))
        payload = {name: module.state_dict()
                   for name, module in self.module_dict.items()}
        torch.save(payload, fname)

    def load(self, step):
        """Restore every registered module from the checkpoint for ``step``."""
        fname = self.fname_template.format(step)
        assert os.path.exists(fname), (fname + ' does not exist!')
        print(('Loading checkpoint from %s...' % fname))
        # Fall back to CPU tensors when no GPU is present.
        map_loc = None if torch.cuda.is_available() else torch.device('cpu')
        payload = torch.load(fname, map_location=map_loc)
        for name, module in self.module_dict.items():
            module.load_state_dict(payload[name])
|
def listdir(dname):
    """Recursively collect image files (png/jpg/jpeg/JPG) under ``dname``."""
    fnames = []
    for ext in ['png', 'jpg', 'jpeg', 'JPG']:
        fnames += list(Path(dname).rglob('*.' + ext))
    return fnames
|
class DefaultDataset(data.Dataset):
    """Flat, label-free dataset over every image found under ``root``."""

    def __init__(self, root, transform=None):
        # Sort so the sample order is deterministic across runs.
        self.samples = sorted(listdir(root))
        self.transform = transform
        self.targets = None  # no labels for this dataset

    def __getitem__(self, index):
        img = Image.open(self.samples[index]).convert('RGB')
        if self.transform is None:
            return img
        return self.transform(img)

    def __len__(self):
        return len(self.samples)
|
class ReferenceDataset(data.Dataset):
    """Yields (image, same-domain reference image, domain label) triples."""

    def __init__(self, root, transform=None):
        self.samples, self.targets = self._make_dataset(root)
        self.transform = transform

    def _make_dataset(self, root):
        """Scan ``root`` (one sub-directory per domain) into sample pairs."""
        fnames, fnames2, labels = [], [], []
        # The sorted domain index doubles as the class label.
        for idx, domain in enumerate(sorted(os.listdir(root))):
            cls_fnames = listdir(os.path.join(root, domain))
            fnames.extend(cls_fnames)
            # Permute within the domain so references share the class.
            fnames2.extend(random.sample(cls_fnames, len(cls_fnames)))
            labels.extend([idx] * len(cls_fnames))
        return list(zip(fnames, fnames2)), labels

    def __getitem__(self, index):
        fname, fname2 = self.samples[index]
        label = self.targets[index]
        img = Image.open(fname).convert('RGB')
        img2 = Image.open(fname2).convert('RGB')
        if self.transform is not None:
            img = self.transform(img)
            img2 = self.transform(img2)
        return img, img2, label

    def __len__(self):
        return len(self.targets)
|
def _make_balanced_sampler(labels):
class_counts = np.bincount(labels)
class_weights = (1.0 / class_counts)
weights = class_weights[labels]
return WeightedRandomSampler(weights, len(weights))
|
def get_train_loader(root, which='source', img_size=256, batch_size=8, prob=0.5, num_workers=4):
    """Build the class-balanced training DataLoader.

    Parameters:
        root        -- image folder root (one sub-directory per domain)
        which       -- 'source' for ImageFolder, 'reference' for ReferenceDataset
        img_size    -- output square image size
        batch_size  -- samples per batch
        prob        -- probability of applying the random resized crop
        num_workers -- DataLoader worker processes
    """
    print(('Preparing DataLoader to fetch %s images during the training phase...' % which))
    crop = transforms.RandomResizedCrop(img_size, scale=[0.8, 1.0], ratio=[0.9, 1.1])

    def maybe_crop(x):
        # Apply the random crop only with probability ``prob``.
        return crop(x) if random.random() < prob else x

    transform = transforms.Compose([
        transforms.Lambda(maybe_crop),
        transforms.Resize([img_size, img_size]),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
    ])

    if which == 'source':
        dataset = ImageFolder(root, transform)
    elif which == 'reference':
        dataset = ReferenceDataset(root, transform)
    else:
        raise NotImplementedError

    # Oversample rare domains so each batch is class-balanced on average.
    sampler = _make_balanced_sampler(dataset.targets)
    return data.DataLoader(dataset=dataset,
                           batch_size=batch_size,
                           sampler=sampler,
                           num_workers=num_workers,
                           pin_memory=True,
                           drop_last=True)
|
def get_eval_loader(root, img_size=256, batch_size=32, imagenet_normalize=True, shuffle=True, num_workers=4, drop_last=False):
    """DataLoader over a flat image folder for the evaluation phase.

    With ``imagenet_normalize`` the images end up 299x299 with ImageNet
    statistics (Inception-style preprocessing); otherwise they stay at
    ``img_size`` with [-1, 1] normalization.
    """
    print('Preparing DataLoader for the evaluation phase...')
    if imagenet_normalize:
        height = width = 299
        mean = [0.485, 0.456, 0.406]
        std = [0.229, 0.224, 0.225]
    else:
        height = width = img_size
        mean = [0.5, 0.5, 0.5]
        std = [0.5, 0.5, 0.5]
    # NOTE(review): the double Resize (to img_size first, then to
    # height/width) looks deliberate -- it mimics the generator's working
    # resolution before rescaling for the evaluation network -- but
    # confirm with the caller before simplifying.
    transform = transforms.Compose([
        transforms.Resize([img_size, img_size]),
        transforms.Resize([height, width]),
        transforms.ToTensor(),
        transforms.Normalize(mean=mean, std=std),
    ])
    dataset = DefaultDataset(root, transform=transform)
    return data.DataLoader(dataset=dataset,
                           batch_size=batch_size,
                           shuffle=shuffle,
                           num_workers=num_workers,
                           pin_memory=True,
                           drop_last=drop_last)
|
def get_test_loader(root, img_size=256, batch_size=32, shuffle=True, num_workers=4):
    """DataLoader over a domain-labelled folder for the generation phase."""
    print('Preparing DataLoader for the generation phase...')
    transform = transforms.Compose([
        transforms.Resize([img_size, img_size]),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
    ])
    dataset = ImageFolder(root, transform)
    return data.DataLoader(dataset=dataset,
                           batch_size=batch_size,
                           shuffle=shuffle,
                           num_workers=num_workers,
                           pin_memory=True)
|
class InputFetcher():
    """Endlessly fetches batches from one or two DataLoaders and moves them
    to the active device.

    Depending on ``mode``:
        'train' -- source batch + reference pair batch + two random latent codes
        'val'   -- source batch + a second source batch used as the reference
        'test'  -- plain (x, y) batch

    Exhausted loaders are restarted transparently, so ``next(fetcher)``
    never raises StopIteration.
    """

    def __init__(self, loader, loader_ref=None, latent_dim=16, mode=''):
        self.loader = loader
        self.loader_ref = loader_ref
        self.latent_dim = latent_dim
        self.device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
        self.mode = mode

    def _fetch_inputs(self):
        """Next (x, y) batch from the main loader, restarting it when needed."""
        try:
            (x, y) = next(self.iter)
        except (AttributeError, StopIteration):
            # AttributeError: first call, iterator not yet created.
            # StopIteration: loader exhausted -- begin a new pass.
            self.iter = iter(self.loader)
            (x, y) = next(self.iter)
        return (x, y)

    def _fetch_refs(self):
        """Next (x, x2, y) batch from the reference loader, restarting it when needed."""
        try:
            (x, x2, y) = next(self.iter_ref)
        except (AttributeError, StopIteration):
            self.iter_ref = iter(self.loader_ref)
            (x, x2, y) = next(self.iter_ref)
        return (x, x2, y)

    def __iter__(self):
        # Fix: the class implemented __next__ without __iter__, so it could
        # not be used in ``for`` loops or with ``iter()``. Returning self
        # makes it a proper (infinite) iterator; bare ``next()`` still works.
        return self

    def __next__(self):
        (x, y) = self._fetch_inputs()
        if self.mode == 'train':
            (x_ref, x_ref2, y_ref) = self._fetch_refs()
            # Two independent latent codes per batch for style diversification.
            z_trg = torch.randn(x.size(0), self.latent_dim)
            z_trg2 = torch.randn(x.size(0), self.latent_dim)
            inputs = Munch(x_src=x, y_src=y, y_ref=y_ref, x_ref=x_ref,
                           x_ref2=x_ref2, z_trg=z_trg, z_trg2=z_trg2)
        elif self.mode == 'val':
            (x_ref, y_ref) = self._fetch_inputs()
            inputs = Munch(x_src=x, y_src=y, x_ref=x_ref, y_ref=y_ref)
        elif self.mode == 'test':
            inputs = Munch(x=x, y=y)
        else:
            raise NotImplementedError
        # Move every tensor in the batch to the target device.
        return Munch({k: v.to(self.device) for (k, v) in inputs.items()})
|
# NOTE(review): the lines below are non-code residue from a dataset-viewer
# web page accidentally pasted into the file (not valid Python); commented
# out to keep the module importable -- remove entirely once confirmed.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.