code stringlengths 17 6.64M |
|---|
class SelectiveLoadModule(torch.nn.Module):
    """Module that loads only the checkpoint weights whose names it also has."""

    def __init__(self):
        super(SelectiveLoadModule, self).__init__()

    def forward(self, x):
        # Identity pass-through; subclasses override this.
        return x

    def load_state_dict(self, state_dict):
        """Copy matching entries from `state_dict`, silently skipping unknown keys."""
        current = self.state_dict()
        for key, value in state_dict.items():
            if key not in current:
                continue
            current[key].copy_(value)
|
class ConvLayer(torch.nn.Module):
    """Conv2d preceded by reflection padding (preserves spatial size at stride 1)."""

    def __init__(self, in_channels, out_channels, kernel_size, stride, bias=True):
        super(ConvLayer, self).__init__()
        pad = int(np.floor(kernel_size / 2))
        self.reflection_pad = torch.nn.ReflectionPad2d(pad)
        self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, bias=bias)

    def forward(self, x):
        return self.conv2d(self.reflection_pad(x))
|
class ConvTanh(ConvLayer):
    """Reflection-padded convolution followed by a scaled/shifted tanh output head."""

    def __init__(self, in_channels, out_channels, kernel_size, stride):
        super(ConvTanh, self).__init__(in_channels, out_channels, kernel_size, stride)
        self.tanh = torch.nn.Tanh()

    def forward(self, x):
        conv_out = super(ConvTanh, self).forward(x)
        # Squash to (-1, 1), scale by 150 and recenter around 255/2.
        return self.tanh(conv_out / 255) * 150 + 255 / 2
|
class ConvInstRelu(ConvLayer):
    """Reflection-padded convolution -> instance norm -> ReLU."""

    def __init__(self, in_channels, out_channels, kernel_size, stride):
        super(ConvInstRelu, self).__init__(in_channels, out_channels, kernel_size, stride)
        self.instance = torch.nn.InstanceNorm2d(out_channels, affine=True)
        self.relu = torch.nn.ReLU()

    def forward(self, x):
        conv_out = super(ConvInstRelu, self).forward(x)
        return self.relu(self.instance(conv_out))
|
class UpsampleConvLayer(torch.nn.Module):
    """Upsamples the input and then does a convolution.

    This method gives better results compared to ConvTranspose2d.
    ref: http://distill.pub/2016/deconv-checkerboard/
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, upsample=None):
        super(UpsampleConvLayer, self).__init__()
        self.upsample = upsample
        pad = int(np.floor(kernel_size / 2))
        self.reflection_pad = torch.nn.ReflectionPad2d(pad)
        self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride)

    def forward(self, x):
        out = x
        if self.upsample:
            out = torch.nn.functional.interpolate(out, scale_factor=self.upsample)
        return self.conv2d(self.reflection_pad(out))
|
class UpsampleConvInstRelu(UpsampleConvLayer):
    """Upsampling convolution -> instance norm -> ReLU."""

    def __init__(self, in_channels, out_channels, kernel_size, stride, upsample=None):
        super(UpsampleConvInstRelu, self).__init__(in_channels, out_channels, kernel_size, stride, upsample)
        self.instance = torch.nn.InstanceNorm2d(out_channels, affine=True)
        self.relu = torch.nn.ReLU()

    def forward(self, x):
        conv_out = super(UpsampleConvInstRelu, self).forward(x)
        return self.relu(self.instance(conv_out))
|
class ResidualBlock(torch.nn.Module):
    """Two instance-normalised convolutions with an additive skip connection."""

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1):
        super(ResidualBlock, self).__init__()
        self.conv1 = ConvLayer(in_channels, out_channels, kernel_size, stride)
        self.in1 = torch.nn.InstanceNorm2d(out_channels, affine=True)
        self.conv2 = ConvLayer(out_channels, out_channels, kernel_size, stride)
        self.in2 = torch.nn.InstanceNorm2d(out_channels, affine=True)
        self.relu = torch.nn.ReLU()

    def forward(self, x):
        hidden = self.relu(self.in1(self.conv1(x)))
        hidden = self.in2(self.conv2(hidden))
        # No ReLU after the addition, matching the original design.
        return hidden + x
|
class ReCoNet(SelectiveLoadModule):
    """ReCoNet style-transfer network: conv encoder, five residual blocks, decoder.

    forward() returns a tuple (bottleneck_features, stylised_output).
    """

    def __init__(self):
        super(ReCoNet, self).__init__()
        # Encoder: 9x9 stem plus two stride-2 downsampling convolutions.
        self.conv1 = ConvInstRelu(3, 32, kernel_size=9, stride=1)
        self.conv2 = ConvInstRelu(32, 64, kernel_size=3, stride=2)
        self.conv3 = ConvInstRelu(64, 128, kernel_size=3, stride=2)
        # Bottleneck residual blocks.
        self.res1 = ResidualBlock(128, 128)
        self.res2 = ResidualBlock(128, 128)
        self.res3 = ResidualBlock(128, 128)
        self.res4 = ResidualBlock(128, 128)
        self.res5 = ResidualBlock(128, 128)
        # Decoder: two upsampling convolutions then a tanh output head.
        self.deconv1 = UpsampleConvInstRelu(128, 64, kernel_size=3, stride=1, upsample=2)
        self.deconv2 = UpsampleConvInstRelu(64, 32, kernel_size=3, stride=1, upsample=2)
        self.deconv3 = ConvTanh(32, 3, kernel_size=9, stride=1)

    def forward(self, x):
        encoder = (self.conv1, self.conv2, self.conv3,
                   self.res1, self.res2, self.res3, self.res4, self.res5)
        for layer in encoder:
            x = layer(x)
        features = x
        for layer in (self.deconv1, self.deconv2, self.deconv3):
            x = layer(x)
        return (features, x)
|
class BaseModel(object):
    """Common scaffolding for the GAN models: option bookkeeping plus
    network save/load helpers. Subclasses override the no-op hooks."""

    def name(self):
        return 'BaseModel'

    def get_image_paths(self):
        pass

    def initialize(self, opt):
        """Record options and derive the tensor type and checkpoint directory."""
        self.opt = opt
        self.gpu_ids = opt.gpu_ids
        self.isTrain = opt.isTrain
        # CUDA tensors when any GPU id was requested, CPU tensors otherwise.
        self.Tensor = (torch.cuda.FloatTensor if self.gpu_ids else torch.Tensor)
        self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)

    def set_input(self, input):
        self.input = input

    def forward(self):
        pass

    def test(self):
        pass

    def optimize_parameters(self):
        pass

    def get_current_visuals(self):
        return self.input

    def get_current_errors(self):
        return {}

    def save(self, label):
        pass

    def save_network(self, network, network_label, epoch_label, gpu_ids):
        """Save weights as <epoch>_net_<label>.pth, then move the net back to GPU."""
        save_filename = ('%s_net_%s.pth' % (epoch_label, network_label))
        save_path = os.path.join(self.save_dir, save_filename)
        torch.save(network.cpu().state_dict(), save_path)
        if (len(gpu_ids) and torch.cuda.is_available()):
            # Fix: Module.cuda takes the device positionally (`device`); the
            # old `device_id=` keyword no longer exists in PyTorch.
            network.cuda(gpu_ids[0])

    def load_network(self, network, network_label, epoch_label):
        """Load weights previously written by save_network into `network`."""
        save_filename = ('%s_net_%s.pth' % (epoch_label, network_label))
        save_path = os.path.join(self.save_dir, save_filename)
        network.load_state_dict(torch.load(save_path))

    def update_learning_rate(self):
        pass

    def as_np(self, data):
        # Move to CPU and convert to a NumPy array (pre-0.4 `.data` accessor).
        return data.cpu().data.numpy()
|
class CycleGANModel(GANModel):
    """Two-direction CycleGAN on top of GANModel: adversarial losses plus
    cycle-consistency, with an optional identity term when opt.identity > 0.

    NOTE(review): this file targets the pre-0.4 PyTorch API (Variable,
    volatile=True, loss.data[0]); keep edits in that style.
    """
    def name(self):
        return 'CycleGANModel'
    def initialize(self, opt):
        GANModel.initialize(self, opt)
        if self.isTrain:
            # L1 criteria for the cycle-reconstruction and identity losses.
            self.criterionCycle = torch.nn.L1Loss()
            self.criterionIdt = torch.nn.L1Loss()
    def test(self):
        # Inference only: translate each domain and reconstruct it back.
        self.real_A = Variable(self.input_A, volatile=True)
        self.fake_B = self.netG_A.forward(self.real_A)
        self.rec_A = self.netG_B.forward(self.fake_B)
        self.real_B = Variable(self.input_B, volatile=True)
        self.fake_A = self.netG_B.forward(self.real_B)
        self.rec_B = self.netG_A.forward(self.fake_A)
    def backward_G(self):
        # Generator step: optional identity + adversarial + cycle losses.
        lambda_idt = self.opt.identity
        lambda_A = self.opt.lambda_A
        lambda_B = self.opt.lambda_B
        if (lambda_idt > 0):
            # Feeding a generator its own target domain should be a no-op;
            # weighted by the matching cycle lambda scaled by lambda_idt.
            self.idt_A = self.netG_A.forward(self.real_B)
            self.loss_idt_A = ((self.criterionIdt(self.idt_A, self.real_B) * lambda_B) * lambda_idt)
            self.idt_B = self.netG_B.forward(self.real_A)
            self.loss_idt_B = ((self.criterionIdt(self.idt_B, self.real_A) * lambda_A) * lambda_idt)
        else:
            self.loss_idt_A = 0
            self.loss_idt_B = 0
        # Adversarial terms: each generator tries to fool its discriminator.
        self.fake_B = self.netG_A.forward(self.real_A)
        pred_fake = self.netD_A.forward(self.fake_B)
        self.loss_G_A = self.criterionGAN(pred_fake, True)
        self.fake_A = self.netG_B.forward(self.real_B)
        pred_fake = self.netD_B.forward(self.fake_A)
        self.loss_G_B = self.criterionGAN(pred_fake, True)
        # Cycle terms: A -> B -> A and B -> A -> B reconstructions.
        self.rec_A = self.netG_B.forward(self.fake_B)
        self.loss_cycle_A = (self.criterionCycle(self.rec_A, self.real_A) * lambda_A)
        self.rec_B = self.netG_A.forward(self.fake_A)
        self.loss_cycle_B = (self.criterionCycle(self.rec_B, self.real_B) * lambda_B)
        self.loss_G = (((((self.loss_G_A + self.loss_G_B) + self.loss_cycle_A) + self.loss_cycle_B) + self.loss_idt_A) + self.loss_idt_B)
        self.loss_G.backward()
    def get_current_errors(self):
        # Ordered name -> scalar mapping of the latest losses for logging.
        D_A = self.loss_D_A.data[0]
        G_A = self.loss_G_A.data[0]
        Cyc_A = self.loss_cycle_A.data[0]
        D_B = self.loss_D_B.data[0]
        G_B = self.loss_G_B.data[0]
        Cyc_B = self.loss_cycle_B.data[0]
        if (self.opt.identity > 0.0):
            idt_A = self.loss_idt_A.data[0]
            idt_B = self.loss_idt_B.data[0]
            return OrderedDict([('D_A', D_A), ('G_A', G_A), ('Cyc_A', Cyc_A), ('idt_A', idt_A), ('D_B', D_B), ('G_B', G_B), ('Cyc_B', Cyc_B), ('idt_B', idt_B)])
        else:
            return OrderedDict([('D_A', D_A), ('G_A', G_A), ('Cyc_A', Cyc_A), ('D_B', D_B), ('G_B', G_B), ('Cyc_B', Cyc_B)])
    def get_current_visuals(self):
        # Convert the cached tensors from the last forward pass to images.
        real_A = util.tensor2im(self.real_A.data)
        fake_B = util.tensor2im(self.fake_B.data)
        rec_A = util.tensor2im(self.rec_A.data)
        real_B = util.tensor2im(self.real_B.data)
        fake_A = util.tensor2im(self.fake_A.data)
        rec_B = util.tensor2im(self.rec_B.data)
        if (self.opt.identity > 0.0):
            idt_A = util.tensor2im(self.idt_A.data)
            idt_B = util.tensor2im(self.idt_B.data)
            return OrderedDict([('real_A', real_A), ('fake_B', fake_B), ('rec_A', rec_A), ('idt_B', idt_B), ('real_B', real_B), ('fake_A', fake_A), ('rec_B', rec_B), ('idt_A', idt_A)])
        else:
            return OrderedDict([('real_A', real_A), ('fake_B', fake_B), ('rec_A', rec_A), ('real_B', real_B), ('fake_A', fake_A), ('rec_B', rec_B)])
|
class BaseOptions(object):
    """Argparse wrapper shared by train/test option classes.

    Subclasses add their own flags in initialize() and set self.isTrain.
    """

    def __init__(self):
        self.parser = argparse.ArgumentParser()
        self.initialized = False

    def initialize(self):
        """Register the flags common to training and testing."""
        self.parser.add_argument('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
        self.parser.add_argument('--batchSize', type=int, default=2, help='input batch size')
        self.parser.add_argument('--loadSize', type=int, default=286, help='scale images to this size')
        self.parser.add_argument('--fineSize', type=int, default=256, help='then crop to this size')
        self.parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels')
        self.parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels')
        self.parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in first conv layer')
        self.parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer')
        self.parser.add_argument('--which_model_netD', type=str, default='basic', help='selects model to use for netD')
        self.parser.add_argument('--which_model_netG', type=str, default='resnet_9blocks', help='selects model to use for netG')
        self.parser.add_argument('--n_layers_D', type=int, default=3, help='only used if which_model_netD==n_layers')
        self.parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2')
        self.parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
        self.parser.add_argument('--align_data', action='store_true', help='if True, the datasets are loaded from "test" and "train" directories and the data pairs are aligned')
        self.parser.add_argument('--model', type=str, default='distance_gan', help='chooses which model to use. cycle_gan, one_direction_test, pix2pix, ...')
        self.parser.add_argument('--which_direction', type=str, default='AtoB', help='AtoB or BtoA')
        self.parser.add_argument('--nThreads', default=2, type=int, help='# threads for loading data')
        self.parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
        self.parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization')
        self.parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
        self.parser.add_argument('--display_winsize', type=int, default=256, help='display window size')
        self.parser.add_argument('--display_id', type=int, default=1, help='window id of the web display')
        self.parser.add_argument('--identity', type=float, default=0.0, help='use identity mapping. Setting identity other than 1 has an effect of scaling the weight of the identity mapping loss. For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set optidentity = 0.1')
        self.parser.add_argument('--use_dropout', action='store_true', help='use dropout for the generator')
        self.parser.add_argument('--max_dataset_size', type=int, default=float('inf'), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
        self.parser.add_argument('--use_self_distance', action='store_true', help='use distance for top and bottom half of the image')
        self.parser.add_argument('--A_to_B', action='store_true', help='only train from A to B')
        self.parser.add_argument('--B_to_A', action='store_true', help='only train from B to A')
        self.parser.add_argument('--unnormalized_distances', action='store_true', help='do not normalize distances by expecatation and std')
        self.parser.add_argument('--use_cycle_loss', action='store_true', help='add cycle loss in addition to distance loss')
        self.initialized = True

    def parse(self):
        """Parse argv, post-process gpu_ids, then echo and persist the options."""
        if (not self.initialized):
            self.initialize()
        self.opt = self.parser.parse_args()
        self.opt.isTrain = self.isTrain  # set by the TrainOptions/TestOptions subclass
        # Turn the comma-separated gpu id string into a list of ints.
        str_ids = self.opt.gpu_ids.split(',')
        self.opt.gpu_ids = []
        for str_id in str_ids:
            gpu_id = int(str_id)  # renamed from `id` to avoid shadowing the builtin
            if (gpu_id >= 0):
                self.opt.gpu_ids.append(gpu_id)
        args = vars(self.opt)
        print('------------ Options -------------')
        for (k, v) in sorted(args.items()):
            print(('%s: %s' % (str(k), str(v))))
        print('-------------- End ----------------')
        # Persist the options next to the checkpoints for reproducibility.
        expr_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name)
        util.mkdirs(expr_dir)
        file_name = os.path.join(expr_dir, 'opt.txt')
        with open(file_name, 'wt') as opt_file:
            opt_file.write('------------ Options -------------\n')
            for (k, v) in sorted(args.items()):
                opt_file.write(('%s: %s\n' % (str(k), str(v))))
            opt_file.write('-------------- End ----------------\n')
        return self.opt
|
class TestOptions(BaseOptions):
    """Command-line options used when running inference / evaluation."""

    def initialize(self):
        BaseOptions.initialize(self)
        self.isTrain = False
        add = self.parser.add_argument
        add('--ntest', type=int, default=float('inf'), help='# of test examples.')
        add('--results_dir', type=str, default='./results', help='saves results_cycle here.')
        add('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images')
        add('--phase', type=str, default='test', help='train, val, test, etc')
        add('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
        add('--how_many', type=int, default=50, help='how many test images to run')
|
class TrainOptions(BaseOptions):
    """Command-line options that only matter during training."""

    def initialize(self):
        BaseOptions.initialize(self)
        self.isTrain = True
        add = self.parser.add_argument
        add('--display_freq', type=int, default=100, help='frequency of showing training results_cycle on screen')
        add('--print_freq', type=int, default=100, help='frequency of showing training results_cycle on console')
        add('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results_cycle')
        add('--save_epoch_freq', type=int, default=5, help='frequency of saving checkpoints at the end of epochs')
        add('--continue_train', action='store_true', help='continue training: load the latest model')
        add('--phase', type=str, default='train', help='train, val, test, etc')
        add('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
        add('--niter', type=int, default=100, help='# of iter at starting learning rate')
        add('--niter_decay', type=int, default=100, help='# of iter to linearly decay learning rate to zero')
        add('--beta1', type=float, default=0.5, help='momentum term of adam')
        add('--lr', type=float, default=0.0002, help='initial learning rate for adam')
        add('--no_lsgan', action='store_true', help='do *not* use least square GAN, if false, use vanilla GAN')
        add('--lambda_A', type=float, default=10.0, help='weight for cycle loss (A -> B -> A)')
        add('--lambda_B', type=float, default=10.0, help='weight for cycle loss (B -> A -> B)')
        add('--lambda_distance_A', type=float, default=1, help='weight for distance loss (A -> B)')
        add('--lambda_distance_B', type=float, default=1, help='weight for distance loss (B -> A)')
        add('--pool_size', type=int, default=50, help='the size of image buffer that stores previously generated images')
        add('--no_html', action='store_true', help='do not save intermediate training results_cycle to [opt.checkpoints_dir]/[opt.name]/web/')
        add('--no_flip', action='store_true', help='if specified, do not flip the images for data argumentation')
        add('--max_items', type=int, default=900, help='maximum number of items to use for expectation and std calculation')
|
class BaseDataLoader(object):
    """Minimal data-loader interface; subclasses override load_data()."""

    def __init__(self):
        pass

    def initialize(self, opt):
        # Just remember the options; subclasses build the actual loaders.
        self.opt = opt

    def load_data(self):
        return None
|
def CreateDataLoader(opt):
    """Instantiate and initialise the loader selected by `opt.align_data`."""
    if opt.align_data > 0:
        from cyclegan_arch.data.aligned_data_loader import AlignedDataLoader
        loader = AlignedDataLoader()
    else:
        from unaligned_data_loader import UnalignedDataLoader
        loader = UnalignedDataLoader()
    print(loader.name())
    loader.initialize(opt)
    return loader
|
def is_image_file(filename):
    """Return True when `filename` ends with one of the known image extensions."""
    # str.endswith accepts a tuple of suffixes, equivalent to the any() scan.
    return filename.endswith(tuple(IMG_EXTENSIONS))
|
def make_dataset(dir):
    """Recursively collect the paths of all image files under `dir`."""
    assert os.path.isdir(dir), ('%s is not a valid directory' % dir)
    return [os.path.join(root, fname)
            for (root, _, fnames) in sorted(os.walk(dir))
            for fname in fnames
            if is_image_file(fname)]
|
def default_loader(path):
    """Open `path` with PIL and force-convert it to RGB."""
    image = Image.open(path)
    return image.convert('RGB')
|
class ImageFolder(data.Dataset):
    """Dataset over every image file found (recursively) under `root`."""

    def __init__(self, root, transform=None, return_paths=False, loader=default_loader):
        found = make_dataset(root)
        if not found:
            raise RuntimeError('Found 0 images in: ' + root + '\nSupported image extensions are: ' + ','.join(IMG_EXTENSIONS))
        self.root = root
        self.imgs = found
        self.transform = transform
        self.return_paths = return_paths
        self.loader = loader

    def __getitem__(self, index):
        """Return the (optionally transformed) image, with its path if requested."""
        img_path = self.imgs[index]
        image = self.loader(img_path)
        if self.transform is not None:
            image = self.transform(image)
        return (image, img_path) if self.return_paths else image

    def __len__(self):
        return len(self.imgs)
|
class PairedData(object):
    """Joint iterator over two DataLoaders, yielding a dict with one batch from each.

    A loader that runs out is restarted from the beginning; iteration ends only
    once BOTH loaders have wrapped around (or max_dataset_size is exceeded).
    """
    def __init__(self, data_loader_A, data_loader_B, max_dataset_size, flip):
        self.data_loader_A = data_loader_A
        self.data_loader_B = data_loader_B
        # stop_A / stop_B record that the corresponding loader has wrapped.
        self.stop_A = False
        self.stop_B = False
        self.max_dataset_size = max_dataset_size
        self.flip = flip  # when True, randomly mirror each yielded pair
    def __iter__(self):
        # Reset wrap flags and counters for a fresh epoch.
        self.stop_A = False
        self.stop_B = False
        self.data_loader_A_iter = iter(self.data_loader_A)
        self.data_loader_B_iter = iter(self.data_loader_B)
        self.iter = 0
        return self
    def __next__(self):
        (A, A_paths) = (None, None)
        (B, B_paths) = (None, None)
        try:
            (A, A_paths) = next(self.data_loader_A_iter)
        except StopIteration:
            # Loader A exhausted: remember it wrapped, then restart it.
            if ((A is None) or (A_paths is None)):
                self.stop_A = True
                self.data_loader_A_iter = iter(self.data_loader_A)
                (A, A_paths) = next(self.data_loader_A_iter)
        try:
            (B, B_paths) = next(self.data_loader_B_iter)
        except StopIteration:
            # Loader B exhausted: same wrap-and-restart treatment.
            if ((B is None) or (B_paths is None)):
                self.stop_B = True
                self.data_loader_B_iter = iter(self.data_loader_B)
                (B, B_paths) = next(self.data_loader_B_iter)
        if ((self.stop_A and self.stop_B) or (self.iter > self.max_dataset_size)):
            # Both sides have wrapped (or the cap was hit): end the epoch.
            self.stop_A = False
            self.stop_B = False
            raise StopIteration()
        else:
            self.iter += 1
            if (self.flip and (random.random() < 0.5)):
                # One coin toss flips BOTH A and B along the width axis (dim 3).
                idx = [i for i in range((A.size(3) - 1), (- 1), (- 1))]
                idx = torch.LongTensor(idx)
                A = A.index_select(3, idx)
                B = B.index_select(3, idx)
            return {'A': A, 'A_paths': A_paths, 'B': B, 'B_paths': B_paths}
|
class UnalignedDataLoader(BaseDataLoader):
    """Loads unpaired images from <dataroot>/<phase>A and <dataroot>/<phase>B."""

    def initialize(self, opt):
        BaseDataLoader.initialize(self, opt)
        transform = transforms.Compose([
            transforms.Scale(opt.loadSize),
            transforms.RandomCrop(opt.fineSize),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ])

        def build(side):
            # One ImageFolder + DataLoader per domain directory.
            dataset = ImageFolder(root=(opt.dataroot + '/' + opt.phase + side), transform=transform, return_paths=True)
            loader = torch.utils.data.DataLoader(dataset, batch_size=self.opt.batchSize, shuffle=(not self.opt.serial_batches), num_workers=int(self.opt.nThreads))
            return dataset, loader

        self.dataset_A, loader_A = build('A')
        self.dataset_B, loader_B = build('B')
        flip = opt.isTrain and (not opt.no_flip)
        self.paired_data = PairedData(loader_A, loader_B, self.opt.max_dataset_size, flip)

    def name(self):
        return 'UnalignedDataLoader'

    def load_data(self):
        return self.paired_data

    def __len__(self):
        return min(max(len(self.dataset_A), len(self.dataset_B)), self.opt.max_dataset_size)
|
class DistanceGANModel(CycleGANModel):
    """DistanceGAN: trains the CycleGAN generators with a distance-preservation
    loss — the distance between two samples in one domain should match the
    distance between their translations in the other domain.

    Fixes over the original block: `xrange` (Python 2 only) replaced with
    `range`, and the `super` call now names this class (behaviour unchanged,
    since no intermediate class defines __init__).
    """

    def __init__(self, dataset):
        # `dataset` is the PairedData iterator; it is scanned once at
        # initialization time to estimate the distance statistics.
        super(DistanceGANModel, self).__init__()
        self.dataset = dataset

    def name(self):
        return 'DistanceGANModel'

    def initialize(self, opt):
        CycleGANModel.initialize(self, opt)
        self.use_self_distance = opt.use_self_distance
        self.normalize_distances = (not opt.unnormalized_distances)
        if (self.isTrain and self.normalize_distances):
            self.set_expectation_and_std()

    def distance(self, A, B):
        """Mean absolute difference between two tensors."""
        return torch.mean(torch.abs((A - B)))

    def get_individual_distance_loss(self, A_i, A_j, AB_i, AB_j, B_i, B_j, BA_i, BA_j):
        """Distance-preservation terms for one pair: |d_A - d_AB| and |d_B - d_BA|."""
        distance_in_A = self.distance(A_i, A_j)
        distance_in_AB = self.distance(AB_i, AB_j)
        distance_in_B = self.distance(B_i, B_j)
        distance_in_BA = self.distance(BA_i, BA_j)
        if self.normalize_distances:
            # Standardise each distance with its own domain's statistics.
            distance_in_A = ((distance_in_A - self.expectation_A) / self.std_A)
            distance_in_AB = ((distance_in_AB - self.expectation_B) / self.std_B)
            distance_in_B = ((distance_in_B - self.expectation_B) / self.std_B)
            distance_in_BA = ((distance_in_BA - self.expectation_A) / self.std_A)
        return (torch.abs((distance_in_A - distance_in_AB)), torch.abs((distance_in_B - distance_in_BA)))

    def get_self_distances(self):
        """Distance terms between the top and bottom halves of each image."""
        (A_half_1, A_half_2) = torch.chunk(self.real_A, 2, dim=2)
        (B_half_1, B_half_2) = torch.chunk(self.real_B, 2, dim=2)
        (AB_half_1, AB_half_2) = torch.chunk(self.fake_B, 2, dim=2)
        (BA_half_1, BA_half_2) = torch.chunk(self.fake_A, 2, dim=2)
        (l_distance_A, l_distance_B) = self.get_individual_distance_loss(A_half_1, A_half_2, AB_half_1, AB_half_2, B_half_1, B_half_2, BA_half_1, BA_half_2)
        return (l_distance_A, l_distance_B)

    def get_distance_losses(self):
        """Average the pairwise distance-preservation loss over every batch pair."""
        As = torch.split(self.real_A, 1)
        Bs = torch.split(self.real_B, 1)
        ABs = torch.split(self.fake_B, 1)
        BAs = torch.split(self.fake_A, 1)
        loss_distance_A = 0.0
        loss_distance_B = 0.0
        num_pairs = 0
        min_length = min(len(As), len(Bs))
        # Fix: xrange does not exist on Python 3; use range.
        for i in range(min_length - 1):
            for j in range((i + 1), min_length):
                num_pairs += 1
                (loss_distance_A_ij, loss_distance_B_ij) = self.get_individual_distance_loss(As[i], As[j], ABs[i], ABs[j], Bs[i], Bs[j], BAs[i], BAs[j])
                loss_distance_A += loss_distance_A_ij
                loss_distance_B += loss_distance_B_ij
        loss_distance_A = (loss_distance_A / num_pairs)
        loss_distance_B = (loss_distance_B / num_pairs)
        return (loss_distance_A, loss_distance_B)

    def get_std(self, num_items, vars, expectation):
        """Std of pairwise (or top/bottom-half) distances over `vars`."""
        num_pairs = 0
        std_sum = 0.0
        if self.use_self_distance:
            for i in range(num_items):
                (var_half_1, var_half_2) = torch.chunk(vars[i], 2, dim=2)
                std_sum += np.square((self.as_np(self.distance(var_half_1, var_half_2)) - expectation))
            return np.sqrt((std_sum / num_items))
        for i in range(num_items - 1):
            for j in range((i + 1), num_items):
                num_pairs += 1
                std_sum += np.square((self.as_np(self.distance(vars[i], vars[j])) - expectation))
        return np.sqrt((std_sum / num_pairs))

    def get_expectation(self, num_items, vars):
        """Mean pairwise (or top/bottom-half) distance over `vars`."""
        num_pairs = 0
        distance_sum = 0.0
        if self.use_self_distance:
            for i in range(num_items):
                (var_half_1, var_half_2) = torch.chunk(vars[i], 2, dim=2)
                distance_sum += self.as_np(self.distance(var_half_1, var_half_2))
            return (distance_sum / num_items)
        for i in range(num_items - 1):
            for j in range((i + 1), num_items):
                num_pairs += 1
                distance_sum += self.as_np(self.distance(vars[i], vars[j]))
        return (distance_sum / num_pairs)

    def set_expectation_and_std(self):
        """Scan up to opt.max_items batches to estimate per-domain distance mean/std."""
        max_items = self.opt.max_items
        A_vars = []
        B_vars = []
        num_vars_A = 0
        num_vars_B = 0
        for (i, data) in enumerate(self.dataset):
            if ((self.dataset.stop_A and self.dataset.stop_B) or (i >= max_items)):
                break
            if (not self.dataset.stop_A):
                A = Variable(data['A'], volatile=True)
                # Skip ragged final batches so every sample has the same shape.
                if (A.size()[0] != self.opt.batchSize):
                    continue
                A_vars.append(A)
                num_vars_A += 1
            if (not self.dataset.stop_B):
                B = Variable(data['B'], volatile=True)
                if (B.size()[0] != self.opt.batchSize):
                    continue
                B_vars.append(B)
                num_vars_B += 1
        self.expectation_A = self.get_expectation(num_vars_A, A_vars)[0].astype(float)
        self.expectation_B = self.get_expectation(num_vars_B, B_vars)[0].astype(float)
        self.std_A = self.get_std(num_vars_A, A_vars, self.expectation_A)[0].astype(float)
        self.std_B = self.get_std(num_vars_B, B_vars, self.expectation_B)[0].astype(float)
        print(('Expectation for dataset A: %f' % self.expectation_A))
        print(('Expectation for dataset B: %f' % self.expectation_B))
        print(('Std for dataset A: %f' % self.std_A))
        print(('Std for dataset B: %f' % self.std_B))

    def backward_G(self):
        """Generator step: adversarial + distance (and optionally cycle) losses."""
        self.fake_B = self.netG_A.forward(self.real_A)
        pred_fake = self.netD_A.forward(self.fake_B)
        self.loss_G_A = self.criterionGAN(pred_fake, True)
        self.fake_A = self.netG_B.forward(self.real_B)
        pred_fake = self.netD_B.forward(self.fake_A)
        self.loss_G_B = self.criterionGAN(pred_fake, True)
        if self.use_self_distance:
            (self.loss_distance_A, self.loss_distance_B) = self.get_self_distances()
        else:
            (self.loss_distance_A, self.loss_distance_B) = self.get_distance_losses()
        self.loss_distance_A *= self.opt.lambda_distance_A
        self.loss_distance_B *= self.opt.lambda_distance_B
        if self.A_to_B:
            self.loss_G = (self.loss_G_A + self.loss_distance_A)
        elif self.B_to_A:
            self.loss_G = (self.loss_G_B + self.loss_distance_B)
        else:
            self.loss_G = (((self.loss_G_A + self.loss_G_B) + self.loss_distance_A) + self.loss_distance_B)
        if self.opt.use_cycle_loss:
            self.rec_A = self.netG_B.forward(self.fake_B)
            self.loss_cycle_A = (self.criterionCycle(self.rec_A, self.real_A) * self.opt.lambda_A)
            self.rec_B = self.netG_A.forward(self.fake_A)
            self.loss_cycle_B = (self.criterionCycle(self.rec_B, self.real_B) * self.opt.lambda_B)
            self.loss_G += (self.loss_cycle_A + self.loss_cycle_B)
        self.loss_G.backward()

    def get_current_errors(self):
        """Scalar losses for logging; delegates to CycleGAN when cycle loss is on."""
        if self.opt.use_cycle_loss:
            return CycleGANModel.get_current_errors(self)
        D_A = self.loss_D_A.data[0]
        G_A = self.loss_G_A.data[0]
        Dist_A = self.loss_distance_A.data[0]
        D_B = self.loss_D_B.data[0]
        G_B = self.loss_G_B.data[0]
        Dist_B = self.loss_distance_B.data[0]
        if self.A_to_B:
            return OrderedDict([('D_A', D_A), ('G_A', G_A), ('Dist_A', Dist_A)])
        elif self.B_to_A:
            return OrderedDict([('D_B', D_B), ('G_B', G_B), ('Dist_B', Dist_B)])
        return OrderedDict([('D_A', D_A), ('G_A', G_A), ('Dist_A', Dist_A), ('D_B', D_B), ('G_B', G_B), ('Dist_B', Dist_B)])

    def get_current_visuals(self):
        """Current images for display; delegates to CycleGAN when cycle loss is on."""
        if self.opt.use_cycle_loss:
            return CycleGANModel.get_current_visuals(self)
        real_A = util.tensor2im(self.real_A.data)
        fake_B = util.tensor2im(self.fake_B.data)
        real_B = util.tensor2im(self.real_B.data)
        fake_A = util.tensor2im(self.fake_A.data)
        if self.A_to_B:
            return OrderedDict([('real_A', real_A), ('fake_B', fake_B)])
        elif self.B_to_A:
            return OrderedDict([('real_B', real_B), ('fake_A', fake_A)])
        return OrderedDict([('real_A', real_A), ('fake_B', fake_B), ('real_B', real_B), ('fake_A', fake_A)])

    def test(self):
        """Inference: translate both directions (no reconstructions unless cycling)."""
        if self.opt.use_cycle_loss:
            return CycleGANModel.test(self)
        self.real_A = Variable(self.input_A, volatile=True)
        self.fake_B = self.netG_A.forward(self.real_A)
        self.real_B = Variable(self.input_B, volatile=True)
        self.fake_A = self.netG_B.forward(self.real_B)
|
class GANModel(BaseModel):
def name(self):
return 'GANModel'
def initialize(self, opt):
BaseModel.initialize(self, opt)
self.A_to_B = opt.A_to_B
self.B_to_A = opt.B_to_A
nb = opt.batchSize
size = opt.fineSize
self.input_A = self.Tensor(nb, opt.input_nc, size, size)
self.input_B = self.Tensor(nb, opt.output_nc, size, size)
self.netG_A = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.which_model_netG, opt.norm, opt.use_dropout, self.gpu_ids)
self.netG_B = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf, opt.which_model_netG, opt.norm, opt.use_dropout, self.gpu_ids)
if self.isTrain:
use_sigmoid = opt.no_lsgan
self.netD_A = networks.define_D(opt.output_nc, opt.ndf, opt.which_model_netD, opt.n_layers_D, opt.norm, use_sigmoid, self.gpu_ids)
self.netD_B = networks.define_D(opt.input_nc, opt.ndf, opt.which_model_netD, opt.n_layers_D, opt.norm, use_sigmoid, self.gpu_ids)
if ((not self.isTrain) or opt.continue_train):
which_epoch = opt.which_epoch
self.load_network(self.netG_A, 'G_A', which_epoch)
self.load_network(self.netG_B, 'G_B', which_epoch)
if self.isTrain:
self.load_network(self.netD_A, 'D_A', which_epoch)
self.load_network(self.netD_B, 'D_B', which_epoch)
if self.isTrain:
self.old_lr = opt.lr
self.fake_A_pool = ImagePool(opt.pool_size)
self.fake_B_pool = ImagePool(opt.pool_size)
self.criterionGAN = networks.GANLoss(use_lsgan=(not opt.no_lsgan), tensor=self.Tensor)
self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_D_A = torch.optim.Adam(self.netD_A.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_D_B = torch.optim.Adam(self.netD_B.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
print('---------- Networks initialized -------------')
networks.print_network(self.netG_A)
networks.print_network(self.netG_B)
networks.print_network(self.netD_A)
networks.print_network(self.netD_B)
print('-----------------------------------------------')
def set_input(self, input):
AtoB = (self.opt.which_direction == 'AtoB')
input_A = input[('A' if AtoB else 'B')]
input_B = input[('B' if AtoB else 'A')]
self.input_A.resize_(input_A.size()).copy_(input_A)
self.input_B.resize_(input_B.size()).copy_(input_B)
self.image_paths = input[('A_paths' if AtoB else 'B_paths')]
def forward(self):
self.real_A = Variable(self.input_A)
self.real_B = Variable(self.input_B)
def test(self):
self.real_A = Variable(self.input_A, volatile=True)
self.fake_B = self.netG_A.forward(self.real_A)
self.real_B = Variable(self.input_B, volatile=True)
self.fake_A = self.netG_B.forward(self.real_B)
def backward_D_basic(self, netD, real, fake):
pred_real = netD.forward(real)
loss_D_real = self.criterionGAN(pred_real, True)
pred_fake = netD.forward(fake.detach())
loss_D_fake = self.criterionGAN(pred_fake, False)
loss_D = ((loss_D_real + loss_D_fake) * 0.5)
loss_D.backward()
return loss_D
def backward_D_A(self):
fake_B = self.fake_B_pool.query(self.fake_B)
self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B, fake_B)
def backward_D_B(self):
fake_A = self.fake_A_pool.query(self.fake_A)
self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_A, fake_A)
def backward_G(self):
self.fake_B = self.netG_A.forward(self.real_A)
pred_fake = self.netD_A.forward(self.fake_B)
self.loss_G_A = self.criterionGAN(pred_fake, True)
self.fake_A = self.netG_B.forward(self.real_B)
pred_fake = self.netD_B.forward(self.fake_A)
self.loss_G_B = self.criterionGAN(pred_fake, True)
if self.A_to_B:
self.loss_G = self.loss_G_A
elif self.B_to_A:
self.loss_G = self.loss_G_B
else:
self.loss_G = (self.loss_G_A + self.loss_G_B)
self.loss_G.backward()
def optimize_parameters(self):
self.forward()
self.optimizer_G.zero_grad()
self.backward_G()
self.optimizer_G.step()
if (not self.B_to_A):
self.optimizer_D_A.zero_grad()
self.backward_D_A()
self.optimizer_D_A.step()
if (not self.A_to_B):
self.optimizer_D_B.zero_grad()
self.backward_D_B()
self.optimizer_D_B.step()
def get_current_errors(self):
D_A = self.loss_D_A.data[0]
G_A = self.loss_G_A.data[0]
D_B = self.loss_D_B.data[0]
G_B = self.loss_G_B.data[0]
return OrderedDict([('D_A', D_A), ('G_A', G_A), ('D_B', D_B), ('G_B', G_B)])
def get_current_visuals(self):
real_A = util.tensor2im(self.real_A.data)
fake_B = util.tensor2im(self.fake_B.data)
real_B = util.tensor2im(self.real_B.data)
fake_A = util.tensor2im(self.fake_A.data)
return OrderedDict([('real_A', real_A), ('fake_B', fake_B), ('real_B', real_B), ('fake_A', fake_A)])
def save(self, label):
    """Checkpoint all four networks under the given label (e.g. epoch number)."""
    self.save_network(self.netG_A, 'G_A', label, self.gpu_ids)
    self.save_network(self.netD_A, 'D_A', label, self.gpu_ids)
    self.save_network(self.netG_B, 'G_B', label, self.gpu_ids)
    self.save_network(self.netD_B, 'D_B', label, self.gpu_ids)
def update_learning_rate(self):
    """Decay the learning rate of all three optimizers by one linear step."""
    decay_step = self.opt.lr / self.opt.niter_decay
    lr = self.old_lr - decay_step
    # Apply the same rate to every optimizer.
    for optimizer in (self.optimizer_D_A, self.optimizer_D_B, self.optimizer_G):
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
    print('update learning rate: %f -> %f' % (self.old_lr, lr))
    self.old_lr = lr
def get_image_paths(self):
    """Return the stored paths of the most recently loaded input images."""
    return self.image_paths
|
def get_loader(config):
    """Builds and returns Dataloader for MNIST and SVHN dataset."""
    transform = transforms.Compose([
        transforms.Scale(config.image_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])

    def _make_loader(dataset, shuffle):
        # Single place for the DataLoader settings shared by all four loaders.
        return torch.utils.data.DataLoader(
            dataset=dataset,
            batch_size=config.batch_size,
            shuffle=shuffle,
            num_workers=config.num_workers,
        )

    svhn = datasets.SVHN(root=config.svhn_path, download=True, transform=transform, split='train')
    mnist = datasets.MNIST(root=config.mnist_path, download=True, transform=transform, train=True)
    svhn_test = datasets.SVHN(root=config.svhn_path, download=True, transform=transform, split='test')
    mnist_test = datasets.MNIST(root=config.mnist_path, download=True, transform=transform, train=False)
    # Training loaders shuffle; evaluation loaders keep dataset order.
    return (_make_loader(svhn, True), _make_loader(mnist, True),
            _make_loader(svhn_test, False), _make_loader(mnist_test, False))
|
def str2bool(v):
    """Parse a command-line flag string into a bool.

    Returns True exactly when `v` is the word 'true' (case-insensitive).
    The previous implementation used substring membership
    (`v.lower() in 'true'`), which wrongly returned True for '', 't',
    'ru', 'rue', etc.
    """
    return v.lower() == 'true'
|
def main(config):
    """Entry point: build the data loaders and Solver, ensure the output
    directories exist, then train or sample according to config.mode."""
    loaders = get_loader(config)
    (svhn_loader, mnist_loader, svhn_test_loader, mnist_test_loader) = loaders
    solver = Solver(config, svhn_loader, mnist_loader)
    cudnn.benchmark = True
    # Make sure checkpoint/sample directories exist before anything is written.
    for out_dir in (config.model_path, config.sample_path):
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
    if config.mode == 'train':
        solver.train(svhn_test_loader, mnist_test_loader)
    elif config.mode == 'sample':
        solver.sample()
|
def deconv(c_in, c_out, k_size, stride=2, pad=1, bn=True):
    """Transposed-conv layer (bias-free), optionally followed by BatchNorm,
    wrapped in an nn.Sequential."""
    modules = [nn.ConvTranspose2d(c_in, c_out, k_size, stride, pad, bias=False)]
    if bn:
        modules.append(nn.BatchNorm2d(c_out))
    return nn.Sequential(*modules)
|
def conv(c_in, c_out, k_size, stride=2, pad=1, bn=True):
    """Conv layer (bias-free), optionally followed by BatchNorm,
    wrapped in an nn.Sequential."""
    modules = [nn.Conv2d(c_in, c_out, k_size, stride, pad, bias=False)]
    if bn:
        modules.append(nn.BatchNorm2d(c_out))
    return nn.Sequential(*modules)
|
class G12(nn.Module):
    """Generator mapping MNIST images (1 channel) to SVHN-style images (3 channels)."""

    def __init__(self, conf, conv_dim=64, svhn_input=None):
        super(G12, self).__init__()
        self.config = conf
        # Encoder: two stride-2 downsampling convs, then two stride-1 convs.
        self.conv1 = conv(1, conv_dim, 4)
        self.conv2 = conv(conv_dim, conv_dim * 2, 4)
        self.conv3 = conv(conv_dim * 2, conv_dim * 2, 3, 1, 1)
        self.conv4 = conv(conv_dim * 2, conv_dim * 2, 3, 1, 1)
        # Decoder: two stride-2 transposed convs; final layer has no BatchNorm.
        self.deconv1 = deconv(conv_dim * 2, conv_dim, 4)
        self.deconv2 = deconv(conv_dim, 3, 4, bn=False)

    def forward(self, x):
        h = x
        # All intermediate stages share the same leaky-ReLU activation.
        for stage in (self.conv1, self.conv2, self.conv3, self.conv4, self.deconv1):
            h = F.leaky_relu(stage(h), 0.05)
        return F.tanh(self.deconv2(h))
|
class G21(nn.Module):
    """Generator mapping SVHN images (3 channels) to MNIST-style images (1 channel)."""

    def __init__(self, conf, conv_dim=64, svhn_input=None):
        super(G21, self).__init__()
        self.config = conf
        # Encoder: two stride-2 downsampling convs, then two stride-1 convs.
        self.conv1 = conv(3, conv_dim, 4)
        self.conv2 = conv(conv_dim, conv_dim * 2, 4)
        self.conv3 = conv(conv_dim * 2, conv_dim * 2, 3, 1, 1)
        self.conv4 = conv(conv_dim * 2, conv_dim * 2, 3, 1, 1)
        # Decoder: two stride-2 transposed convs; final layer has no BatchNorm.
        self.deconv1 = deconv(conv_dim * 2, conv_dim, 4)
        self.deconv2 = deconv(conv_dim, 1, 4, bn=False)

    def forward(self, x):
        h = x
        for stage in (self.conv1, self.conv2, self.conv3, self.conv4, self.deconv1):
            h = F.leaky_relu(stage(h), 0.05)
        return F.tanh(self.deconv2(h))
|
class D1(nn.Module):
    """Discriminator for MNIST: three stride-2 convs, then a 4x4 conv acting
    as a fully connected scoring layer."""

    def __init__(self, conv_dim=64):
        super(D1, self).__init__()
        # First layer has no BatchNorm (standard DCGAN discriminator choice here).
        self.conv1 = conv(1, conv_dim, 4, bn=False)
        self.conv2 = conv(conv_dim, conv_dim * 2, 4)
        self.conv3 = conv(conv_dim * 2, conv_dim * 4, 4)
        # 4x4 conv with stride 1 and no padding collapses the 4x4 map to one score.
        self.fc = conv(conv_dim * 4, 1, 4, 1, 0, False)

    def forward(self, x):
        h = x
        for stage in (self.conv1, self.conv2, self.conv3):
            h = F.leaky_relu(stage(h), 0.05)
        return self.fc(h).squeeze()
|
class D2(nn.Module):
    """Discriminator for SVHN: three stride-2 convs, then a 4x4 conv acting
    as a fully connected scoring layer."""

    def __init__(self, conv_dim=64):
        super(D2, self).__init__()
        # First layer has no BatchNorm (standard DCGAN discriminator choice here).
        self.conv1 = conv(3, conv_dim, 4, bn=False)
        self.conv2 = conv(conv_dim, conv_dim * 2, 4)
        self.conv3 = conv(conv_dim * 2, conv_dim * 4, 4)
        # 4x4 conv with stride 1 and no padding collapses the 4x4 map to one score.
        self.fc = conv(conv_dim * 4, 1, 4, 1, 0, False)

    def forward(self, x):
        h = x
        for stage in (self.conv1, self.conv2, self.conv3):
            h = F.leaky_relu(stage(h), 0.05)
        return self.fc(h).squeeze()
|
def create_model(opt, dataset=None):
    """Factory: instantiate and initialize the model named by opt.model.

    Supported names: 'gan', 'cycle_gan', 'distance_gan' (the latter also
    receives `dataset`). Raises ValueError for anything else.
    """
    print(opt.model)
    if (opt.model == 'gan'):
        from .gan_model import GANModel
        model = GANModel()
    elif (opt.model == 'cycle_gan'):
        from .cycle_gan_model import CycleGANModel
        model = CycleGANModel()
    elif (opt.model == 'distance_gan'):
        from .distance_gan_model import DistanceGANModel
        model = DistanceGANModel(dataset)
    else:
        raise ValueError(('Model [%s] not recognized.' % opt.model))
    model.initialize(opt)
    print(('model [%s] was created' % model.name()))
    return model
|
def weights_init(m):
    """Initializer for use with net.apply(): conv weights ~ N(0, 0.02);
    BatchNorm2d/InstanceNorm2d weights ~ N(1, 0.02) with zero bias."""
    classname = m.__class__.__name__
    if 'Conv' in classname:
        # Also matches ConvTranspose layers.
        m.weight.data.normal_(0.0, 0.02)
    elif ('BatchNorm2d' in classname) or ('InstanceNorm2d' in classname):
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
|
def get_norm_layer(norm_type):
    """Map a normalization name to the matching nn layer class.

    norm_type: 'batch' -> nn.BatchNorm2d, 'instance' -> nn.InstanceNorm2d.
    Raises ValueError for unknown names. (The original printed a message
    that itself crashed with NameError on the undefined variable `norm`,
    then raised UnboundLocalError by returning a never-assigned local.)
    """
    if norm_type == 'batch':
        return nn.BatchNorm2d
    elif norm_type == 'instance':
        return nn.InstanceNorm2d
    raise ValueError('normalization layer [%s] is not found' % norm_type)
|
def define_G(input_nc, output_nc, ngf, which_model_netG, norm='batch', use_dropout=False, gpu_ids=[]):
    """Build, optionally move to GPU, and weight-initialize a generator.

    which_model_netG: 'resnet_9blocks' | 'resnet_6blocks' | 'unet_128' | 'unet_256'.
    Raises ValueError for unknown names (previously it only printed a message
    and then crashed with AttributeError on the None network below).
    """
    use_gpu = (len(gpu_ids) > 0)
    norm_layer = get_norm_layer(norm_type=norm)
    if use_gpu:
        assert torch.cuda.is_available()
    if (which_model_netG == 'resnet_9blocks'):
        netG = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9, gpu_ids=gpu_ids)
    elif (which_model_netG == 'resnet_6blocks'):
        netG = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6, gpu_ids=gpu_ids)
    elif (which_model_netG == 'unet_128'):
        netG = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
    elif (which_model_netG == 'unet_256'):
        netG = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
    else:
        raise ValueError('Generator model name [%s] is not recognized' % which_model_netG)
    if use_gpu:
        # Legacy .cuda(device_id=...) signature, consistent with the torch
        # version this codebase targets (volatile Variables are used elsewhere).
        netG.cuda(device_id=gpu_ids[0])
    netG.apply(weights_init)
    return netG
|
def define_D(input_nc, ndf, which_model_netD, n_layers_D=3, norm='batch', use_sigmoid=False, gpu_ids=[]):
    """Build, optionally move to GPU, and weight-initialize a discriminator.

    which_model_netD: 'basic' (3-layer PatchGAN) | 'n_layers' (n_layers_D layers).
    Raises ValueError for unknown names (previously it only printed a message
    and then crashed with AttributeError on the None network below).
    """
    use_gpu = (len(gpu_ids) > 0)
    norm_layer = get_norm_layer(norm_type=norm)
    if use_gpu:
        assert torch.cuda.is_available()
    if (which_model_netD == 'basic'):
        netD = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
    elif (which_model_netD == 'n_layers'):
        netD = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
    else:
        raise ValueError('Discriminator model name [%s] is not recognized' % which_model_netD)
    if use_gpu:
        # Legacy .cuda(device_id=...) signature, consistent with this codebase.
        netD.cuda(device_id=gpu_ids[0])
    netD.apply(weights_init)
    return netD
|
def print_network(net):
    """Print a network's structure followed by its total parameter count."""
    total = sum(param.numel() for param in net.parameters())
    print(net)
    print(('Total number of parameters: %d' % total))
|
class GANLoss(nn.Module):
    """GAN objective that hides the real/fake target-tensor bookkeeping.

    With use_lsgan=True the criterion is MSE (LSGAN); otherwise BCE
    (vanilla GAN, which expects sigmoid discriminator outputs).
    """

    def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0, tensor=torch.FloatTensor):
        super(GANLoss, self).__init__()
        self.real_label = target_real_label
        self.fake_label = target_fake_label
        # Cached label tensors, rebuilt only when the input size changes.
        self.real_label_var = None
        self.fake_label_var = None
        self.Tensor = tensor
        if use_lsgan:
            self.loss = nn.MSELoss()
        else:
            self.loss = nn.BCELoss()

    def get_target_tensor(self, input, target_is_real):
        """Return a constant label tensor shaped like `input`, reusing the cache
        when the element count matches."""
        target_tensor = None
        if target_is_real:
            create_label = ((self.real_label_var is None) or (self.real_label_var.numel() != input.numel()))
            if create_label:
                real_tensor = self.Tensor(input.size()).fill_(self.real_label)
                self.real_label_var = Variable(real_tensor, requires_grad=False)
            target_tensor = self.real_label_var
        else:
            create_label = ((self.fake_label_var is None) or (self.fake_label_var.numel() != input.numel()))
            if create_label:
                fake_tensor = self.Tensor(input.size()).fill_(self.fake_label)
                self.fake_label_var = Variable(fake_tensor, requires_grad=False)
            target_tensor = self.fake_label_var
        return target_tensor

    def __call__(self, input, target_is_real):
        """Compute the configured loss between `input` and the matching label tensor."""
        target_tensor = self.get_target_tensor(input, target_is_real)
        return self.loss(input, target_tensor)
|
class ResnetGenerator(nn.Module):
    """ResNet-style generator: 7x7 stem conv, two stride-2 downsamples,
    n_blocks residual blocks, two stride-2 upsamples, 7x7 head + tanh."""

    def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, gpu_ids=[]):
        assert (n_blocks >= 0)
        super(ResnetGenerator, self).__init__()
        self.input_nc = input_nc
        self.output_nc = output_nc
        self.ngf = ngf
        self.gpu_ids = gpu_ids
        # 7x7 stem keeps spatial size (padding=3).
        model = [nn.Conv2d(input_nc, ngf, kernel_size=7, padding=3), norm_layer(ngf, affine=True), nn.ReLU(True)]
        n_downsampling = 2
        # Each downsampling step halves H/W and doubles the channel count.
        for i in range(n_downsampling):
            mult = (2 ** i)
            model += [nn.Conv2d((ngf * mult), ((ngf * mult) * 2), kernel_size=3, stride=2, padding=1), norm_layer(((ngf * mult) * 2), affine=True), nn.ReLU(True)]
        mult = (2 ** n_downsampling)
        # Residual blocks at the bottleneck resolution (zero padding only).
        for i in range(n_blocks):
            model += [ResnetBlock((ngf * mult), 'zero', norm_layer=norm_layer, use_dropout=use_dropout)]
        # Mirror of the downsampling path with transposed convs.
        for i in range(n_downsampling):
            mult = (2 ** (n_downsampling - i))
            model += [nn.ConvTranspose2d((ngf * mult), int(((ngf * mult) / 2)), kernel_size=3, stride=2, padding=1, output_padding=1), norm_layer(int(((ngf * mult) / 2)), affine=True), nn.ReLU(True)]
        model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=3)]
        model += [nn.Tanh()]
        self.model = nn.Sequential(*model)

    def forward(self, input):
        # Split the batch across GPUs only when the input already lives on a GPU.
        if (self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor)):
            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        else:
            return self.model(input)
|
class ResnetBlock(nn.Module):
    """Residual block: x + (conv-norm-relu-[dropout]-conv-norm)(x).
    Only 'zero' padding is supported."""

    def __init__(self, dim, padding_type, norm_layer, use_dropout):
        super(ResnetBlock, self).__init__()
        self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout)

    def build_conv_block(self, dim, padding_type, norm_layer, use_dropout):
        """Assemble the two-conv residual branch as an nn.Sequential."""
        assert (padding_type == 'zero')
        layers = [
            nn.Conv2d(dim, dim, kernel_size=3, padding=1),
            norm_layer(dim, affine=True),
            nn.ReLU(True),
        ]
        if use_dropout:
            layers.append(nn.Dropout(0.5))
        layers += [
            nn.Conv2d(dim, dim, kernel_size=3, padding=1),
            norm_layer(dim, affine=True),
        ]
        return nn.Sequential(*layers)

    def forward(self, x):
        return x + self.conv_block(x)
|
class UnetGenerator(nn.Module):
    """U-Net generator built recursively from UnetSkipConnectionBlock,
    innermost level first; num_downs is the total number of downsamplings."""

    def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, gpu_ids=[]):
        super(UnetGenerator, self).__init__()
        self.gpu_ids = gpu_ids
        # Skip connections concatenate activations, so channel counts must match.
        assert (input_nc == output_nc)
        # Build from the innermost (lowest-resolution) block outward; the first
        # five levels are fixed, extra (num_downs - 5) levels stay at ngf*8.
        unet_block = UnetSkipConnectionBlock((ngf * 8), (ngf * 8), innermost=True)
        for i in range((num_downs - 5)):
            unet_block = UnetSkipConnectionBlock((ngf * 8), (ngf * 8), unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
        unet_block = UnetSkipConnectionBlock((ngf * 4), (ngf * 8), unet_block, norm_layer=norm_layer)
        unet_block = UnetSkipConnectionBlock((ngf * 2), (ngf * 4), unet_block, norm_layer=norm_layer)
        unet_block = UnetSkipConnectionBlock(ngf, (ngf * 2), unet_block, norm_layer=norm_layer)
        unet_block = UnetSkipConnectionBlock(output_nc, ngf, unet_block, outermost=True, norm_layer=norm_layer)
        self.model = unet_block

    def forward(self, input):
        # Split the batch across GPUs only when the input already lives on a GPU.
        if (self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor)):
            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        else:
            return self.model(input)
|
class UnetSkipConnectionBlock(nn.Module):
    """One U-Net level: downsample -> submodule -> upsample, with the level's
    input concatenated to its output (except at the outermost level)."""

    def __init__(self, outer_nc, inner_nc, submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
        super(UnetSkipConnectionBlock, self).__init__()
        self.outermost = outermost
        downconv = nn.Conv2d(outer_nc, inner_nc, kernel_size=4, stride=2, padding=1)
        downrelu = nn.LeakyReLU(0.2, True)
        downnorm = norm_layer(inner_nc, affine=True)
        uprelu = nn.ReLU(True)
        upnorm = norm_layer(outer_nc, affine=True)
        if outermost:
            # No activation/norm before the first conv; tanh output head.
            # The up conv sees inner_nc*2 channels because the submodule
            # concatenates its own skip connection.
            upconv = nn.ConvTranspose2d((inner_nc * 2), outer_nc, kernel_size=4, stride=2, padding=1)
            down = [downconv]
            up = [uprelu, upconv, nn.Tanh()]
            model = ((down + [submodule]) + up)
        elif innermost:
            # Bottom of the U: no submodule, so the up conv sees only inner_nc channels.
            upconv = nn.ConvTranspose2d(inner_nc, outer_nc, kernel_size=4, stride=2, padding=1)
            down = [downrelu, downconv]
            up = [uprelu, upconv, upnorm]
            model = (down + up)
        else:
            upconv = nn.ConvTranspose2d((inner_nc * 2), outer_nc, kernel_size=4, stride=2, padding=1)
            down = [downrelu, downconv, downnorm]
            up = [uprelu, upconv, upnorm]
            if use_dropout:
                model = (((down + [submodule]) + up) + [nn.Dropout(0.5)])
            else:
                model = ((down + [submodule]) + up)
        self.model = nn.Sequential(*model)

    def forward(self, x):
        if self.outermost:
            return self.model(x)
        else:
            # Skip connection: concatenate this level's input along channels.
            return torch.cat([self.model(x), x], 1)
|
class NLayerDiscriminator(nn.Module):
    """PatchGAN discriminator: (n_layers) stride-2 conv stages, then two
    stride-1 convs reducing to a 1-channel score map (optionally sigmoid)."""

    def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False, gpu_ids=[]):
        super(NLayerDiscriminator, self).__init__()
        self.gpu_ids = gpu_ids
        kw = 4
        padw = int(np.ceil((kw - 1) / 2))
        # First stage: no normalization.
        layers = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
                  nn.LeakyReLU(0.2, True)]
        mult = 1
        # Remaining downsampling stages: channel multiplier doubles, capped at 8.
        for level in range(1, n_layers):
            prev, mult = mult, min(2 ** level, 8)
            layers += [nn.Conv2d(ndf * prev, ndf * mult, kernel_size=kw, stride=2, padding=padw),
                       norm_layer(ndf * mult, affine=True),
                       nn.LeakyReLU(0.2, True)]
        prev, mult = mult, min(2 ** n_layers, 8)
        # Two stride-1 stages: one more feature stage, then the 1-channel score map.
        layers += [nn.Conv2d(ndf * prev, ndf * mult, kernel_size=kw, stride=1, padding=padw),
                   norm_layer(ndf * mult, affine=True),
                   nn.LeakyReLU(0.2, True)]
        layers.append(nn.Conv2d(ndf * mult, 1, kernel_size=kw, stride=1, padding=padw))
        if use_sigmoid:
            layers.append(nn.Sigmoid())
        self.model = nn.Sequential(*layers)

    def forward(self, input):
        # Split the batch across GPUs only when the input already lives on a GPU.
        if self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor):
            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        return self.model(input)
|
class HTML():
    """Minimal HTML report writer backed by dominate.

    Creates web_dir (and an images/ subdirectory) on construction;
    add_header/add_table/add_images append content to a single document
    that save() renders to web_dir/index.html.
    """

    def __init__(self, web_dir, title, reflesh=0):
        # NOTE: 'reflesh' (sic) is kept — callers pass it as a keyword; when > 0
        # it emits a meta tag with that (equally misspelled) http_equiv value.
        self.title = title
        self.web_dir = web_dir
        self.img_dir = os.path.join(self.web_dir, 'images')
        if (not os.path.exists(self.web_dir)):
            os.makedirs(self.web_dir)
        if (not os.path.exists(self.img_dir)):
            os.makedirs(self.img_dir)
        self.doc = dominate.document(title=title)
        if (reflesh > 0):
            with self.doc.head:
                meta(http_equiv='reflesh', content=str(reflesh))

    def get_image_dir(self):
        """Return the directory where report images are stored."""
        return self.img_dir

    def add_header(self, str):
        """Append an <h3> header. (The parameter name shadows the builtin
        'str'; kept unchanged for API compatibility.)"""
        with self.doc:
            h3(str)

    def add_table(self, border=1):
        """Append a fixed-layout table and remember it as the current table."""
        self.t = table(border=border, style='table-layout: fixed;')
        self.doc.add(self.t)

    def add_images(self, ims, txts, links, width=400):
        """Append a one-row table: each image ims[i] links to links[i] and is
        captioned with txts[i]; paths are relative to the images/ directory."""
        self.add_table()
        with self.t:
            with tr():
                for (im, txt, link) in zip(ims, txts, links):
                    with td(style='word-wrap: break-word;', halign='center', valign='top'):
                        with p():
                            with a(href=os.path.join('images', link)):
                                img(style=('width:%dpx' % width), src=os.path.join('images', im))
                            br()
                            p(txt)

    def save(self):
        """Render the document to web_dir/index.html.

        Fixed: use a context manager so the file handle is closed even if
        write() fails (the original used bare open/close and leaked the
        handle on error).
        """
        html_file = ('%s/index.html' % self.web_dir)
        with open(html_file, 'wt') as f:
            f.write(self.doc.render())
|
class ImagePool():
    """Buffer of previously generated images; query() returns a mix of current
    and historical fakes so discriminators see an image history."""

    def __init__(self, pool_size):
        # pool_size == 0 disables the pool entirely.
        self.pool_size = pool_size
        if self.pool_size > 0:
            self.num_imgs = 0
            self.images = []

    def query(self, images):
        """Return a batch the same size as `images`, drawing roughly half of it
        from the stored history once the pool is full."""
        if self.pool_size == 0:
            # Pool disabled: pass the batch through untouched.
            return images
        batch = []
        for image in images.data:
            image = torch.unsqueeze(image, 0)
            if self.num_imgs < self.pool_size:
                # Still filling the pool: store and return the new image.
                self.num_imgs = self.num_imgs + 1
                self.images.append(image)
                batch.append(image)
            elif random.uniform(0, 1) > 0.5:
                # 50%: swap out a random stored image and return the old one.
                idx = random.randint(0, self.pool_size - 1)
                old = self.images[idx].clone()
                self.images[idx] = image
                batch.append(old)
            else:
                # 50%: return the current image without storing it.
                batch.append(image)
        return Variable(torch.cat(batch, 0))
|
def encode(buf, width, height):
    ' buf: must be bytes or a bytearray in py3, a regular string in py2. formatted RGBRGB... '
    # Minimal PNG writer: SIGNATURE + IHDR + one zlib-compressed IDAT + IEND.
    assert (((width * height) * 3) == len(buf))
    bpp = 3  # bytes per pixel: 8-bit RGB
    def raw_data():
        # Each PNG scanline is prefixed with filter byte 0 (no filter).
        # Rows are emitted from the END of the buffer toward the start, so a
        # bottom-up source buffer produces a top-down PNG (vertical reversal).
        row_bytes = (width * bpp)
        for row_start in range((((height - 1) * width) * bpp), (- 1), (- row_bytes)):
            (yield b'\x00')
            (yield buf[row_start:(row_start + row_bytes)])
    def chunk(tag, data):
        # PNG chunk layout: 4-byte big-endian length, tag, data, then CRC32
        # computed over tag+data and masked to 32 bits.
        return [struct.pack('!I', len(data)), tag, data, struct.pack('!I', (4294967295 & zlib.crc32(data, zlib.crc32(tag))))]
    SIGNATURE = b'\x89PNG\r\n\x1a\n'
    COLOR_TYPE_RGB = 2
    COLOR_TYPE_RGBA = 6  # unused here; kept for reference
    bit_depth = 8
    return b''.join(((([SIGNATURE] + chunk(b'IHDR', struct.pack('!2I5B', width, height, bit_depth, COLOR_TYPE_RGB, 0, 0, 0))) + chunk(b'IDAT', zlib.compress(b''.join(raw_data()), 9))) + chunk(b'IEND', b'')))
|
def tensor2im(image_tensor, imtype=np.uint8):
    """Convert the first image of a CHW tensor batch (values in [-1, 1])
    to an HWC numpy image scaled to [0, 255]."""
    array = image_tensor[0].cpu().float().numpy()
    array = np.transpose(array, (1, 2, 0))
    array = (array + 1) / 2.0 * 255.0
    return array.astype(imtype)
|
def diagnose_network(net, name='network'):
    """Print the mean absolute gradient across all parameters that have one
    (prints 0.0 when no parameter has a gradient)."""
    total = 0.0
    count = 0
    for param in net.parameters():
        if param.grad is not None:
            total += torch.mean(torch.abs(param.grad.data))
            count += 1
    mean = total / count if count > 0 else total
    print(name)
    print(mean)
|
def save_image(image_numpy, image_path):
    """Save an HWC numpy image array to image_path via PIL."""
    image_pil = Image.fromarray(image_numpy)
    image_pil.save(image_path)
|
def info(object, spacing=10, collapse=1):
    'Print methods and doc strings.\n Takes module, class, list, dictionary, or string.'
    # callable() replaces isinstance(..., collections.Callable): the top-level
    # collections ABC aliases were removed in Python 3.10 (use collections.abc),
    # and the builtin is the idiomatic check anyway.
    # (The parameter name 'object' shadows the builtin; kept for API compatibility.)
    methodList = [e for e in dir(object) if callable(getattr(object, e))]
    # collapse != 0 squeezes each docstring onto a single line.
    processFunc = ((collapse and (lambda s: ' '.join(s.split()))) or (lambda s: s))
    print('\n'.join([('%s %s' % (method.ljust(spacing), processFunc(str(getattr(object, method).__doc__)))) for method in methodList]))
|
def varname(p):
    """Best-effort debugging helper: return the name of the variable passed as
    `p` by regex-matching the caller's source line (None if unavailable)."""
    caller = inspect.currentframe().f_back
    for line in inspect.getframeinfo(caller)[3]:
        match = re.search('\\bvarname\\s*\\(\\s*([A-Za-z_][A-Za-z0-9_]*)\\s*\\)', line)
        if match:
            return match.group(1)
|
def print_numpy(x, val=True, shp=False):
    """Print summary statistics (and optionally the shape) of a numpy array."""
    x = x.astype(np.float64)
    if shp:
        print('shape,', x.shape)
    if val:
        flat = x.flatten()
        print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
            np.mean(flat), np.min(flat), np.max(flat), np.median(flat), np.std(flat)))
|
def mkdirs(paths):
    """Create every directory in `paths`, which may be a single path string
    or a list of paths."""
    targets = paths if (isinstance(paths, list) and not isinstance(paths, str)) else [paths]
    for path in targets:
        mkdir(path)
|
def mkdir(path):
    """Create directory `path` (including parents) unless it already exists."""
    if os.path.exists(path):
        return
    os.makedirs(path)
|
class Visualizer():
    """Training visualization hub: visdom image/loss plotting (display_id > 0),
    console loss logging, and an HTML results page (opt.isTrain and not
    opt.no_html)."""

    def __init__(self, opt):
        self.display_id = opt.display_id
        self.use_html = (opt.isTrain and (not opt.no_html))
        self.win_size = opt.display_winsize
        self.name = opt.name
        if (self.display_id > 0):
            # Imported lazily so visdom is only required when actually used.
            import visdom
            self.vis = visdom.Visdom()
        if self.use_html:
            self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')
            self.img_dir = os.path.join(self.web_dir, 'images')
            print(('create web directory %s...' % self.web_dir))
            util.mkdirs([self.web_dir, self.img_dir])

    def display_current_results(self, visuals, epoch):
        """Show a {label: HWC numpy image} dict in visdom and/or the HTML page.

        NOTE(review): the HTML branch rebuilds the page for every epoch from
        `epoch` down to 1 on each call, so page generation cost grows with epoch.
        """
        if (self.display_id > 0):
            idx = 1
            for (label, image_numpy) in visuals.items():
                # HWC -> CHW for visdom; a distinct window per label.
                self.vis.image(image_numpy.transpose([2, 0, 1]), opts=dict(title=label), win=(self.display_id + idx))
                idx += 1
        if self.use_html:
            for (label, image_numpy) in visuals.items():
                img_path = os.path.join(self.img_dir, ('epoch%.3d_%s.png' % (epoch, label)))
                util.save_image(image_numpy, img_path)
            webpage = html.HTML(self.web_dir, ('Experiment name = %s' % self.name), reflesh=1)
            for n in range(epoch, 0, (- 1)):
                webpage.add_header(('epoch [%d]' % n))
                ims = []
                txts = []
                links = []
                for (label, image_numpy) in visuals.items():
                    img_path = ('epoch%.3d_%s.png' % (n, label))
                    ims.append(img_path)
                    txts.append(label)
                    links.append(img_path)
                webpage.add_images(ims, txts, links, width=self.win_size)
            webpage.save()

    def plot_current_errors(self, epoch, counter_ratio, opt, errors):
        """Append the current error dict to a cumulative visdom line plot
        (one series per error key; x-axis is fractional epoch)."""
        if (not hasattr(self, 'plot_data')):
            self.plot_data = {'X': [], 'Y': [], 'legend': list(errors.keys())}
        self.plot_data['X'].append((epoch + counter_ratio))
        self.plot_data['Y'].append([errors[k] for k in self.plot_data['legend']])
        self.vis.line(X=np.stack(([np.array(self.plot_data['X'])] * len(self.plot_data['legend'])), 1), Y=np.array(self.plot_data['Y']), opts={'title': (self.name + ' loss over time'), 'legend': self.plot_data['legend'], 'xlabel': 'epoch', 'ylabel': 'loss'}, win=self.display_id)

    def print_current_errors(self, epoch, i, errors, t):
        """Log the current errors to stdout as a single formatted line."""
        message = ('(epoch: %d, iters: %d, time: %.3f) ' % (epoch, i, t))
        for (k, v) in errors.items():
            message += ('%s: %.3f ' % (k, v))
        print(message)

    def save_images(self, webpage, visuals, image_path):
        """Save each visual under the webpage's image dir and add one header
        plus one row of linked images named after the source image file."""
        image_dir = webpage.get_image_dir()
        short_path = ntpath.basename(image_path[0])
        name = os.path.splitext(short_path)[0]
        webpage.add_header(name)
        ims = []
        txts = []
        links = []
        for (label, image_numpy) in visuals.items():
            image_name = ('%s_%s.png' % (name, label))
            save_path = os.path.join(image_dir, image_name)
            util.save_image(image_numpy, save_path)
            ims.append(image_name)
            txts.append(label)
            links.append(image_name)
        webpage.add_images(ims, txts, links, width=self.win_size)
|
class DiscoGANAnglePairing(DiscoGAN):
    """DiscoGAN variant for angle-pairing tasks (cars/chairs/3D faces rendered
    at different view angles); overrides data loading, the feature-matching
    loss, and image loading conventions."""

    def get_data(self):
        """Return (data_A, data_B, test_A, test_B) for self.args.task_name.

        NOTE(review): an unrecognized task_name falls through every branch and
        raises NameError at the return — confirm task_name is validated upstream.
        """
        if (self.args.task_name == 'car2car'):
            data_A = get_cars(test=False, ver=180, half='first', image_size=self.args.image_size)
            data_B = get_cars(test=False, ver=180, half='last', image_size=self.args.image_size)
            test_A = test_B = get_cars(test=True, ver=180, image_size=self.args.image_size)
        elif (self.args.task_name == 'face2face'):
            data_A = get_faces_3d(test=False, half='first')
            data_B = get_faces_3d(test=False, half='last')
            test_A = test_B = get_faces_3d(test=True)
        elif (self.args.task_name == 'chair2chair'):
            data_A = get_chairs(test=False, half='first', ver=360)
            data_B = get_chairs(test=False, half='last', ver=360)
            test_A = test_B = get_chairs(test=True, ver=360)
        elif (self.args.task_name == 'chair2car'):
            data_A = get_chairs(test=False, half=None, ver=180)
            data_B = get_cars(test=False, half=None, ver=180)
            test_A = get_chairs(test=True, ver=180)
            test_B = get_cars(test=True, ver=180)
        elif (self.args.task_name == 'chair2face'):
            data_A = get_chairs(test=False, half=None, ver=180)
            data_B = get_faces_3d(test=False, half=None)
            test_A = get_chairs(test=True, ver=180)
            test_B = get_faces_3d(test=True)
        elif (self.args.task_name == 'car2face'):
            data_A = get_cars(test=False, ver=180, half=None)
            data_B = get_faces_3d(test=False, half=None)
            test_A = get_cars(test=True, ver=180)
            test_B = get_faces_3d(test=True)
        return (data_A, data_B, test_A, test_B)

    def get_fm_loss(self, real_feats, fake_feats):
        """Feature-matching loss: squared difference of the batch-mean features
        per discriminator layer (the first layer is excluded), scored against
        an all-ones target by self.feat_criterion."""
        losses = 0
        for (real_feat, fake_feat) in zip(real_feats[1:], fake_feats[1:]):
            l2 = ((real_feat.mean(0) - fake_feat.mean(0)) * (real_feat.mean(0) - fake_feat.mean(0)))
            loss = self.feat_criterion(l2, Variable(torch.ones(l2.size())).cuda())
            losses += loss
        return losses

    def get_test_images(self):
        """Load test images; 'car' domains are used as-is, others go through
        read_images.

        NOTE(review): the combination "A does not start with 'car' but B ends
        with 'car'" (e.g. chair2car, which get_data supports) matches no branch,
        leaving self.test_A/self.test_B unset — verify against callers.
        """
        if (self.args.task_name.startswith('car') and self.args.task_name.endswith('car')):
            self.test_A = self.test_style_A
            self.test_B = self.test_style_B
        if (self.args.task_name.startswith('car') and (not self.args.task_name.endswith('car'))):
            self.test_A = self.test_style_A
            self.test_B = read_images(self.test_style_B, None, self.args.image_size)
        if ((not self.args.task_name.startswith('car')) and (not self.args.task_name.endswith('car'))):
            self.test_A = read_images(self.test_style_A, None, self.args.image_size)
            self.test_B = read_images(self.test_style_B, None, self.args.image_size)

    def get_images(self):
        """Same loading convention as get_test_images, but for the current
        batch paths (self.A_path / self.B_path); see the NOTE above about the
        uncovered *2car combination."""
        if (self.args.task_name.startswith('car') and self.args.task_name.endswith('car')):
            A = self.A_path
            B = self.B_path
        if (self.args.task_name.startswith('car') and (not self.args.task_name.endswith('car'))):
            A = self.A_path
            B = read_images(self.B_path, None, self.args.image_size)
        if ((not self.args.task_name.startswith('car')) and (not self.args.task_name.endswith('car'))):
            A = read_images(self.A_path, None, self.args.image_size)
            B = read_images(self.B_path, None, self.args.image_size)
        return (A, B)

    def __init__(self):
        # Parses AnglePairingOptions into self.args.
        # NOTE(review): does not call DiscoGAN.__init__ — confirm the base
        # class tolerates this deferred initialization.
        options = AnglePairingOptions()
        options.initialize()
        self.args = options.parser.parse_args()
|
class Options(object):
    """Command-line options for DistanceGAN/DiscoGAN training."""

    def __init__(self):
        self.parser = argparse.ArgumentParser(description='PyTorch implementation of DistanceGAN based on DiscoGAN')
        # NOTE(review): initialize() never flips this to True — confirm
        # nothing depends on the flag.
        self.initialized = False

    def initialize(self):
        """Register every training argument on self.parser."""
        self.parser.add_argument('--cuda', type=str, default='true', help='Set cuda usage')
        self.parser.add_argument('--task_name', type=str, default='facescrub', help='Set data name')
        self.parser.add_argument('--epoch_size', type=int, default=5000, help='Set epoch size')
        self.parser.add_argument('--batch_size', type=int, default=64, help='Set batch size')
        self.parser.add_argument('--learning_rate', type=float, default=0.0002, help='Set learning rate for optimizer')
        self.parser.add_argument('--model_arch', type=str, default='distancegan', help='choose among gan/recongan/discogan. gan - standard GAN, recongan - GAN with reconstruction, discogan - DiscoGAN.')
        self.parser.add_argument('--image_size', type=int, default=64, help='Image size. 64 for every experiment in the paper')
        self.parser.add_argument('--gan_curriculum', type=int, default=10000, help='Strong GAN loss for certain period at the beginning')
        self.parser.add_argument('--starting_rate', type=float, default=0.01, help='Set the lambda weight between GAN loss and Recon loss during curriculum period at the beginning. We used the 0.01 weight.')
        self.parser.add_argument('--default_rate', type=float, default=0.5, help='Set the lambda weight between GAN loss and Recon loss after curriculum period. We used the 0.5 weight.')
        self.parser.add_argument('--style_A', type=str, default=None, help='Style for CelebA dataset. Could be any attributes in celebA (Young, Male, Blond_Hair, Wearing_Hat ...)')
        self.parser.add_argument('--style_B', type=str, default=None, help='Style for CelebA dataset. Could be any attributes in celebA (Young, Male, Blond_Hair, Wearing_Hat ...)')
        self.parser.add_argument('--constraint', type=str, default=None, help='Constraint for celebA dataset. Only images satisfying this constraint is used. For example, if --constraint=Male, and --constraint_type=1, only male images are used for both style/domain.')
        self.parser.add_argument('--constraint_type', type=str, default=None, help='Used along with --constraint. If --constraint_type=1, only images satisfying the constraint are used. If --constraint_type=-1, only images not satisfying the constraint are used.')
        self.parser.add_argument('--n_test', type=int, default=200, help='Number of test data.')
        self.parser.add_argument('--update_interval', type=int, default=3, help='')
        self.parser.add_argument('--log_interval', type=int, default=50, help='Print loss values every log_interval iterations.')
        self.parser.add_argument('--image_save_interval', type=int, default=1000, help='Save test results every log_interval iterations.')
        self.parser.add_argument('--model_save_interval', type=int, default=10000, help='Save models every log_interval iterations.')
        self.parser.add_argument('--result_path', type=str, default='./results/')
        self.parser.add_argument('--model_path', type=str, default='./models/')
        self.parser.add_argument('--use_self_distance', action='store_true', help='use distance for top and bottom half of the image')
        self.parser.add_argument('--unnormalized_distances', action='store_true', help='do not normalize distances by expecatation and std')
        self.parser.add_argument('--max_items', type=int, default=900, help='maximum number of items to use for expectation and std calculation')
        self.parser.add_argument('--use_reconst_loss', action='store_true', help='add reconstruction loss in addition to distance loss')
        self.parser.add_argument('--num_layers', type=int, default=4, help='Number of convolutional layers in G (equal number of deconvolutional layers exist)')
|
class AnglePairingOptions(Options):
    """Options preset for angle-pairing tasks.

    Re-registers every argument instead of calling super().initialize(), with
    different defaults (task_name, epoch_size, starting/default rates,
    image_save_interval, num_layers) plus an extra --log_path argument.
    """

    def initialize(self):
        """Register every angle-pairing argument on self.parser."""
        self.parser.add_argument('--cuda', type=str, default='true', help='Set cuda usage')
        self.parser.add_argument('--task_name', type=str, default='car2car', help='Set data name')
        self.parser.add_argument('--epoch_size', type=int, default=10000, help='Set epoch size')
        self.parser.add_argument('--batch_size', type=int, default=64, help='Set batch size')
        self.parser.add_argument('--learning_rate', type=float, default=0.0002, help='Set learning rate for optimizer')
        self.parser.add_argument('--model_arch', type=str, default='distancegan', help='choose among gan/recongan/discogan. gan - standard GAN, recongan - GAN with reconstruction, discogan - DiscoGAN.')
        self.parser.add_argument('--image_size', type=int, default=64, help='Image size. 64 for every experiment in the paper')
        self.parser.add_argument('--gan_curriculum', type=int, default=10000, help='Strong GAN loss for certain period at the beginning')
        self.parser.add_argument('--starting_rate', type=float, default=0.9, help='Set the lambda weight between GAN loss and Recon loss during curriculum period at the beginning. We used the 0.01 weight.')
        self.parser.add_argument('--default_rate', type=float, default=0.9, help='Set the lambda weight between GAN loss and Recon loss after curriculum period. We used the 0.5 weight.')
        self.parser.add_argument('--style_A', type=str, default=None, help='Style for CelebA dataset. Could be any attributes in celebA (Young, Male, Blond_Hair, Wearing_Hat ...)')
        self.parser.add_argument('--style_B', type=str, default=None, help='Style for CelebA dataset. Could be any attributes in celebA (Young, Male, Blond_Hair, Wearing_Hat ...)')
        self.parser.add_argument('--constraint', type=str, default=None, help='Constraint for celebA dataset. Only images satisfying this constraint is used. For example, if --constraint=Male, and --constraint_type=1, only male images are used for both style/domain.')
        self.parser.add_argument('--constraint_type', type=str, default=None, help='Used along with --constraint. If --constraint_type=1, only images satisfying the constraint are used. If --constraint_type=-1, only images not satisfying the constraint are used.')
        self.parser.add_argument('--n_test', type=int, default=200, help='Number of test data.')
        self.parser.add_argument('--update_interval', type=int, default=3, help='')
        self.parser.add_argument('--log_interval', type=int, default=50, help='Print loss values every log_interval iterations.')
        self.parser.add_argument('--image_save_interval', type=int, default=500, help='Save test results every log_interval iterations.')
        self.parser.add_argument('--model_save_interval', type=int, default=10000, help='Save models every log_interval iterations.')
        self.parser.add_argument('--result_path', type=str, default='./results/')
        self.parser.add_argument('--model_path', type=str, default='./models/')
        self.parser.add_argument('--log_path', type=str, default='./logs/')
        self.parser.add_argument('--use_self_distance', action='store_true', help='use distance for top and bottom half of the image')
        self.parser.add_argument('--unnormalized_distances', action='store_true', help='do not normalize distances by expecatation and std')
        self.parser.add_argument('--max_items', type=int, default=900, help='maximum number of items to use for expectation and std calculation')
        self.parser.add_argument('--use_reconst_loss', action='store_true', help='add reconstruction loss in addition to distance loss')
        self.parser.add_argument('--num_layers', type=int, default=5, help='Number of convolutional layers in G (equal number of deconvolutional layers exist)')
|
class DistanceGANAnglePairing(DistanceGAN):
    """DistanceGAN for angle-pairing tasks between car, chair and face domains.

    Car datasets are returned by ``get_cars`` as already-loaded image data,
    while chair/face datasets are returned as file paths that must be turned
    into images with ``read_images``.

    Fixes two latent bugs of the original implementation:
    - an unknown ``--task_name`` now raises ``ValueError`` instead of falling
      through and crashing with a ``NameError``;
    - tasks whose *target* domain is 'car' (e.g. 'chair2car') are now handled
      in ``get_test_images``/``get_images`` (previously no branch matched and
      the attributes were never assigned).
    """

    def __init__(self):
        # Parse the command-line options for the angle-pairing experiments.
        options = AnglePairingOptions()
        options.initialize()
        self.args = options.parser.parse_args()

    def get_data(self):
        """Return ``(data_A, data_B, test_A, test_B)`` for ``--task_name``.

        Raises:
            ValueError: if the task name is not one of the supported pairs.
        """
        if self.args.task_name == 'car2car':
            data_A = get_cars(test=False, ver=180, half='first', image_size=self.args.image_size)
            data_B = get_cars(test=False, ver=180, half='last', image_size=self.args.image_size)
            test_A = test_B = get_cars(test=True, ver=180, image_size=self.args.image_size)
        elif self.args.task_name == 'face2face':
            data_A = get_faces_3d(test=False, half='first')
            data_B = get_faces_3d(test=False, half='last')
            test_A = test_B = get_faces_3d(test=True)
        elif self.args.task_name == 'chair2chair':
            data_A = get_chairs(test=False, half='first', ver=360)
            data_B = get_chairs(test=False, half='last', ver=360)
            test_A = test_B = get_chairs(test=True, ver=360)
        elif self.args.task_name == 'chair2car':
            data_A = get_chairs(test=False, half=None, ver=180)
            data_B = get_cars(test=False, half=None, ver=180)
            test_A = get_chairs(test=True, ver=180)
            test_B = get_cars(test=True, ver=180)
        elif self.args.task_name == 'chair2face':
            data_A = get_chairs(test=False, half=None, ver=180)
            data_B = get_faces_3d(test=False, half=None)
            test_A = get_chairs(test=True, ver=180)
            test_B = get_faces_3d(test=True)
        elif self.args.task_name == 'car2face':
            data_A = get_cars(test=False, ver=180, half=None)
            data_B = get_faces_3d(test=False, half=None)
            test_A = get_cars(test=True, ver=180)
            test_B = get_faces_3d(test=True)
        else:
            # Previously an unknown task fell through and produced a
            # NameError on the return below; fail fast with a clear message.
            raise ValueError('Unknown task_name: %s' % self.args.task_name)
        return (data_A, data_B, test_A, test_B)

    def get_fm_loss(self, real_feats, fake_feats):
        """Feature-matching loss over discriminator features.

        The first (shallowest) feature map of each list is skipped; the
        squared difference of batch-mean features is pushed toward zero by
        comparing against an all-ones target through ``feat_criterion``.
        """
        losses = 0
        for real_feat, fake_feat in zip(real_feats[1:], fake_feats[1:]):
            l2 = (real_feat.mean(0) - fake_feat.mean(0)) * (real_feat.mean(0) - fake_feat.mean(0))
            loss = self.feat_criterion(l2, Variable(torch.ones(l2.size())).cuda())
            losses += loss
        return losses

    def _is_car(self, side):
        """Whether the source ('A') or target ('B') domain of the task is 'car'."""
        if side == 'A':
            return self.args.task_name.startswith('car')
        return self.args.task_name.endswith('car')

    def get_test_images(self):
        """Populate ``self.test_A``/``self.test_B``.

        Car data is already image data; other domains hold paths that are
        loaded with ``read_images``.
        """
        if self._is_car('A'):
            self.test_A = self.test_style_A
        else:
            self.test_A = read_images(self.test_style_A, None, self.args.image_size)
        if self._is_car('B'):
            self.test_B = self.test_style_B
        else:
            self.test_B = read_images(self.test_style_B, None, self.args.image_size)

    def get_images(self):
        """Return the current (A, B) training batch, loading paths as needed."""
        if self._is_car('A'):
            A = self.A_path
        else:
            A = read_images(self.A_path, None, self.args.image_size)
        if self._is_car('B'):
            B = self.B_path
        else:
            B = read_images(self.B_path, None, self.args.image_size)
        return (A, B)
|
class Discriminator(nn.Module):
    """DCGAN-style convolutional discriminator.

    Returns a sigmoid probability map together with the activations of the
    three deepest LeakyReLU layers (used for feature-matching losses).
    """

    def __init__(self):
        super(Discriminator, self).__init__()
        ndf = 64
        self.conv1 = nn.Conv2d(3, ndf, 4, 2, 1, bias=False)
        self.relu1 = nn.LeakyReLU(0.2, inplace=True)
        self.conv2 = nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False)
        self.bn2 = nn.BatchNorm2d(ndf * 2)
        self.relu2 = nn.LeakyReLU(0.2, inplace=True)
        self.conv3 = nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False)
        self.bn3 = nn.BatchNorm2d(ndf * 4)
        self.relu3 = nn.LeakyReLU(0.2, inplace=True)
        self.conv4 = nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False)
        self.bn4 = nn.BatchNorm2d(ndf * 8)
        self.relu4 = nn.LeakyReLU(0.2, inplace=True)
        self.conv5 = nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False)

    def forward(self, input):
        h1 = self.relu1(self.conv1(input))
        h2 = self.relu2(self.bn2(self.conv2(h1)))
        h3 = self.relu3(self.bn3(self.conv3(h2)))
        h4 = self.relu4(self.bn4(self.conv4(h3)))
        logits = self.conv5(h4)
        # Expose mid-level activations for feature-matching losses.
        return (torch.sigmoid(logits), [h2, h3, h4])
|
class Generator(nn.Module):
    """DCGAN-style encoder-decoder generator.

    ``num_layers`` selects the depth of the strided conv encoder / transposed
    conv decoder (3, 4 or 5 levels); the 5-layer variant additionally
    bottlenecks through a 100-channel 1x1 latent map. Output passes through
    a Sigmoid, so values lie in [0, 1].
    """

    def __init__(self, num_layers=4):
        # NOTE(review): a num_layers value outside {3, 4, 5} leaves self.main
        # undefined, so forward() would raise AttributeError — confirm callers
        # only pass 3/4/5.
        super(Generator, self).__init__()
        # 5 levels: 3->64->128->256->512 channels, then 100-d latent, mirrored back up.
        if (num_layers == 5):
            self.main = nn.Sequential(nn.Conv2d(3, 64, 4, 2, 1, bias=False), nn.LeakyReLU(0.2, inplace=True), nn.Conv2d(64, (64 * 2), 4, 2, 1, bias=False), nn.BatchNorm2d((64 * 2)), nn.LeakyReLU(0.2, inplace=True), nn.Conv2d((64 * 2), (64 * 4), 4, 2, 1, bias=False), nn.BatchNorm2d((64 * 4)), nn.LeakyReLU(0.2, inplace=True), nn.Conv2d((64 * 4), (64 * 8), 4, 2, 1, bias=False), nn.BatchNorm2d((64 * 8)), nn.LeakyReLU(0.2, inplace=True), nn.Conv2d((64 * 8), 100, 4, 1, 0, bias=False), nn.BatchNorm2d(100), nn.LeakyReLU(0.2, inplace=True), nn.ConvTranspose2d(100, (64 * 8), 4, 1, 0, bias=False), nn.BatchNorm2d((64 * 8)), nn.ReLU(True), nn.ConvTranspose2d((64 * 8), (64 * 4), 4, 2, 1, bias=False), nn.BatchNorm2d((64 * 4)), nn.ReLU(True), nn.ConvTranspose2d((64 * 4), (64 * 2), 4, 2, 1, bias=False), nn.BatchNorm2d((64 * 2)), nn.ReLU(True), nn.ConvTranspose2d((64 * 2), 64, 4, 2, 1, bias=False), nn.BatchNorm2d(64), nn.ReLU(True), nn.ConvTranspose2d(64, 3, 4, 2, 1, bias=False), nn.Sigmoid())
        # 4 levels: 3->64->128->256->512 channels, mirrored back up (no latent bottleneck).
        if (num_layers == 4):
            self.main = nn.Sequential(nn.Conv2d(3, 64, 4, 2, 1, bias=False), nn.LeakyReLU(0.2, inplace=True), nn.Conv2d(64, (64 * 2), 4, 2, 1, bias=False), nn.BatchNorm2d((64 * 2)), nn.LeakyReLU(0.2, inplace=True), nn.Conv2d((64 * 2), (64 * 4), 4, 2, 1, bias=False), nn.BatchNorm2d((64 * 4)), nn.LeakyReLU(0.2, inplace=True), nn.Conv2d((64 * 4), (64 * 8), 4, 2, 1, bias=False), nn.BatchNorm2d((64 * 8)), nn.LeakyReLU(0.2, inplace=True), nn.ConvTranspose2d((64 * 8), (64 * 4), 4, 2, 1, bias=False), nn.BatchNorm2d((64 * 4)), nn.ReLU(True), nn.ConvTranspose2d((64 * 4), (64 * 2), 4, 2, 1, bias=False), nn.BatchNorm2d((64 * 2)), nn.ReLU(True), nn.ConvTranspose2d((64 * 2), 64, 4, 2, 1, bias=False), nn.BatchNorm2d(64), nn.ReLU(True), nn.ConvTranspose2d(64, 3, 4, 2, 1, bias=False), nn.Sigmoid())
        # 3 levels: 3->64->128->256 channels, mirrored back up.
        if (num_layers == 3):
            self.main = nn.Sequential(nn.Conv2d(3, 64, 4, 2, 1, bias=False), nn.LeakyReLU(0.2, inplace=True), nn.Conv2d(64, (64 * 2), 4, 2, 1, bias=False), nn.BatchNorm2d((64 * 2)), nn.LeakyReLU(0.2, inplace=True), nn.Conv2d((64 * 2), (64 * 4), 4, 2, 1, bias=False), nn.BatchNorm2d((64 * 4)), nn.LeakyReLU(0.2, inplace=True), nn.ConvTranspose2d((64 * 4), (64 * 2), 4, 2, 1, bias=False), nn.BatchNorm2d((64 * 2)), nn.ReLU(True), nn.ConvTranspose2d((64 * 2), 64, 4, 2, 1, bias=False), nn.BatchNorm2d(64), nn.ReLU(True), nn.ConvTranspose2d(64, 3, 4, 2, 1, bias=False), nn.Sigmoid())

    def forward(self, input):
        return self.main(input)
|
def CreateDataLoader(opt):
    """Build, announce and initialize the project's custom data loader."""
    loader = CustomDatasetDataLoader()
    print(loader.name())
    loader.initialize(opt)
    return loader
|
def CreateDataset(opt):
    """Instantiate and initialize the dataset selected by ``opt.dataset_mode``.

    Imports are done lazily inside each branch so only the chosen dataset
    module needs to be importable.
    """
    mode = opt.dataset_mode
    if mode == 'aligned':
        from data.aligned_dataset import AlignedDataset
        dataset = AlignedDataset()
    elif mode == 'unaligned':
        from data.unaligned_dataset import UnalignedDataset
        dataset = UnalignedDataset()
    elif mode == 'single':
        from data.single_dataset import SingleDataset
        dataset = SingleDataset()
    else:
        raise ValueError('Dataset [%s] not recognized.' % mode)
    print('dataset [%s] was created' % dataset.name())
    dataset.initialize(opt)
    return dataset
|
class CustomDatasetDataLoader(BaseDataLoader):
    """Wraps a project dataset in a multi-worker torch DataLoader."""

    def name(self):
        return 'CustomDatasetDataLoader'

    def initialize(self, opt):
        """Create the dataset and its (optionally shuffled) loader."""
        BaseDataLoader.initialize(self, opt)
        self.dataset = CreateDataset(opt)
        self.dataloader = torch.utils.data.DataLoader(
            self.dataset,
            batch_size=opt.batchSize,
            shuffle=not opt.serial_batches,
            num_workers=int(opt.nThreads))

    def load_data(self):
        return self

    def __len__(self):
        return len(self.dataset)

    def __iter__(self):
        # Equivalent to the original enumerate loop — the index was unused.
        yield from self.dataloader
|
class BaseDataLoader():
    """Minimal data-loader interface; subclasses override ``load_data``."""

    def __init__(self):
        pass

    def initialize(self, opt):
        # Stash the options object for subclasses to use.
        self.opt = opt

    def load_data(self):
        return None
|
class BaseDataset(data.Dataset):
    """Abstract project dataset; subclasses implement name()/initialize()."""

    def __init__(self):
        super(BaseDataset, self).__init__()

    def name(self):
        # Overridden by concrete datasets with their own identifier.
        return 'BaseDataset'

    def initialize(self, opt):
        pass
|
def get_transform(opt):
    """Compose the image transforms requested by ``opt.resize_or_crop``.

    At train time (unless disabled) a random horizontal flip and rotation are
    appended; the pipeline always ends with ToTensor + normalization of each
    channel to [-1, 1].
    """
    tf_list = []
    mode = opt.resize_or_crop
    if mode == 'resize_and_crop':
        osize = [opt.loadSize, opt.loadSize]
        tf_list.append(transforms.Scale(osize, Image.BICUBIC))
        tf_list.append(transforms.RandomCrop(opt.fineSize))
    elif mode == 'crop':
        tf_list.append(transforms.RandomCrop(opt.fineSize))
    elif mode == 'scale_width':
        tf_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.fineSize)))
    elif mode == 'scale_width_and_crop':
        tf_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.loadSize)))
        tf_list.append(transforms.RandomCrop(opt.fineSize))
    if opt.isTrain and not opt.no_flip_and_rotation:
        tf_list.append(transforms.RandomHorizontalFlip())
        tf_list.append(transforms.RandomRotation(opt.rotation_degree))
    tf_list += [transforms.ToTensor(),
                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
    return transforms.Compose(tf_list)
|
def __scale_width(img, target_width):
    """Resize ``img`` so its width equals ``target_width``, keeping aspect ratio."""
    ow, oh = img.size
    if ow == target_width:
        # Already the requested width — avoid a useless resample.
        return img
    new_h = int((target_width * oh) / ow)
    return img.resize((target_width, new_h), Image.BICUBIC)
|
def is_image_file(filename):
    """True if ``filename`` ends with one of the known image extensions."""
    # str.endswith accepts a tuple of suffixes — single C-level call.
    return filename.endswith(tuple(IMG_EXTENSIONS))
|
def make_dataset(dir, max_items=-1, start=0):
    """Recursively collect image file paths under ``dir``.

    When ``max_items >= 0`` the result is sorted and sliced to
    ``[start:start + max_items]``; otherwise all paths are returned in
    directory-walk order.
    """
    assert os.path.isdir(dir), ('%s is not a valid directory' % dir)
    images = [os.path.join(root, fname)
              for root, _, fnames in sorted(os.walk(dir))
              for fname in fnames
              if is_image_file(fname)]
    if max_items >= 0:
        return sorted(images)[start:start + max_items]
    return images
|
def default_loader(path):
    """Load the image at ``path`` as a 3-channel RGB image.

    Opens inside a context manager so the underlying file handle is closed
    promptly: ``Image.open`` is lazy and otherwise keeps the file open,
    which leaks descriptors when loading many images. ``convert('RGB')``
    forces the pixel data to be read before the file is closed.
    """
    with Image.open(path) as img:
        return img.convert('RGB')
|
class ImageFolder(data.Dataset):
    """Dataset over all images found (recursively) under a root folder."""

    def __init__(self, root, transform=None, return_paths=False, loader=default_loader):
        imgs = make_dataset(root)
        if not imgs:
            raise RuntimeError('Found 0 images in: ' + root
                               + '\nSupported image extensions are: '
                               + ','.join(IMG_EXTENSIONS))
        self.root = root
        self.imgs = imgs
        self.transform = transform
        self.return_paths = return_paths
        self.loader = loader

    def __getitem__(self, index):
        path = self.imgs[index]
        img = self.loader(path)
        if self.transform is not None:
            img = self.transform(img)
        # Optionally return the originating path alongside the image.
        return (img, path) if self.return_paths else img

    def __len__(self):
        return len(self.imgs)
|
class SingleDataset(BaseDataset):
    """Test-time dataset yielding only A-side images (no paired B domain)."""

    def initialize(self, opt):
        self.opt = opt
        self.root = opt.dataroot
        self.dir_A = os.path.join(opt.dataroot)
        self.A_paths = sorted(make_dataset(self.dir_A))
        self.transform = get_transform(opt)

    def __getitem__(self, index):
        A_path = self.A_paths[index]
        A = self.transform(Image.open(A_path).convert('RGB'))
        # The translation direction decides which side's channel count applies.
        if self.opt.which_direction == 'BtoA':
            input_nc = self.opt.output_nc
        else:
            input_nc = self.opt.input_nc
        if input_nc == 1:
            # ITU-R 601 luma weights collapse RGB into a single channel.
            gray = A[0, ...] * 0.299 + A[1, ...] * 0.587 + A[2, ...] * 0.114
            A = gray.unsqueeze(0)
        return {'A': A, 'A_paths': A_path}

    def __len__(self):
        return len(self.A_paths)

    def name(self):
        return 'SingleImageDataset'
|
class UnalignedDataset(BaseDataset):
    """Unpaired two-domain dataset; B is sampled randomly unless serial."""

    def initialize(self, opt):
        self.opt = opt
        self.root = opt.dataroot
        self.dir_A = os.path.join(opt.dataroot, opt.phase + opt.A)
        self.dir_B = os.path.join(opt.dataroot, opt.phase + opt.B)
        self.A_paths = sorted(make_dataset(self.dir_A, max_items=opt.max_items_A, start=opt.start))
        self.B_paths = sorted(make_dataset(self.dir_B, max_items=opt.max_items_B, start=opt.start))
        self.A_size = len(self.A_paths)
        self.B_size = len(self.B_paths)
        self.transform = get_transform(opt)

    def _to_gray(self, img):
        """Collapse an RGB tensor to one channel using ITU-R 601 luma weights."""
        luma = img[0, ...] * 0.299 + img[1, ...] * 0.587 + img[2, ...] * 0.114
        return luma.unsqueeze(0)

    def __getitem__(self, index):
        A_path = self.A_paths[index % self.A_size]
        if self.opt.serial_batches:
            index_B = index % self.B_size
        else:
            # Break A/B alignment by drawing B uniformly at random.
            index_B = random.randint(0, self.B_size - 1)
        B_path = self.B_paths[index_B]
        A = self.transform(Image.open(A_path).convert('RGB'))
        B = self.transform(Image.open(B_path).convert('RGB'))
        if self.opt.which_direction == 'BtoA':
            input_nc, output_nc = self.opt.output_nc, self.opt.input_nc
        else:
            input_nc, output_nc = self.opt.input_nc, self.opt.output_nc
        if input_nc == 1:
            A = self._to_gray(A)
        if output_nc == 1:
            B = self._to_gray(B)
        return {'A': A, 'B': B, 'A_paths': A_path, 'B_paths': B_path}

    def __len__(self):
        # Epoch length is governed by the larger domain.
        return max(self.A_size, self.B_size)

    def name(self):
        return 'UnalignedDataset'
|
def get_file_paths(folder):
    """Return sorted absolute paths of .png/.jpg files directly in ``folder``.

    The walk is deliberately broken off after the first level, so
    subdirectories are not searched.
    """
    image_file_paths = []
    for root, dirs, filenames in os.walk(folder):
        abs_root = os.path.abspath(root)
        for filename in sorted(filenames):
            if filename.endswith(('.png', '.jpg')):
                image_file_paths.append(os.path.join(abs_root, filename))
        break
    return image_file_paths
|
def align_images(a_file_paths, b_file_paths, target_path):
    """Save each A/B image pair side by side (A left, B right) as NNNN.jpg."""
    if not os.path.exists(target_path):
        os.makedirs(target_path)
    for i in range(len(a_file_paths)):
        img_a = Image.open(a_file_paths[i])
        img_b = Image.open(b_file_paths[i])
        assert img_a.size == img_b.size
        width, height = img_a.size
        pair = Image.new('RGB', (width * 2, height))
        pair.paste(img_a, (0, 0))
        pair.paste(img_b, (width, 0))
        pair.save(os.path.join(target_path, '{:04d}.jpg'.format(i)))
|
def create_model(opt):
    """Instantiate, initialize and return the model named by ``opt.model``."""
    print(opt.model)
    name = opt.model
    if name == 'ost':
        assert opt.dataset_mode == 'unaligned'
        from .ost import OSTModel
        model = OSTModel()
    elif name == 'autoencoder':
        assert opt.dataset_mode == 'single'
        from .autoencoder_model import AutoEncoderModel
        model = AutoEncoderModel()
    elif name == 'test':
        assert opt.dataset_mode == 'single'
        from .test_model import TestModel
        model = TestModel()
    else:
        raise NotImplementedError('model [%s] not implemented.' % name)
    model.initialize(opt)
    print('model [%s] was created' % model.name())
    return model
|
class BaseModel(object):
    """Common scaffolding for train/test models: option bookkeeping,
    checkpoint save/load and learning-rate scheduling. Subclasses override
    the no-op hooks (forward, test, optimize_parameters, ...)."""

    def name(self):
        return 'BaseModel'

    def initialize(self, opt):
        """Record options and resolve the checkpoint directories."""
        self.opt = opt
        self.gpu_ids = opt.gpu_ids
        self.isTrain = opt.isTrain
        # CUDA tensors only when at least one GPU id was requested.
        self.Tensor = torch.cuda.FloatTensor if self.gpu_ids else torch.Tensor
        self.load_dir = os.path.join(opt.checkpoints_dir, opt.load_dir)
        self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)

    def set_input(self, input):
        self.input = input

    def forward(self):
        pass

    def test(self):
        pass

    def get_image_paths(self):
        pass

    def optimize_parameters(self):
        pass

    def get_current_visuals(self):
        return self.input

    def get_current_errors(self):
        return {}

    def save(self, label):
        pass

    def save_network(self, network, network_label, epoch_label, gpu_ids):
        """Save CPU weights as '<epoch>_net_<label>.pth', restoring GPU placement after."""
        filename = '%s_net_%s.pth' % (epoch_label, network_label)
        torch.save(network.cpu().state_dict(), os.path.join(self.save_dir, filename))
        if len(gpu_ids) and torch.cuda.is_available():
            network.cuda(gpu_ids[0])

    def load_network(self, network, network_label, epoch_label):
        """Load weights previously written by save_network from load_dir."""
        filename = '%s_net_%s.pth' % (epoch_label, network_label)
        network.load_state_dict(torch.load(os.path.join(self.load_dir, filename)))

    def update_learning_rate(self):
        """Advance every scheduler one step and report the resulting LR."""
        for sched in self.schedulers:
            sched.step()
        lr = self.optimizers[0].param_groups[0]['lr']
        print('learning rate = %.7f' % lr)

    def as_np(self, data):
        """Pull a tensor/Variable back to a CPU NumPy array."""
        return data.cpu().data.numpy()
|
class pixel_norm(nn.Module):
    """Pixelwise feature normalization (ProGAN-style): each spatial position's
    channel vector is rescaled to (approximately) unit RMS."""

    def forward(self, x, epsilon=1e-08):
        # Mean of squares over the channel dimension, kept for broadcasting.
        inv_norm = torch.rsqrt(x.pow(2).mean(dim=1, keepdim=True) + epsilon)
        return x * inv_norm
|
def weights_init_normal(m):
    """Init hook for ``net.apply``: N(0, 0.02) for conv/linear weights and
    N(1, 0.02) weight with zero bias for BatchNorm2d (the DCGAN scheme).

    Uses the in-place ``init.*_`` functions; the un-suffixed variants used
    originally are deprecated and removed in recent PyTorch releases.
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('Linear') != -1:
        init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm2d') != -1:
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
|
def weights_init_xavier(m):
    """Init hook for ``net.apply``: Xavier-normal (gain 0.02) for conv/linear,
    N(1, 0.02) weight with zero bias for BatchNorm2d.

    Uses the in-place ``init.*_`` functions; the un-suffixed variants used
    originally are deprecated and removed in recent PyTorch releases.
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        init.xavier_normal_(m.weight.data, gain=0.02)
    elif classname.find('Linear') != -1:
        init.xavier_normal_(m.weight.data, gain=0.02)
    elif classname.find('BatchNorm2d') != -1:
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
|
def weights_init_kaiming(m):
    """Init hook for ``net.apply``: Kaiming-normal (fan_in) for conv/linear,
    N(1, 0.02) weight with zero bias for BatchNorm2d.

    Uses the in-place ``init.*_`` functions; the un-suffixed variants used
    originally are deprecated and removed in recent PyTorch releases.
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
    elif classname.find('Linear') != -1:
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
    elif classname.find('BatchNorm2d') != -1:
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
|
def weights_init_orthogonal(m):
    """Init hook for ``net.apply``: orthogonal (gain 1) for conv/linear,
    N(1, 0.02) weight with zero bias for BatchNorm2d.

    Uses the in-place ``init.*_`` functions; the un-suffixed variants used
    originally are deprecated and removed in recent PyTorch releases.
    """
    classname = m.__class__.__name__
    print(classname)
    if classname.find('Conv') != -1:
        init.orthogonal_(m.weight.data, gain=1)
    elif classname.find('Linear') != -1:
        init.orthogonal_(m.weight.data, gain=1)
    elif classname.find('BatchNorm2d') != -1:
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
|
def init_weights(net, init_type='normal'):
    """Apply the chosen weight-initialization scheme to every submodule of ``net``.

    Raises:
        NotImplementedError: for an unrecognized ``init_type``.
    """
    print('initialization method [%s]' % init_type)
    if init_type == 'normal':
        net.apply(weights_init_normal)
    elif init_type == 'xavier':
        net.apply(weights_init_xavier)
    elif init_type == 'kaiming':
        net.apply(weights_init_kaiming)
    elif init_type == 'orthogonal':
        net.apply(weights_init_orthogonal)
    else:
        raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
|
def get_norm_layer(norm_type='instance'):
    """Map a normalization name to a layer factory.

    'batch' -> affine BatchNorm2d, 'instance' -> non-affine InstanceNorm2d,
    'none' -> None; anything else raises NotImplementedError.
    """
    if norm_type == 'batch':
        return functools.partial(nn.BatchNorm2d, affine=True)
    if norm_type == 'instance':
        return functools.partial(nn.InstanceNorm2d, affine=False)
    if norm_type == 'none':
        return None
    raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
|
def get_scheduler(optimizer, opt):
    """Build the learning-rate scheduler selected by ``opt.lr_policy``.

    Policies:
        'lambda'  - linear decay toward zero during the last
                    ``opt.niter_decay`` epochs (after ``opt.niter``).
        'step'    - multiply LR by 0.1 every ``opt.lr_decay_iters`` epochs.
        'plateau' - reduce on metric plateau.

    Raises:
        NotImplementedError: for an unknown policy. (Bug fix: the original
        *returned* the exception object — with unformatted printf-style
        arguments — instead of raising it.)
    """
    if opt.lr_policy == 'lambda':
        def lambda_rule(epoch):
            # Linear fade of the LR to zero during the decay phase.
            lr_l = 1.0 - max(0, (epoch + 1) + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler
|
def define_ED(input_nc, output_nc, ngf, which_model_netG, norm='batch', use_dropout=False, init_type='normal', gpu_ids=[], n_downsampling=2, start=0, end=2, input_layer=True, output_layer=True, n_blocks_encoder=9, n_blocks_decoder=9, start_dec=0, end_dec=1):
    """Create an (encoder, decoder) pair of ResNet halves, move them to GPU
    if requested, and initialize their weights.

    Both recognized model names build the same pair — the block counts are
    controlled by ``n_blocks_encoder`` / ``n_blocks_decoder`` — so the two
    originally-identical branches are merged into one membership check.
    """
    norm_layer = get_norm_layer(norm_type=norm)
    if len(gpu_ids) > 0:
        assert torch.cuda.is_available()
    if which_model_netG not in ('resnet_9blocks', 'resnet_6blocks'):
        raise NotImplementedError('Generator model name [%s] is not recognized' % which_model_netG)
    netE = ResnetEncoder(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=n_blocks_encoder, gpu_ids=gpu_ids, n_downsampling=n_downsampling, start=start, end=end, input_layer=input_layer)
    netD = ResnetDecoder(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=n_blocks_decoder, gpu_ids=gpu_ids, n_downsampling=n_downsampling, end=end_dec, start=start_dec, output_layer=output_layer)
    if len(gpu_ids) > 0:
        netE.cuda(gpu_ids[0])
        netD.cuda(gpu_ids[0])
    init_weights(netE, init_type=init_type)
    init_weights(netD, init_type=init_type)
    return (netE, netD)
|
def define_G(input_nc, output_nc, ngf, which_model_netG, norm='batch', use_dropout=False, init_type='normal', gpu_ids=[]):
    """Build the requested generator, move it to GPU if asked, and
    initialize its weights with the chosen scheme."""
    norm_layer = get_norm_layer(norm_type=norm)
    if len(gpu_ids) > 0:
        assert torch.cuda.is_available()
    if which_model_netG == 'resnet_9blocks':
        netG = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9, gpu_ids=gpu_ids)
    elif which_model_netG == 'resnet_6blocks':
        netG = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6, gpu_ids=gpu_ids)
    elif which_model_netG == 'unet_128':
        netG = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
    elif which_model_netG == 'unet_256':
        netG = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
    else:
        raise NotImplementedError('Generator model name [%s] is not recognized' % which_model_netG)
    if len(gpu_ids) > 0:
        netG.cuda(gpu_ids[0])
    init_weights(netG, init_type=init_type)
    return netG
|
def define_D(input_nc, ndf, which_model_netD, n_layers_D=3, norm='batch', use_sigmoid=False, init_type='normal', gpu_ids=[]):
    """Build the requested discriminator, move it to GPU if asked, and
    initialize its weights with the chosen scheme."""
    norm_layer = get_norm_layer(norm_type=norm)
    if len(gpu_ids) > 0:
        assert torch.cuda.is_available()
    if which_model_netD == 'basic':
        netD = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
    elif which_model_netD == 'n_layers':
        netD = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
    elif which_model_netD == 'pixel':
        netD = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
    else:
        raise NotImplementedError('Discriminator model name [%s] is not recognized' % which_model_netD)
    if len(gpu_ids) > 0:
        netD.cuda(gpu_ids[0])
    init_weights(netD, init_type=init_type)
    return netD
|
def print_network(net):
    """Print a network's structure and its total trainable-parameter count."""
    total = sum(param.numel() for param in net.parameters())
    print(net)
    print('Total number of parameters: %d' % total)
|
class GANLoss(nn.Module):
    """GAN objective with cached label tensors.

    Uses MSE (LSGAN) or BCE against all-real / all-fake targets; the label
    tensors are lazily (re)created only when the prediction size changes.
    """

    def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0, tensor=torch.FloatTensor):
        super(GANLoss, self).__init__()
        self.real_label = target_real_label
        self.fake_label = target_fake_label
        self.real_label_var = None
        self.fake_label_var = None
        self.Tensor = tensor
        self.loss = nn.MSELoss() if use_lsgan else nn.BCELoss()

    def get_target_tensor(self, input, target_is_real):
        """Return a (cached) constant label tensor shaped like ``input``."""
        if target_is_real:
            stale = (self.real_label_var is None) or (self.real_label_var.numel() != input.numel())
            if stale:
                filled = self.Tensor(input.size()).fill_(self.real_label)
                self.real_label_var = Variable(filled, requires_grad=False)
            return self.real_label_var
        stale = (self.fake_label_var is None) or (self.fake_label_var.numel() != input.numel())
        if stale:
            filled = self.Tensor(input.size()).fill_(self.fake_label)
            self.fake_label_var = Variable(filled, requires_grad=False)
        return self.fake_label_var

    def __call__(self, input, target_is_real):
        return self.loss(input, self.get_target_tensor(input, target_is_real))
|
class ResnetEncoder(nn.Module):
    """Encoder half of a ResNet generator.

    Optional 7x7 input stem, strided downsampling convs for levels
    ``[start, end)``, then ``n_blocks`` residual blocks at the width implied
    by ``n_downsampling``.
    """

    def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, gpu_ids=[], padding_type='reflect', n_downsampling=2, start=0, end=2, input_layer=True, n_blocks=6):
        assert n_blocks >= 0
        super(ResnetEncoder, self).__init__()
        self.input_nc = input_nc
        self.output_nc = output_nc
        self.ngf = ngf
        self.gpu_ids = gpu_ids
        # InstanceNorm has no learnable bias by default, so convs keep their own.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        layers = []
        if input_layer:
            layers = [nn.ReflectionPad2d(3),
                      nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
                      norm_layer(ngf),
                      nn.ReLU(True)]
        for level in range(start, end):
            ch = ngf * (2 ** level)
            layers += [nn.Conv2d(ch, ch * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
                       norm_layer(ch * 2),
                       nn.ReLU(True)]
        width = ngf * (2 ** n_downsampling)
        for _ in range(n_blocks):
            layers += [ResnetBlock(width, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
        self.model = nn.Sequential(*layers)

    def forward(self, input):
        if self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor):
            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        return self.model(input)
|
class ResnetDecoder(nn.Module):
    """Decoder half of a ResNet generator.

    ``n_blocks`` residual blocks at the bottleneck width, upsampling
    transposed convs for levels ``[start, end)``, then (optionally) the
    7x7 Tanh output stem.
    """

    def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, gpu_ids=[], padding_type='reflect', n_downsampling=2, start=0, end=2, output_layer=True, n_blocks=6):
        assert n_blocks >= 0
        super(ResnetDecoder, self).__init__()
        self.input_nc = input_nc
        self.output_nc = output_nc
        self.ngf = ngf
        self.gpu_ids = gpu_ids
        # InstanceNorm has no learnable bias by default, so convs keep their own.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        width = ngf * (2 ** n_downsampling)
        layers = []
        for _ in range(n_blocks):
            layers += [ResnetBlock(width, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
        for level in range(start, end):
            ch = ngf * (2 ** (n_downsampling - level))
            layers += [nn.ConvTranspose2d(ch, int(ch / 2), kernel_size=3, stride=2, padding=1, output_padding=1, bias=use_bias),
                       norm_layer(int(ch / 2)),
                       nn.ReLU(True)]
        if output_layer:
            layers += [nn.ReflectionPad2d(3),
                       nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0),
                       nn.Tanh()]
        self.model = nn.Sequential(*layers)

    def forward(self, input):
        if self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor):
            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        return self.model(input)
|
class ResnetGenerator(nn.Module):
    """Full ResNet generator: 7x7 stem, two downsampling convs, ``n_blocks``
    residual blocks, two upsampling transposed convs, 7x7 Tanh output."""

    def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, gpu_ids=[], padding_type='reflect'):
        assert n_blocks >= 0
        super(ResnetGenerator, self).__init__()
        self.input_nc = input_nc
        self.output_nc = output_nc
        self.ngf = ngf
        self.gpu_ids = gpu_ids
        # InstanceNorm has no learnable bias by default, so convs keep their own.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        n_downsampling = 2
        layers = [nn.ReflectionPad2d(3),
                  nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
                  norm_layer(ngf),
                  nn.ReLU(True)]
        for level in range(n_downsampling):
            ch = ngf * (2 ** level)
            layers += [nn.Conv2d(ch, ch * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
                       norm_layer(ch * 2),
                       nn.ReLU(True)]
        width = ngf * (2 ** n_downsampling)
        for _ in range(n_blocks):
            layers += [ResnetBlock(width, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
        for level in range(n_downsampling):
            ch = ngf * (2 ** (n_downsampling - level))
            layers += [nn.ConvTranspose2d(ch, int(ch / 2), kernel_size=3, stride=2, padding=1, output_padding=1, bias=use_bias),
                       norm_layer(int(ch / 2)),
                       nn.ReLU(True)]
        layers += [nn.ReflectionPad2d(3),
                   nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0),
                   nn.Tanh()]
        self.model = nn.Sequential(*layers)

    def forward(self, input):
        if self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor):
            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        return self.model(input)
|
class ResnetBlock(nn.Module):
    """Residual block: two 3x3 convs (norm after each, ReLU after the first,
    optional dropout between) with the requested padding scheme; the input
    is added back onto the output."""

    def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
        super(ResnetBlock, self).__init__()
        self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)

    @staticmethod
    def _padding(padding_type):
        """Return ([explicit pad modules], conv padding) for the scheme."""
        if padding_type == 'reflect':
            return [nn.ReflectionPad2d(1)], 0
        if padding_type == 'replicate':
            return [nn.ReplicationPad2d(1)], 0
        if padding_type == 'zero':
            return [], 1
        raise NotImplementedError('padding [%s] is not implemented' % padding_type)

    def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
        pads, p = self._padding(padding_type)
        conv_block = pads + [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
                             norm_layer(dim),
                             nn.ReLU(True)]
        if use_dropout:
            conv_block += [nn.Dropout(0.5)]
        pads, p = self._padding(padding_type)
        conv_block += pads + [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
                              norm_layer(dim)]
        return nn.Sequential(*conv_block)

    def forward(self, x):
        # Identity shortcut plus the residual transformation.
        return x + self.conv_block(x)
|
class UnetGenerator(nn.Module):
    """U-Net generator built recursively from the innermost block outward."""

    def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, gpu_ids=[]):
        super(UnetGenerator, self).__init__()
        self.gpu_ids = gpu_ids
        # Innermost bottleneck, then (num_downs - 5) constant-width levels,
        # then widen back toward the image, ending with the outermost level.
        block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True)
        for _ in range(num_downs - 5):
            block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=block, norm_layer=norm_layer, use_dropout=use_dropout)
        block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=block, norm_layer=norm_layer)
        block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=block, norm_layer=norm_layer)
        block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=block, norm_layer=norm_layer)
        self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=block, outermost=True, norm_layer=norm_layer)

    def forward(self, input):
        if self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor):
            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        return self.model(input)
|
class UnetSkipConnectionBlock(nn.Module):
    """One U-Net level: downsample, recurse into ``submodule``, upsample, and
    (except at the outermost level) concatenate the input as a skip."""

    def __init__(self, outer_nc, inner_nc, input_nc=None, submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
        super(UnetSkipConnectionBlock, self).__init__()
        self.outermost = outermost
        # InstanceNorm has no learnable bias by default, so convs keep their own.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        if input_nc is None:
            input_nc = outer_nc
        downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
        downrelu = nn.LeakyReLU(0.2, True)
        uprelu = nn.ReLU(True)
        if outermost:
            # Top level: no norm on the way down, Tanh image output on the way up.
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1)
            model = [downconv] + [submodule] + [uprelu, upconv, nn.Tanh()]
        elif innermost:
            # Bottleneck: no submodule, upsample straight back.
            upconv = nn.ConvTranspose2d(inner_nc, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
            model = [downrelu, downconv] + [uprelu, upconv, norm_layer(outer_nc)]
        else:
            # Intermediate level: the up path sees submodule output + skip,
            # hence inner_nc * 2 input channels.
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
            model = [downrelu, downconv, norm_layer(inner_nc)] + [submodule] + [uprelu, upconv, norm_layer(outer_nc)]
            if use_dropout:
                model = model + [nn.Dropout(0.5)]
        self.model = nn.Sequential(*model)

    def forward(self, x):
        if self.outermost:
            return self.model(x)
        # Skip connection: stack this level's input on the upsampled features.
        return torch.cat([x, self.model(x)], 1)
|
class NLayerDiscriminator(nn.Module):
    """PatchGAN discriminator with a configurable number of conv layers.

    Channel width doubles per layer (capped at 8 * ndf); the final layers run
    at stride 1 and emit a 1-channel patch map, optionally sigmoid-squashed.
    """

    def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False, gpu_ids=[]):
        super(NLayerDiscriminator, self).__init__()
        self.gpu_ids = gpu_ids
        # InstanceNorm has no learnable bias by default, so convs keep their own.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        kw, padw = 4, 1
        layers = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
                  nn.LeakyReLU(0.2, True)]
        nf_mult = 1
        for n in range(1, n_layers):
            nf_mult_prev, nf_mult = nf_mult, min(2 ** n, 8)
            layers += [nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
                       norm_layer(ndf * nf_mult),
                       nn.LeakyReLU(0.2, True)]
        nf_mult_prev, nf_mult = nf_mult, min(2 ** n_layers, 8)
        layers += [nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
                   norm_layer(ndf * nf_mult),
                   nn.LeakyReLU(0.2, True),
                   nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]
        if use_sigmoid:
            layers += [nn.Sigmoid()]
        self.model = nn.Sequential(*layers)

    def forward(self, input):
        if len(self.gpu_ids) and isinstance(input.data, torch.cuda.FloatTensor):
            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        return self.model(input)
|
class PixelDiscriminator(nn.Module):
    """1x1-conv PatchGAN discriminator that classifies each pixel independently.

    All convolutions use kernel 1 / stride 1 / padding 0, so the output score
    map has the same spatial size as the input.

    Args:
        input_nc: number of channels in the input image.
        ndf: number of filters in the first conv layer.
        norm_layer: normalization layer class or ``functools.partial`` of one.
        use_sigmoid: append ``nn.Sigmoid`` to the output when True.
        gpu_ids: GPU ids for data-parallel execution; None means no parallelism.
    """

    def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d,
                 use_sigmoid=False, gpu_ids=None):
        super(PixelDiscriminator, self).__init__()
        # Fix: the previous default `gpu_ids=[]` was a mutable default
        # argument shared by every instance; use a None sentinel and
        # normalize to a fresh list. Backward compatible: the effective
        # default value is still an empty list.
        self.gpu_ids = [] if gpu_ids is None else gpu_ids
        # Conv bias is redundant when the norm layer learns a shift (BatchNorm).
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        self.net = [
            nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
            norm_layer(ndf * 2),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias),
        ]
        if use_sigmoid:
            self.net.append(nn.Sigmoid())
        self.net = nn.Sequential(*self.net)

    def forward(self, input):
        """Score the input; returns a per-pixel score map of shape (N, 1, H, W)."""
        if len(self.gpu_ids) and isinstance(input.data, torch.cuda.FloatTensor):
            return nn.parallel.data_parallel(self.net, input, self.gpu_ids)
        else:
            return self.net(input)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.