code stringlengths 17 6.64M |
|---|
class UnpairedMaskDataset(data.Dataset):
    """Dataset over all images in a single folder; every item carries an
    all-ones ("fully real") mask of the same spatial size as the image."""

    def __init__(self, opt, im_path, label, is_val=False):
        """Initialize the dataset.

        Parameters:
            opt -- experiment options
            im_path -- path to folder of images
            label -- class label shared by every image in the folder
            is_val -- whether this is validation data (selects the transforms)
        """
        super().__init__()
        self.dir = im_path
        self.paths = sorted(make_dataset(self.dir, opt.max_dataset_size))
        self.label = label
        self.size = len(self.paths)
        assert self.size > 0
        self.transform = transforms.get_transform(opt, for_val=is_val)
        self.mask_transform = transforms.get_mask_transform(opt, for_val=is_val)
        self.opt = opt

    def __getitem__(self, index):
        """Return the transformed image at `index`, its path, and an all-ones mask."""
        path = self.paths[index]
        image = Image.open(path).convert('RGB')
        image = self.transform(image)
        # assumes the transformed image is channel-first (C, H, W) — TODO confirm
        height, width = np.array(image).shape[1:]
        ones_mask = torch.ones([height, width])
        mask_image = Image.fromarray(np.uint8(ones_mask * 255), 'L')
        mask_image = self.mask_transform(mask_image)
        return {'img': image, 'path': path, 'mask': mask_image}

    def __len__(self):
        """Return the number of images found in the folder."""
        return self.size
|
class Struct():
    """Lightweight namespace: exposes its keyword arguments as attributes."""
    def __init__(self, **entries):
        for key, value in entries.items():
            setattr(self, key, value)
|
def find_model_using_name(model_name):
    """Import models/<model_name>_model.py and return the model class inside.

    The class must subclass BaseModel and have a name matching
    <model_name> (underscores stripped) + 'model', case-insensitively.

    Raises:
        SystemExit: with a non-zero status when no matching class is found.
    """
    model_filename = 'models.' + model_name + '_model'
    modellib = importlib.import_module(model_filename)
    target_model_name = model_name.replace('_', '') + 'model'
    model = None
    for name, cls in modellib.__dict__.items():
        # guard with isinstance: module attributes need not be classes,
        # and issubclass() raises TypeError on non-classes
        if (name.lower() == target_model_name.lower()
                and isinstance(cls, type) and issubclass(cls, BaseModel)):
            model = cls
    if model is None:
        print('In %s.py, there should be a subclass of BaseModel with class name '
              'that matches %s in lowercase.' % (model_filename, target_model_name))
        # original exited with status 0, signalling success on failure
        exit(1)
    return model
|
def get_option_setter(model_name):
    """Return the command-line option modifier of the model named `model_name`
    (a callable that takes and returns an argparse parser)."""
    return find_model_using_name(model_name).modify_commandline_options
|
def create_model(opt, **kwargs):
    """Instantiate the model class named by opt.model, forwarding kwargs."""
    model_class = find_model_using_name(opt.model)
    instance = model_class(opt, **kwargs)
    print('model [%s] was created' % instance.name())
    return instance
|
class BaseModel():
    """Abstract base class for models.

    Owns the networks (exposed as attributes named ``net_<name>``), their
    optimizers and LR schedulers, and all checkpoint save/load logic.
    Subclasses populate loss_names / model_names / visual_names and
    implement forward() and optimize_parameters().
    """
    @staticmethod
    def modify_commandline_options(parser):
        """Add network-related CLI options and return the parser."""
        networks.modify_commandline_options(parser)
        return parser
    def __init__(self, opt):
        self.opt = opt
        self.gpu_ids = opt.gpu_ids
        self.isTrain = opt.isTrain
        # primary device is the first listed GPU, else CPU
        self.device = (torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu'))
        self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)
        # autotune conv algorithms; beneficial for fixed-size inputs
        torch.backends.cudnn.benchmark = True
        self.loss_names = []
        self.model_names = []
        self.visual_names = []
        self.image_paths = []
        self.optimizers = {}
    def name(self):
        return 'BaseModel'
    def set_input(self, input, mode='TRAIN'):
        """Store the raw input batch; subclasses unpack it as needed."""
        self.input = input
    def forward(self):
        pass
    def setup(self, opt, parser=None):
        """Create LR schedulers (training) and/or restore a checkpoint.

        Returns:
            (current_epoch, best_val_metric, best_val_ep)
        """
        current_ep = 0
        (best_val_metric, best_val_ep) = (0, 0)
        self.print_networks()
        if self.isTrain:
            self.schedulers = {k: netutils.get_scheduler(optim, opt) for (k, optim) in self.optimizers.items()}
        if ((not self.isTrain) or opt.load_model):
            (current_ep, best_val_metric, best_val_ep) = self.load_networks(opt.which_epoch)
            # a numeric checkpoint epoch resumes training at the *next* epoch
            if (opt.which_epoch not in ['latest', 'bestval']):
                current_ep += 1
        return (current_ep, best_val_metric, best_val_ep)
    def eval(self):
        """Switch every net_<name> module to eval mode."""
        for name in self.model_names:
            if isinstance(name, str):
                net = getattr(self, ('net_' + name))
                net.eval()
    def train(self):
        """Switch every net_<name> module to train mode."""
        for name in self.model_names:
            if isinstance(name, str):
                net = getattr(self, ('net_' + name))
                net.train()
    def test(self, compute_losses=False):
        """Run forward() without gradients; optionally compute losses."""
        with torch.no_grad():
            self.forward()
            if compute_losses:
                # subclasses are expected to provide compute_losses_D
                self.compute_losses_D()
    def get_image_paths(self):
        return self.image_paths
    def optimize_parameters(self):
        pass
    def update_learning_rate(self, metric=None):
        """Step every scheduler (passing `metric` for plateau-style policies)
        and log the resulting per-network learning rates."""
        for (k, scheduler) in self.schedulers.items():
            if (metric is not None):
                # only plateau-style schedulers accept a metric argument
                assert (self.opt.lr_policy in ['plateau', 'constant'])
                scheduler.step(metric)
            else:
                scheduler.step()
        for (k, optim) in self.optimizers.items():
            logging.info(('learning rate net_%s = %0.7f' % (k, optim.param_groups[0]['lr'])))
    def get_current_visuals(self):
        """Return an OrderedDict of the attributes listed in visual_names."""
        visual_ret = OrderedDict()
        for name in self.visual_names:
            assert isinstance(name, str)
            visual_ret[name] = getattr(self, name)
        return visual_ret
    def get_current_losses(self):
        """Return an OrderedDict mapping each loss name to its float value."""
        errors_ret = OrderedDict()
        for name in self.loss_names:
            assert isinstance(name, str)
            errors_ret[name] = float(getattr(self, name))
        return errors_ret
    def save_networks(self, save_name, current_ep, best_val_metric, best_val_ep):
        """Save each network with its optimizer/scheduler state and training
        progress to <save_dir>/<save_name>_net_<name>.pth."""
        for name in self.model_names:
            assert isinstance(name, str)
            save_filename = ('%s_net_%s.pth' % (save_name, name))
            save_path = os.path.join(self.save_dir, save_filename)
            net = getattr(self, ('net_' + name))
            # unwrap DataParallel so the checkpoint loads on any device layout
            if isinstance(net, torch.nn.DataParallel):
                sd = net.module.state_dict()
            else:
                sd = net.state_dict()
            optim = self.optimizers[name].state_dict()
            sched = self.schedulers[name].state_dict()
            checkpoint = dict(state_dict=sd, optimizer=optim, scheduler=sched, epoch=current_ep, best_val_metric=best_val_metric, best_val_ep=best_val_ep)
            torch.save(checkpoint, save_path)
    def load_networks(self, save_name):
        """Load every network (and, when training, its optimizer/scheduler)
        from <save_dir>/<save_name>_net_<name>.pth.

        Returns:
            (epoch, best_val_metric, best_val_ep) from the last checkpoint read.
        """
        # NOTE(review): assumes model_names is non-empty — otherwise the
        # variables returned below would be unbound; confirm with callers.
        for name in self.model_names:
            assert isinstance(name, str)
            load_filename = ('%s_net_%s.pth' % (save_name, name))
            load_path = os.path.join(self.save_dir, load_filename)
            net = getattr(self, ('net_' + name))
            if isinstance(net, torch.nn.DataParallel):
                net = net.module
            print(('loading the model from %s' % load_path))
            checkpoint = torch.load(load_path, map_location=str(self.device))
            state_dict = checkpoint['state_dict']
            # drop serialization metadata so load_state_dict sees plain keys
            if hasattr(state_dict, '_metadata'):
                del state_dict._metadata
            net.load_state_dict(state_dict)
            if self.isTrain:
                print(('restoring optimizer and scheduler for %s' % name))
                self.optimizers[name].load_state_dict(checkpoint['optimizer'])
                self.schedulers[name].load_state_dict(checkpoint['scheduler'])
            current_ep = checkpoint['epoch']
            best_val_metric = checkpoint['best_val_metric']
            best_val_ep = checkpoint['best_val_ep']
        return (current_ep, best_val_metric, best_val_ep)
    def print_networks(self):
        """Print each network's architecture and total parameter count."""
        print('---------- Networks initialized -------------')
        for name in self.model_names:
            if isinstance(name, str):
                net = getattr(self, ('net_' + name))
                num_params = 0
                for param in net.parameters():
                    num_params += param.numel()
                print(net)
                print(('[Network %s] Total number of parameters : %.3f M' % (name, (num_params / 1000000.0))))
        print('-----------------------------------------------')
    def set_requires_grad(self, nets, requires_grad=False):
        """Enable/disable gradient tracking for a network or list of networks."""
        if (not isinstance(nets, list)):
            nets = [nets]
        for net in nets:
            if (net is not None):
                for param in net.parameters():
                    param.requires_grad = requires_grad
|
def compute_mhsa(q, k, v, scale_factor=1, mask=None):
    """Scaled dot-product attention.

    q, k, v: (..., tokens, dim) tensors. Positions where `mask` is True are
    excluded from attention. Returns a tensor shaped like q.
    """
    scores = torch.einsum('... i d , ... j d -> ... i j', q, k) * scale_factor
    if mask is not None:
        assert mask.shape == scores.shape[2:]
        # -inf guarantees zero attention weight after the softmax
        scores = scores.masked_fill(mask, -np.inf)
    weights = torch.softmax(scores, dim=-1)
    return torch.einsum('... i j , ... j d -> ... i d', weights, v)
|
class MultiHeadSelfAttention(nn.Module):
    """Multi-head self-attention layer from the original transformer model.

    Args:
        dim: token embedding dimension (word embedding vector size)
        heads: number of attention heads
        dim_head: per-head dimension; defaults to dim // heads but need not
            equal it
    """

    def __init__(self, dim, heads=8, dim_head=None):
        super().__init__()
        self.dim_head = int(dim / heads) if dim_head is None else dim_head
        self.heads = heads
        inner_dim = self.dim_head * heads
        # one projection produces q, k and v at once
        self.to_qvk = nn.Linear(dim, inner_dim * 3, bias=False)
        self.W_0 = nn.Linear(inner_dim, dim, bias=False)
        self.scale_factor = self.dim_head ** -0.5

    def forward(self, x, mask=None):
        assert x.dim() == 3  # (batch, tokens, dim)
        projected = self.to_qvk(x)
        q, k, v = tuple(rearrange(projected, 'b t (d k h ) -> k b h t d ', k=3, h=self.heads))
        attended = compute_mhsa(q, k, v, mask=mask, scale_factor=self.scale_factor)
        attended = rearrange(attended, 'b h t d -> b t (h d)')
        return self.W_0(attended)
|
class NLBlockND(nn.Module):
def __init__(self, in_channels=256):
"Implementation of Non-Local Block with 4 different pairwise functions but doesn't include subsampling trick\n args:\n in_channels: original channel size (1024 in the paper)\n inter_channels: channel size inside the block if not specifed reduced to half (512 in the paper)\n mode: supports Gaussian, Embedded Gaussian, Dot Product, and Concatenation\n dimension: can be 1 (temporal), 2 (spatial), 3 (spatiotemporal)\n bn_layer: whether to add batch norm\n "
super(NLBlockND, self).__init__()
self.in_channels = in_channels
self.sig = nn.Sigmoid()
self.theta = nn.Conv2d(in_channels=self.in_channels, out_channels=self.in_channels, kernel_size=1)
self.phi = nn.Conv2d(in_channels=self.in_channels, out_channels=self.in_channels, kernel_size=1)
def forward(self, x, return_nl_map=False):
'\n args\n x: (N, C, T, H, W) for dimension=3; (N, C, H, W) for dimension 2; (N, C, T) for dimension 1\n '
batch_size = x.size(0)
theta_x = self.theta(x).view(batch_size, self.in_channels, (- 1))
phi_x = self.phi(x).view(batch_size, self.in_channels, (- 1))
theta_x = theta_x.permute(0, 2, 1)
f = torch.matmul(theta_x, phi_x)
f_div_C = (f / math.sqrt(self.in_channels))
y = f_div_C.permute(0, 2, 1).contiguous()
sig_y = self.sig(y)
final_y = sig_y.view(batch_size, *x.size()[2:], *x.size()[2:])
if return_nl_map:
return (final_y, sig_y)
else:
return final_y
|
def make_patch_resnet(depth, layername, num_classes=2, extra_output=None):
    """Build a patch-based ResNet classifier: truncate a CustomResNet after
    `layername` and append a 1x1 conv head with `num_classes` output channels.

    Parameters:
        depth -- resnet depth (18/34/50/101/152)
        layername -- last layer to keep ('layer1'..'layer4', or a stem name)
        num_classes -- output channels of the 1x1 conv head
        extra_output -- optional layer names whose activations the model's
            forward additionally returns
    """
    def change_out(layers):
        ind, layer = next((i, l) for i, (n, l) in enumerate(layers) if n == layername)
        if layername.startswith('layer'):
            # the block's last BatchNorm carries its channel width;
            # bottleneck blocks (depth >= 50) keep it one module earlier
            bn = list(layer.modules())[-1 if depth < 50 else -2]
            assert isinstance(bn, nn.BatchNorm2d)
            num_ch = bn.num_features
        else:
            num_ch = 64  # stem width
        layers[ind + 1:] = [('convout', nn.Conv2d(num_ch, num_classes, kernel_size=1))]
        return layers
    # `extra_output=None` is also CustomResNet's default, so a single call
    # replaces the original's duplicated branches (the `== None` comparison
    # and the stray debug print are gone)
    return CustomResNet(depth, modify_sequence=change_out, extra_output=extra_output)
|
def make_patch_xceptionnet(layername, num_classes=2, extra_output=None):
    """Build a patch-based XceptionNet classifier: truncate after `layername`
    and append a 1x1 conv head with `num_classes` output channels.

    Parameters:
        layername -- last layer to keep ('blockN' or 'reluN')
        num_classes -- output channels of the 1x1 conv head
        extra_output -- optional layer names whose activations the model's
            forward additionally returns
    """
    def change_out(layers):
        ind, layer = next((i, l) for i, (n, l) in enumerate(layers) if n == layername)
        if layername.startswith('block'):
            # the last (or second-to-last) module of an xception block is
            # its BatchNorm, which carries the channel width
            modules = list(layer.modules())
            bn = modules[-1]
            if not isinstance(bn, nn.BatchNorm2d):
                bn = modules[-2]
            assert isinstance(bn, nn.BatchNorm2d)
            num_ch = bn.num_features
        elif layername.startswith('relu'):
            # relu layers carry no channel info; read it off the preceding bn
            bn = layers[ind - 1][1]
            assert isinstance(bn, nn.BatchNorm2d)
            num_ch = bn.num_features
        else:
            raise NotImplementedError
        layers[ind + 1:] = [('convout', nn.Conv2d(num_ch, num_classes, kernel_size=1))]
        return layers
    # extra_output=None matches CustomXceptionNet's default, collapsing the
    # original's duplicated `== None` branches into one call
    return CustomXceptionNet(modify_sequence=change_out, extra_output=extra_output)
|
def make_pcl(backbone='xception', layername='block3', input_size=128):
    """Create a PCL non-local block sized for `layername` of `backbone`.

    Returns (model, spatial_size) where spatial_size is the feature-map side
    length of the chosen layer for a square `input_size` input.
    """
    if backbone == 'xception':
        channels = [128, 256, 728, 728, 728, 728, 728, 728, 728, 728, 728, 1024]
        quarter = int(input_size / 4)
        eighth = int(input_size / 8)
        sixteenth = int(input_size / 16)
        thirtysecond = int(input_size / 32)
        # block1 and block2 downsample; blocks 3-11 keep resolution; block12 halves again
        out_ch = [quarter, eighth] + [sixteenth] * 9 + [thirtysecond]
        layer = int(layername[5])
        channel = out_ch[layer - 1]
    elif backbone[:6] == 'resnet':
        layer = int(layername[5:])
        assert layer >= 2
        channels = [0, 64, 128, 256, 512]
        sizes = [int(input_size / 2), int(input_size / 4), int(input_size / 8), int(input_size / 16)]
        channel = sizes[layer - 1]
    from . import PCL
    model = PCL.NLBlockND(in_channels=channels[layer - 1])
    return (model, channel)
|
def make_xceptionnet_long():
    """Build a CustomXceptionNet whose tail (everything after block2) is
    replaced by two stride-1 PixelBlocks and a 2-class 1x1 conv head."""
    from . import xception

    def change_out(layers):
        channels = [3, 32, 64, 128, 256, 728, 728, 728, 728, 728, 728, 728, 728, 728, 1024, 1536, 2048]
        cut, _ = next((i, l) for i, (n, l) in enumerate(layers) if n == 'block2')
        tail = [
            ('pblock3', xception.PixelBlock(channels[4], channels[5], 2, 1, start_with_relu=True, grow_first=True)),
            ('pblock4', xception.PixelBlock(channels[5], channels[6], 3, 1, start_with_relu=True, grow_first=True)),
            ('convout', nn.Conv2d(channels[9], 2, kernel_size=1)),
        ]
        layers[cut + 1:] = tail
        return layers
    return CustomXceptionNet(modify_sequence=change_out)
|
class CustomResNet(nn.Module):
    """
    Customizable ResNet, compatible with pytorch's resnet, but:
     * The top-level sequence of modules can be modified to add
       or remove or alter layers.
     * Extra outputs can be produced, to allow backprop and access
       to internal features.
     * Pooling is replaced by resizable GlobalAveragePooling so that
       any size can be input (e.g., any multiple of 32 pixels).
     * halfsize=True halves striding on the first pooling to
       set the default size to 112x112 instead of 224x224.
    """
    def __init__(self, size=None, block=None, layers=None, num_classes=1000, extra_output=None, modify_sequence=None, halfsize=False):
        # (block type, per-stage block counts) for the standard depths
        standard_sizes = {18: (resnet.BasicBlock, [2, 2, 2, 2]), 34: (resnet.BasicBlock, [3, 4, 6, 3]), 50: (resnet.Bottleneck, [3, 4, 6, 3]), 101: (resnet.Bottleneck, [3, 4, 23, 3]), 152: (resnet.Bottleneck, [3, 8, 36, 3])}
        # exactly one of `size` or (`block`, `layers`) must be supplied
        assert ((size in standard_sizes) == (block is None) == (layers is None))
        if (size in standard_sizes):
            (block, layers) = standard_sizes[size]
        if (modify_sequence is None):
            def modify_sequence(x):
                return x
        # attributes consumed by resnet.ResNet._make_layer, which is invoked
        # below — deliberately before super().__init__() is called
        self.inplanes = 64
        norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.dilation = 1
        self.groups = 1
        self.base_width = 64
        # caller-supplied hook may add/remove/alter (name, module) pairs
        sequence = modify_sequence([('conv1', nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)), ('bn1', norm_layer(64)), ('relu', nn.ReLU(inplace=True)), ('maxpool', nn.MaxPool2d(3, stride=(1 if halfsize else 2), padding=1)), ('layer1', self._make_layer(block, 64, layers[0])), ('layer2', self._make_layer(block, 128, layers[1], stride=2)), ('layer3', self._make_layer(block, 256, layers[2], stride=2)), ('layer4', self._make_layer(block, 512, layers[3], stride=2)), ('avgpool', GlobalAveragePool2d()), ('fc', nn.Linear((512 * block.expansion), num_classes))])
        super(CustomResNet, self).__init__()
        # register the (possibly modified) sequence as submodules, in order
        for (name, layer) in sequence:
            setattr(self, name, layer)
        self.extra_output = extra_output
    def _make_layer(self, block, channels, depth, stride=1):
        # borrow torchvision's stage builder without inheriting from ResNet
        return resnet.ResNet._make_layer(self, block, channels, depth, stride)
    def forward(self, x):
        """Run submodules in registration order; when extra_output is set,
        also return the activations of the named layers (appended after x)."""
        extra = []
        for (name, module) in self._modules.items():
            x = module(x)
            if (self.extra_output and (name in self.extra_output)):
                extra.append(x)
        if self.extra_output:
            return ((x,) + tuple(extra))
        return x
|
class CustomXceptionNet(nn.Module):
    """
    Customizable Xceptionnet, compatible with
    https://github.com/Cadene/pretrained-models.pytorch/blob/master/pretrainedmodels/models/xception.py
    but:
     * The top-level sequence of modules can be modified to add
       or remove or alter layers.
     * Extra outputs can be produced, to allow backprop and access
       to internal features.
     * halfsize=True halves striding on the first convolution to
       allow 151x151 images to be processed rather than 299x299 only.
    """
    def __init__(self, channels=None, num_classes=1000, extra_output=None, modify_sequence=None, halfsize=False):
        from . import xception
        if (channels is None):
            # default per-stage channel widths of the reference xception
            channels = [3, 32, 64, 128, 256, 728, 728, 728, 728, 728, 728, 728, 728, 728, 1024, 1536, 2048]
        assert (len(channels) == 17)
        if (modify_sequence is None):
            def modify_sequence(x):
                return x
        # caller-supplied hook may add/remove/alter (name, module) pairs
        sequence = modify_sequence([('conv1', nn.Conv2d(channels[0], channels[1], kernel_size=3, stride=(1 if halfsize else 2), padding=0, bias=False)), ('bn1', nn.BatchNorm2d(channels[1])), ('relu1', nn.ReLU(inplace=True)), ('conv2', nn.Conv2d(channels[1], channels[2], 3, bias=False)), ('bn2', nn.BatchNorm2d(channels[2])), ('relu2', nn.ReLU(inplace=True)), ('block1', xception.Block(channels[2], channels[3], 2, 2, start_with_relu=False, grow_first=True)), ('block2', xception.Block(channels[3], channels[4], 2, 2, start_with_relu=True, grow_first=True)), ('block3', xception.Block(channels[4], channels[5], 2, 2, start_with_relu=True, grow_first=True)), ('block4', xception.Block(channels[5], channels[6], 3, 1, start_with_relu=True, grow_first=True)), ('block5', xception.Block(channels[6], channels[7], 3, 1, start_with_relu=True, grow_first=True)), ('block6', xception.Block(channels[7], channels[8], 3, 1, start_with_relu=True, grow_first=True)), ('block7', xception.Block(channels[8], channels[9], 3, 1, start_with_relu=True, grow_first=True)), ('block8', xception.Block(channels[9], channels[10], 3, 1, start_with_relu=True, grow_first=True)), ('block9', xception.Block(channels[10], channels[11], 3, 1, start_with_relu=True, grow_first=True)), ('block10', xception.Block(channels[11], channels[12], 3, 1, start_with_relu=True, grow_first=True)), ('block11', xception.Block(channels[12], channels[13], 3, 1, start_with_relu=True, grow_first=True)), ('block12', xception.Block(channels[13], channels[14], 2, 2, start_with_relu=True, grow_first=False)), ('conv3', xception.SeparableConv2d(channels[14], channels[15], 3, 1, 1)), ('bn3', nn.BatchNorm2d(channels[15])), ('relu3', nn.ReLU(inplace=True)), ('conv4', xception.SeparableConv2d(channels[15], channels[16], 3, 1, 1)), ('bn4', nn.BatchNorm2d(channels[16])), ('relu4', nn.ReLU(inplace=True)), ('avgpool', GlobalAveragePool2d()), ('fc', nn.Linear(channels[16], num_classes))])
        super(CustomXceptionNet, self).__init__()
        # register the (possibly modified) sequence as submodules, in order
        for (name, layer) in sequence:
            setattr(self, name, layer)
        self.extra_output = extra_output
    def forward(self, x):
        """Run submodules in registration order; when extra_output is set,
        also return the activations of the named layers (appended after x)."""
        extra = []
        for (name, module) in self._modules.items():
            x = module(x)
            if (self.extra_output and (name in self.extra_output)):
                extra.append(x)
        if self.extra_output:
            return ((x,) + tuple(extra))
        return x
|
class Vectorize(nn.Module):
    """Flattens all dimensions after the batch axis: (N, ...) -> (N, prod(...))."""

    def __init__(self):
        super(Vectorize, self).__init__()

    def forward(self, x):
        flat_features = int(numpy.prod(x.size()[1:]))
        return x.view(x.size(0), flat_features)
|
class GlobalAveragePool2d(nn.Module):
    """Averages each channel over all spatial positions: (N, C, H, W) -> (N, C)."""

    def __init__(self):
        super(GlobalAveragePool2d, self).__init__()

    def forward(self, x):
        flattened = x.view(x.size(0), x.size(1), -1)
        return torch.mean(flattened, dim=2)
|
def get_scheduler(optimizer, opt):
    """Return a learning-rate scheduler for `optimizer` per opt.lr_policy.

    'plateau'  -- ReduceLROnPlateau on a maximized metric with opt.patience.
    'constant' -- a plateau scheduler configured so the LR never effectively
                  drops (eps=opt.lr with a huge patience).

    Raises:
        NotImplementedError: for any other opt.lr_policy.
    """
    if opt.lr_policy == 'plateau':
        return lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.1, threshold=0.0001, patience=opt.patience, eps=1e-06)
    if opt.lr_policy == 'constant':
        return lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.1, threshold=0.0001, patience=1000, eps=opt.lr)
    # the original *returned* the exception instead of raising it, so callers
    # would store a NotImplementedError object and fail later with a
    # confusing error; raise it here instead
    raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
|
def init_weights(net, init_type='xavier', gain=0.02):
def init_func(m):
classname = m.__class__.__name__
if (hasattr(m, 'weight') and ((classname.find('Conv') != (- 1)) or (classname.find('Linear') != (- 1)))):
if (init_type == 'normal'):
init.normal_(m.weight.data, 0.0, gain)
elif (init_type == 'xavier'):
init.xavier_normal_(m.weight.data, gain=gain)
elif (init_type == 'kaiming'):
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif (init_type == 'orthogonal'):
init.orthogonal_(m.weight.data, gain=gain)
else:
raise NotImplementedError(('initialization method [%s] is not implemented' % init_type))
if (hasattr(m, 'bias') and (m.bias is not None)):
init.constant_(m.bias.data, 0.0)
elif (classname.find('BatchNorm2d') != (- 1)):
init.normal_(m.weight.data, 1.0, gain)
init.constant_(m.bias.data, 0.0)
print(('initialize network with %s' % init_type))
net.apply(init_func)
|
def init_net(net, init_type='xavier', gpu_ids=[]):
    """Move `net` onto the first GPU in gpu_ids (wrapped in DataParallel over
    all of them) and initialize its weights unless init_type is None."""
    if len(gpu_ids) > 0:
        assert torch.cuda.is_available()
        net.to(gpu_ids[0])
        net = torch.nn.DataParallel(net, gpu_ids)
    if init_type is not None:
        init_weights(net, init_type)
    return net
|
def modify_commandline_options(parser):
    """Set default load/crop sizes matching the chosen discriminator backbone
    (333/299 for xception-style models, 256/224 for resnets)."""
    opt, _ = parser.parse_known_args()
    backbone = opt.which_model_netD
    if 'xception' in backbone:
        parser.set_defaults(loadSize=333, fineSize=299)
    elif 'resnet' in backbone:
        parser.set_defaults(loadSize=256, fineSize=224)
    else:
        raise NotImplementedError
|
def define_D(which_model_netD, init_type, gpu_ids=[]):
    """Construct a 2-class discriminator backbone (a torchvision resnet* or
    xception) and hand it to netutils.init_net for placement and init."""
    if 'resnet' in which_model_netD:
        from torchvision.models import resnet
        constructor = getattr(resnet, which_model_netD)
        netD = constructor(pretrained=False, num_classes=2)
    elif 'xception' in which_model_netD:
        from . import xception
        netD = xception.xception(num_classes=2)
    else:
        raise NotImplementedError('Discriminator model name [%s] is not recognized' % which_model_netD)
    return netutils.init_net(netD, init_type, gpu_ids=gpu_ids)
|
def define_patch_D(which_model_netD, init_type, gpu_ids=[]):
    """Construct and initialize a patch-level discriminator from a name like
    'resnet<depth>_<layer>[_extra...]', 'widenet_ks<k>_d<d>',
    'xception_<block>[_extra...]' or 'longxception'."""
    splits = which_model_netD.split('_')
    if which_model_netD.startswith('resnet'):
        from . import customnet
        depth = int(splits[0][6:])
        layer = splits[1]
        if len(splits) > 2:
            # trailing tokens name layers whose activations are also returned
            extra_output = [token.replace('extra', 'layer') for token in splits[2:]]
            netD = customnet.make_patch_resnet(depth, layer, extra_output=extra_output)
        else:
            netD = customnet.make_patch_resnet(depth, layer)
    elif which_model_netD.startswith('widenet'):
        kernel_size = int(splits[1][2:])
        dilation = int(splits[2][1:])
        netD = WideNet(kernel_size, dilation)
    elif which_model_netD.startswith('xception'):
        from . import customnet
        layer = splits[1]
        if len(splits) > 2:
            extra_output = [token.replace('extra', 'block') for token in splits[2:]]
            netD = customnet.make_patch_xceptionnet(layername=layer, extra_output=extra_output)
        else:
            netD = customnet.make_patch_xceptionnet(layer)
    elif which_model_netD.startswith('longxception'):
        from . import customnet
        netD = customnet.make_xceptionnet_long()
    else:
        raise NotImplementedError('Discriminator model name [%s] is not recognized' % which_model_netD)
    return netutils.init_net(netD, init_type, gpu_ids=gpu_ids)
|
def define_PCL(which_model_netD, init_type, gpu_ids=[], input_size=128):
    """Construct a patch-consistency (PCL) network for the named backbone.

    resnet*/xception names return (initialized net, layer spatial size);
    widenet/longxception names return just the initialized net.
    """
    splits = which_model_netD.split('_')
    if which_model_netD.startswith('resnet') or which_model_netD.startswith('xception'):
        from . import customnet
        backbone, layer = splits[0], splits[1]
        netPCL, out_ch = customnet.make_pcl(backbone=backbone, layername=layer, input_size=input_size)
        return (netutils.init_net(netPCL, init_type, gpu_ids=gpu_ids), out_ch)
    elif which_model_netD.startswith('widenet'):
        kernel_size = int(splits[1][2:])
        dilation = int(splits[2][1:])
        netD = WideNet(kernel_size, dilation)
        return netutils.init_net(netD, init_type, gpu_ids=gpu_ids)
    elif which_model_netD.startswith('longxception'):
        from . import customnet
        netD = customnet.make_xceptionnet_long()
        return netutils.init_net(netD, init_type, gpu_ids=gpu_ids)
    else:
        raise NotImplementedError('Discriminator model name [%s] is not recognized' % which_model_netD)
|
class WideNet(nn.Module):
    """Shallow wide-receptive-field patch classifier: one large strided conv
    followed by pooling and 1x1 convs down to 2 output channels."""

    def __init__(self, kernel_size=7, dilation=1):
        super().__init__()
        layers = [
            nn.Conv2d(3, 256, kernel_size=kernel_size, dilation=dilation, stride=2, padding=kernel_size // 2, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(3, stride=2, padding=1),
            nn.Conv2d(256, 256, kernel_size=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 2, kernel_size=1),
        ]
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        return self.model(x)
|
class SeparableConv2d(nn.Module):
    """Depthwise-separable convolution: a per-channel spatial conv followed
    by a 1x1 pointwise conv that mixes channels."""

    def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=1, bias=False):
        super(SeparableConv2d, self).__init__()
        # groups=in_channels makes conv1 purely depthwise
        self.conv1 = nn.Conv2d(in_channels, in_channels, kernel_size, stride, padding, dilation, groups=in_channels, bias=bias)
        self.pointwise = nn.Conv2d(in_channels, out_channels, 1, 1, 0, 1, 1, bias=bias)

    def forward(self, x):
        return self.pointwise(self.conv1(x))
|
class PixelBlock(nn.Module):
    """Xception-style residual block built entirely from 1x1 separable convs,
    so spatial resolution is always preserved (strides must be 1)."""

    def __init__(self, in_filters, out_filters, reps, strides=1, start_with_relu=True, grow_first=True):
        super(PixelBlock, self).__init__()
        assert strides == 1
        if out_filters != in_filters or strides != 1:
            # 1x1 projection so the residual sum is shape-compatible
            self.skip = nn.Conv2d(in_filters, out_filters, 1, stride=strides, bias=False)
            self.skipbn = nn.BatchNorm2d(out_filters)
        else:
            self.skip = None
        body = []
        width = in_filters
        if grow_first:
            body += [nn.ReLU(inplace=True),
                     SeparableConv2d(in_filters, out_filters, 1, stride=1, padding=0, bias=False),
                     nn.BatchNorm2d(out_filters)]
            width = out_filters
        for _ in range(reps - 1):
            body += [nn.ReLU(inplace=True),
                     SeparableConv2d(width, width, 1, stride=1, padding=0, bias=False),
                     nn.BatchNorm2d(width)]
        if not grow_first:
            body += [nn.ReLU(inplace=True),
                     SeparableConv2d(in_filters, out_filters, 1, stride=1, padding=0, bias=False),
                     nn.BatchNorm2d(out_filters)]
        if start_with_relu:
            # the first ReLU must not be in-place: its input is the block
            # input, which the skip path still needs
            body[0] = nn.ReLU(inplace=False)
        else:
            body = body[1:]
        self.rep = nn.Sequential(*body)

    def forward(self, inp):
        out = self.rep(inp)
        if self.skip is not None:
            residual = self.skipbn(self.skip(inp))
        else:
            residual = inp
        out += residual
        return out
|
class Block(nn.Module):
    """Standard Xception residual block: `reps` separable 3x3 convs with the
    channel change applied either first or last, an optional strided MaxPool,
    and a 1x1-conv skip projection whenever the output shape differs."""

    def __init__(self, in_filters, out_filters, reps, strides=1, start_with_relu=True, grow_first=True):
        super(Block, self).__init__()
        if out_filters != in_filters or strides != 1:
            # 1x1 projection so the residual sum is shape-compatible
            self.skip = nn.Conv2d(in_filters, out_filters, 1, stride=strides, bias=False)
            self.skipbn = nn.BatchNorm2d(out_filters)
        else:
            self.skip = None
        body = []
        width = in_filters
        if grow_first:
            body += [nn.ReLU(inplace=True),
                     SeparableConv2d(in_filters, out_filters, 3, stride=1, padding=1, bias=False),
                     nn.BatchNorm2d(out_filters)]
            width = out_filters
        for _ in range(reps - 1):
            body += [nn.ReLU(inplace=True),
                     SeparableConv2d(width, width, 3, stride=1, padding=1, bias=False),
                     nn.BatchNorm2d(width)]
        if not grow_first:
            body += [nn.ReLU(inplace=True),
                     SeparableConv2d(in_filters, out_filters, 3, stride=1, padding=1, bias=False),
                     nn.BatchNorm2d(out_filters)]
        if start_with_relu:
            # the first ReLU must not be in-place: its input is the block
            # input, which the skip path still needs
            body[0] = nn.ReLU(inplace=False)
        else:
            body = body[1:]
        if strides != 1:
            body.append(nn.MaxPool2d(3, strides, 1))
        self.rep = nn.Sequential(*body)

    def forward(self, inp):
        out = self.rep(inp)
        if self.skip is not None:
            residual = self.skipbn(self.skip(inp))
        else:
            residual = inp
        out += residual
        return out
|
class Xception(nn.Module):
    """
    Xception optimized for the ImageNet dataset, as specified in
    https://arxiv.org/pdf/1610.02357.pdf
    """
    def __init__(self, num_classes=1000):
        """Constructor.

        Args:
            num_classes: number of classes
        """
        super(Xception, self).__init__()
        self.num_classes = num_classes
        # entry flow: two plain convs, then three downsampling blocks
        self.conv1 = nn.Conv2d(3, 32, 3, 2, 0, bias=False)
        self.bn1 = nn.BatchNorm2d(32)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(32, 64, 3, bias=False)
        self.bn2 = nn.BatchNorm2d(64)
        self.relu2 = nn.ReLU(inplace=True)
        self.block1 = Block(64, 128, 2, 2, start_with_relu=False, grow_first=True)
        self.block2 = Block(128, 256, 2, 2, start_with_relu=True, grow_first=True)
        self.block3 = Block(256, 728, 2, 2, start_with_relu=True, grow_first=True)
        # middle flow: eight stride-1 blocks at 728 channels
        self.block4 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
        self.block5 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
        self.block6 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
        self.block7 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
        self.block8 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
        self.block9 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
        self.block10 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
        self.block11 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
        # exit flow
        self.block12 = Block(728, 1024, 2, 2, start_with_relu=True, grow_first=False)
        self.conv3 = SeparableConv2d(1024, 1536, 3, 1, 1)
        self.bn3 = nn.BatchNorm2d(1536)
        self.relu3 = nn.ReLU(inplace=True)
        self.conv4 = SeparableConv2d(1536, 2048, 3, 1, 1)
        self.bn4 = nn.BatchNorm2d(2048)
        # NOTE(review): logits() below calls self.last_linear, which is only
        # created by the xception() factory (aliasing this fc); a directly
        # constructed Xception would fail in forward — confirm intended.
        self.fc = nn.Linear(2048, num_classes)
    def features(self, input):
        """Return the 2048-channel feature map for an input image batch."""
        x = self.conv1(input)
        x = self.bn1(x)
        x = self.relu1(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu2(x)
        x = self.block1(x)
        x = self.block2(x)
        x = self.block3(x)
        x = self.block4(x)
        x = self.block5(x)
        x = self.block6(x)
        x = self.block7(x)
        x = self.block8(x)
        x = self.block9(x)
        x = self.block10(x)
        x = self.block11(x)
        x = self.block12(x)
        x = self.conv3(x)
        x = self.bn3(x)
        x = self.relu3(x)
        x = self.conv4(x)
        x = self.bn4(x)
        return x
    def logits(self, features):
        """ReLU + global average pool + classifier over the feature map."""
        x = nn.ReLU(inplace=True)(features)
        x = F.adaptive_avg_pool2d(x, (1, 1))
        x = x.view(x.size(0), (- 1))
        x = self.last_linear(x)
        return x
    def forward(self, input):
        x = self.features(input)
        x = self.logits(x)
        return x
|
def xception(num_classes=1000, pretrained='imagenet'):
    """Build an Xception model, optionally loading pretrained weights whose
    shapes match, and expose the classifier as `last_linear` (the attribute
    Xception.logits expects).

    Parameters:
        num_classes -- size of the final classifier
        pretrained -- key into pretrained_settings['xception'], or falsy for
            random initialization
    """
    model = Xception(num_classes=num_classes)
    if pretrained:
        settings = pretrained_settings['xception'][pretrained]
        pretrained_state = model_zoo.load_url(settings['url'])
        model_state = model.state_dict()
        # keep only weights present in this model with matching shapes
        # (e.g. the fc layer differs whenever num_classes != 1000)
        pretrained_state = {k: v for (k, v) in pretrained_state.items() if ((k in model_state) and (v.size() == model_state[k].size()))}
        print(list(pretrained_state.keys()))
        model_state.update(pretrained_state)
        model.load_state_dict(model_state)
        model.input_space = settings['input_space']
        model.input_size = settings['input_size']
        model.input_range = settings['input_range']
        model.mean = settings['mean']
        model.std = settings['std']
    # Xception.logits calls self.last_linear, so alias it unconditionally:
    # the original only did this on the pretrained path, leaving a
    # randomly-initialized model broken at forward time. It also built the
    # model twice; the duplicate construction is removed.
    model.last_linear = model.fc
    del model.fc
    return model
|
class BaseOptions(options.Options):
def __init__(self, print_opt=True):
options.Options.__init__(self)
self.isTrain = False
self.print_opt = print_opt
parser = self.parser
parser.add_argument('--model', type=str, default='basic_discriminator', help='chooses which model to use')
parser.add_argument('--which_model_netD', type=str, default='resnet18', help='selects model to use for netD')
parser.add_argument('--fake_class_id', type=int, default=0, help='class id of fake ims')
parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
parser.add_argument('--load_model', action='store_true', help='load the latest model')
parser.add_argument('--seed', type=int, default=0, help='torch.manual_seed value')
parser.add_argument('--init_type', type=str, default='xavier', help='network initialization [normal|xavier|kaiming|orthogonal]')
parser.add_argument('--lbda', type=int, default=10, help='lambda value for Patch-consistency learning')
parser.add_argument('--loadSize', type=int, default=256, help='scale images to this size')
parser.add_argument('--fineSize', type=int, default=256, help='then crop to this size')
parser.add_argument('--nThreads', default=4, type=int, help='# threads for loading data')
parser.add_argument('--batch_size', type=int, default=32, help='input batch size')
parser.add_argument('--real_im_path', type=str, help='path to real images')
parser.add_argument('--fake_im_path', type=str, help='path to fake images')
parser.add_argument('--max_dataset_size', type=int, default=float('inf'), help='Maximum number of samples to use in dataset')
parser.add_argument('--name', type=str, default='', help='name of the experiment. it decides where to store samples and models')
parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{which_model_netG}_size{loadSize}')
parser.add_argument('--prefix', default='', type=str, help='customized prefix: opt.name = prefix + opt.name: e.g., {model}_{which_model_netG}_size{loadSize}')
parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.')
def parse(self):
    """Parse options in two passes so the chosen model can register its own
    flags, then resolve the experiment name and normalize gpu_ids.

    Returns the fully-populated options namespace.
    """
    # First pass: just discover which model was requested.
    opt = options.Options.parse(self, print_opt=False)
    # Let the model register model-specific options, then re-parse.
    self.parser = models.get_option_setter(opt.model)(self.parser)
    opt = options.Options.parse(self, print_opt=False)
    opt.isTrain = self.isTrain

    # Resolve the experiment name; an empty name falls back to a template.
    template = opt.name if opt.name else '{model}_{which_model_netD}_size{fineSize}'
    opt.name = template.format(**vars(opt))
    if opt.suffix:
        opt.name = '{}_{}'.format(opt.name, opt.suffix.format(**vars(opt)))
        opt.suffix = ''
    if opt.prefix:
        opt.name = '{}-{}'.format(opt.prefix.format(**vars(opt)), opt.name)
        opt.prefix = ''

    if self.print_opt:
        self.print_options(opt)

    # Normalize gpu_ids: "0,1" -> [0, 1]; negative ids (CPU request) dropped.
    raw_ids = opt.gpu_ids.split(',') if isinstance(opt.gpu_ids, str) else opt.gpu_ids
    opt.gpu_ids = [gid for gid in map(int, raw_ids) if gid >= 0]
    if opt.gpu_ids and torch.cuda.is_available():
        torch.cuda.set_device(opt.gpu_ids[0])

    # Data paths are mandatory except for the mask-based model / openmfc data.
    if getattr(opt, 'dataset_name', None) != 'openmfc':
        if opt.model != 'patch_inconsistency_discriminator':
            assert opt.real_im_path and opt.fake_im_path
    return opt
|
class TestOptions(BaseOptions):
    """Command-line options for test/evaluation runs.

    Extends BaseOptions with test-only flags and, at parse time, back-fills
    any options the user did not specify on the command line from the yaml
    configuration saved during training.
    """

    def __init__(self):
        # print_opt=False: the test script echoes options itself.
        BaseOptions.__init__(self, print_opt=False)
        parser = self.parser
        parser.add_argument('--train_config', type=argparse.FileType(mode='r'), required=True, help='config file saved from model training')
        parser.add_argument('--partition', type=str, default='val', help='val or test')
        parser.add_argument('--dataset_name', type=str, required=True, help='name to describe test dataset when saving results, e.g. celebahq_pgan')
        parser.add_argument('--force_redo', action='store_true', help='force recompute results')
        parser.add_argument('--test_compression', type=int, help='jpeg compression level')
        parser.add_argument('--test_gamma', type=int, help='gamma adjustment level')
        parser.add_argument('--test_blur', type=int, help='blur level')
        parser.add_argument('--test_flip', action='store_true', help='flip all test images')
        parser.add_argument('--visualize', action='store_true', help='save visualizations when running test')
        parser.add_argument('--average_mode', help='which kind of patch averaging to use for visualizations [vote, before_softmax, after_softmax]')
        parser.add_argument('--topn', type=int, default=100, help='visualize top n')

    def parse(self):
        """Parse CLI options, then merge in values from the train config.

        Priority: CLI-specified > train config > parser defaults. Data paths
        and gpu ids are never taken from the train config.
        """
        opt = super().parse()
        # --train_config is an open file handle (argparse.FileType).
        train_conf = yaml.load(opt.train_config, Loader=yaml.FullLoader)
        # Map each option string (e.g. '--batch_size') to its dest name.
        option_strings = {}
        for action_group in self.parser._action_groups:
            for action in action_group._group_actions:
                for option in action.option_strings:
                    option_strings[option] = action.dest
        # Options the user explicitly passed on the command line.
        specified_options = set([option_strings[x] for x in sys.argv if (x in option_strings)])
        options_from_train = []
        for (k, v) in train_conf.items():
            if (k in ['real_im_path', 'fake_im_path', 'gpu_ids']):
                continue
            # NOTE(review): options currently equal to None are never
            # overridden from the train config -- presumably so unset
            # test-time-only flags stay unset; confirm this is intended.
            if (getattr(opt, k, None) is None):
                continue
            if (k not in specified_options):
                setattr(opt, k, v)
                options_from_train.append((k, v))
        print('Using the following options from the train configuration file:')
        print(options_from_train)
        # Sanity-check that the supplied paths match the requested partition.
        if opt.real_im_path:
            assert (opt.partition in opt.real_im_path)
            opt.real_im_path = opt.real_im_path.rstrip('/')
        if opt.fake_im_path:
            assert (opt.partition in opt.fake_im_path)
            opt.fake_im_path = opt.fake_im_path.rstrip('/')
        # Testing always loads a trained model and runs deterministically.
        opt.load_model = True
        opt.model_seed = 0
        opt.isTrain = False
        return opt
|
class TrainOptions(BaseOptions):
    """Command-line options used only during training: logging/checkpoint
    frequencies, optimizer hyperparameters, and stopping criteria."""

    def __init__(self, print_opt=True):
        BaseOptions.__init__(self, print_opt)
        parser = self.parser
        parser.add_argument('--display_freq', type=int, default=1000, help='frequency of showing training results visualization')
        parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console')
        parser.add_argument('--save_latest_freq', type=int, default=1000, help='frequency of saving the latest results')
        parser.add_argument('--save_epoch_freq', type=int, default=100, help='frequency of saving checkpoints at the end of epochs')
        parser.add_argument('--beta1', type=float, default=0.9, help='momentum term of adam')
        parser.add_argument('--lr', type=float, default=0.001, help='initial learning rate for adam')
        parser.add_argument('--lr_policy', default='constant', help='lr schedule [constant|plateau]')
        parser.add_argument('--patience', type=int, default=10, help='will stop training if val metric does not improve for this many epochs')
        parser.add_argument('--max_epochs', type=int, help='maximum epochs to train, if not specified, will stop based on patience, or whichever is sooner')
        # Flag consumed by BaseOptions.parse() to mark training mode.
        self.isTrain = True
|
def train(opt):
    """Training loop for the paired real/fake patch discriminator.

    Alternates epochs of optimization over PairedDataset batches with a
    validation pass; checkpoints the latest/best models and stops on
    early-stopping patience, perfect validation accuracy, or opt.max_epochs.

    NOTE(review): a second `train` defined later in this file shadows this
    one if both land in a single module -- confirm they originate from
    separate scripts.
    """
    torch.manual_seed(opt.seed)
    # Only the mask-based model consumes per-pixel masks.
    if (opt.model == 'patch_inconsistency_discriminator'):
        WITH_MASK = True
    else:
        WITH_MASK = False
    if (not WITH_MASK):
        # Image-only models read from the train/ split subfolders.
        dset = PairedDataset(opt, os.path.join(opt.real_im_path, 'train'), os.path.join(opt.fake_im_path, 'train'), with_mask=WITH_MASK)
    else:
        dset = PairedDataset(opt, os.path.join(opt.real_im_path), os.path.join(opt.fake_im_path), with_mask=WITH_MASK)
    # Each dataset item is a (real, fake) pair, so halve batch_size to keep
    # opt.batch_size images per optimization step.
    dl = DataLoader(dset, batch_size=(opt.batch_size // 2), num_workers=opt.nThreads, pin_memory=False, shuffle=True)
    assert (opt.fake_class_id in [0, 1])
    fake_label = opt.fake_class_id
    real_label = (1 - fake_label)
    logging.info(('real label = %d' % real_label))
    logging.info(('fake label = %d' % fake_label))
    dataset_size = len(dset)
    logging.info(('# total images = %d' % dataset_size))
    logging.info(('# total batches = %d' % len(dl)))
    model = create_model(opt)
    # setup() restores epoch/best-metric state when resuming from a checkpoint.
    (epoch, best_val_metric, best_val_ep) = model.setup(opt)
    visualizer_losses = (model.loss_names + [(n + '_val') for n in model.loss_names])
    visualizer = Visualizer(opt, visualizer_losses, model.visual_names)
    total_batches = (epoch * len(dl))
    t_data = 0
    now = time.strftime('%c')
    logging.info(('================ Training Loss (%s) ================\n' % now))
    while True:
        epoch_start_time = time.time()
        iter_data_time = time.time()
        epoch_iter = 0
        for (i, ims) in enumerate(dl):
            ims_real = ims['original'].to(opt.gpu_ids[0])
            ims_fake = ims['manipulated'].to(opt.gpu_ids[0])
            labels_real = (real_label * torch.ones(ims_real.shape[0], dtype=torch.long).to(opt.gpu_ids[0]))
            labels_fake = (fake_label * torch.ones(ims_fake.shape[0], dtype=torch.long).to(opt.gpu_ids[0]))
            if (not WITH_MASK):
                inputs = dict(ims=torch.cat((ims_real, ims_fake), axis=0), labels=torch.cat((labels_real, labels_fake), axis=0))
            else:
                masks_real = ims['mask_original'].to(opt.gpu_ids[0])
                masks_fake = ims['mask_manipulated'].to(opt.gpu_ids[0])
                inputs = dict(ims=torch.cat((ims_real, ims_fake), axis=0), masks=torch.cat((masks_real, masks_fake), axis=0), labels=torch.cat((labels_real, labels_fake), axis=0))
            batch_data = dict(inputs)
            iter_start_time = time.time()
            # Periodically sample how long data loading took this iteration.
            if ((total_batches % opt.print_freq) == 0):
                t_data = (iter_start_time - iter_data_time)
            total_batches += 1
            epoch_iter += 1
            model.reset()
            model.set_input(batch_data)
            model.optimize_parameters()
            if ((epoch_iter % opt.print_freq) == 0):
                losses = model.get_current_losses()
                t = (time.time() - iter_start_time)
                visualizer.print_current_losses(epoch, (float(epoch_iter) / len(dl)), total_batches, losses, t, t_data)
                visualizer.plot_current_losses(total_batches, losses)
            if ((epoch_iter % opt.save_latest_freq) == 0):
                logging.info(('saving the latest model (epoch %d, total_batches %d)' % (epoch, total_batches)))
                model.save_networks('latest', epoch, best_val_metric, best_val_ep)
                model.reset()
            iter_data_time = time.time()
        # ---- end-of-epoch validation ----
        model.eval()
        val_start_time = time.time()
        val_losses = validate(model, opt)
        visualizer.plot_current_losses(epoch, val_losses)
        logging.info('Printing validation losses:')
        visualizer.print_current_losses(epoch, 0.0, total_batches, val_losses, (time.time() - val_start_time), 0.0)
        model.train()
        model.reset()
        assert model.net_D.training
        if (val_losses[(model.val_metric + '_val')] > best_val_metric):
            # New best validation metric: record it and checkpoint.
            logging.info(('Updating best val mode at ep %d' % epoch))
            logging.info(('The previous values: ep %d, val %0.2f' % (best_val_ep, best_val_metric)))
            best_val_ep = epoch
            best_val_metric = val_losses[(model.val_metric + '_val')]
            logging.info(('The updated values: ep %d, val %0.2f' % (best_val_ep, best_val_metric)))
            model.save_networks('bestval', epoch, best_val_metric, best_val_ep)
            with open(os.path.join(model.save_dir, 'bestval_ep.txt'), 'a') as f:
                f.write(('ep: %d %s: %f\n' % (epoch, (model.val_metric + '_val'), best_val_metric)))
        elif (epoch > (best_val_ep + (5 * opt.patience))):
            # Early stopping: no improvement for 5 * patience epochs.
            logging.info(('Current epoch %d, last updated val at ep %d' % (epoch, best_val_ep)))
            logging.info('Stopping training...')
            break
        elif (best_val_metric == 1):
            logging.info('Reached perfect val accuracy metric')
            logging.info('Stopping training...')
            break
        elif (opt.max_epochs and (epoch > opt.max_epochs)):
            logging.info('Reached max epoch count')
            logging.info('Stopping training...')
            break
        logging.info(('Best val ep: %d' % best_val_ep))
        logging.info(('Best val metric: %0.2f' % best_val_metric))
        visualizer.save_final_plots()
        if (((epoch % opt.save_epoch_freq) == 0) and (epoch > 0)):
            logging.info(('saving the model at the end of epoch %d, total batches %d' % (epoch, total_batches)))
            model.save_networks('latest', epoch, best_val_metric, best_val_ep)
            model.save_networks(epoch, epoch, best_val_metric, best_val_ep)
        logging.info(('End of epoch %d \t Time Taken: %d sec' % (epoch, (time.time() - epoch_start_time))))
        model.update_learning_rate(metric=val_losses[(model.val_metric + '_val')])
        epoch += 1
    # Final checkpoint after training stops.
    visualizer.save_final_plots()
    model.save_networks('latest', epoch, best_val_metric, best_val_ep)
    model.save_networks(epoch, epoch, best_val_metric, best_val_ep)
    logging.info('Finished Training')
|
def validate(model, opt):
    """Run one pass over the paired validation set.

    Returns an OrderedDict mapping '<loss_name>_val' to the sample-weighted
    average of that loss over the validation set. The model must already be
    in eval mode.
    """
    logging.info('Starting evaluation loop ...')
    model.reset()
    assert (not model.net_D.training)
    with_mask = (opt.model == 'patch_inconsistency_discriminator')
    if with_mask:
        val_dset = PairedDataset(opt, os.path.join(opt.real_im_path), os.path.join(opt.fake_im_path), with_mask=with_mask)
    else:
        # Image-only models read from the val/ split subfolders.
        val_dset = PairedDataset(opt, os.path.join(opt.real_im_path, 'val'), os.path.join(opt.fake_im_path, 'val'), with_mask=with_mask)
    val_dl = DataLoader(val_dset, batch_size=opt.batch_size, num_workers=opt.nThreads, pin_memory=False, shuffle=True)
    val_losses = OrderedDict(((name + '_val'), util.AverageMeter()) for name in model.loss_names)
    fake_label = opt.fake_class_id
    real_label = (1 - fake_label)
    val_start_time = time.time()
    device = opt.gpu_ids[0]
    for batch in val_dl:
        reals = batch['original'].to(device)
        fakes = batch['manipulated'].to(device)
        real_targets = real_label * torch.ones(reals.shape[0], dtype=torch.long).to(device)
        fake_targets = fake_label * torch.ones(fakes.shape[0], dtype=torch.long).to(device)
        inputs = dict(ims=torch.cat((reals, fakes), axis=0),
                      labels=torch.cat((real_targets, fake_targets), axis=0))
        if with_mask:
            inputs['masks'] = torch.cat((batch['mask_original'].to(device),
                                         batch['mask_manipulated'].to(device)), axis=0)
        model.reset()
        model.set_input(inputs)
        model.test(True)
        for name, value in model.get_current_losses().items():
            val_losses[name + '_val'].update(value, n=len(inputs['labels']))
    # Collapse each meter to its average value.
    for name, meter in val_losses.items():
        val_losses[name] = meter.avg
    return val_losses
|
def train(opt):
    """Training loop for the I2G (inconsistency image generator) setup.

    Each epoch trains over a freshly re-sampled set of frames
    (dset.get32frames()) and ends with a validation pass; checkpoints the
    latest/best models and stops on early-stopping patience, perfect
    validation accuracy, or opt.max_epochs.

    NOTE(review): this redefines `train` from earlier in this file; if both
    are loaded into one module this definition wins -- confirm the two come
    from separate training scripts.
    """
    torch.manual_seed(opt.seed)
    dset = I2GDataset(opt, os.path.join(opt.real_im_path, 'train'))
    # Sample the subset of frames used for this epoch.
    dset.get32frames()
    dl = DataLoader(dset, batch_size=opt.batch_size, num_workers=opt.nThreads, pin_memory=False, shuffle=True)
    assert (opt.fake_class_id in [0, 1])
    fake_label = opt.fake_class_id
    real_label = (1 - fake_label)
    logging.info(('real label = %d' % real_label))
    logging.info(('fake label = %d' % fake_label))
    dataset_size = len(dset)
    logging.info(('# total images = %d' % dataset_size))
    logging.info(('# total batches = %d' % len(dl)))
    model = create_model(opt)
    # setup() restores epoch/best-metric state when resuming from a checkpoint.
    (epoch, best_val_metric, best_val_ep) = model.setup(opt)
    visualizer_losses = (model.loss_names + [(n + '_val') for n in model.loss_names])
    visualizer = Visualizer(opt, visualizer_losses, model.visual_names)
    total_batches = (epoch * len(dl))
    t_data = 0
    now = time.strftime('%c')
    logging.info(('================ Training Loss (%s) ================\n' % now))
    while True:
        epoch_start_time = time.time()
        iter_data_time = time.time()
        epoch_iter = 0
        for (i, ims) in enumerate(dl):
            images = ims['img'].to(opt.gpu_ids[0])
            masks = ims['mask'].to(opt.gpu_ids[0])
            labels = ims['label'].to(opt.gpu_ids[0])
            batch_im = images
            batch_mask = masks
            batch_label = labels
            batch_data = dict(ims=batch_im, masks=batch_mask, labels=batch_label)
            iter_start_time = time.time()
            # Periodically sample how long data loading took this iteration.
            if ((total_batches % opt.print_freq) == 0):
                t_data = (iter_start_time - iter_data_time)
            total_batches += 1
            epoch_iter += 1
            model.reset()
            model.set_input(batch_data)
            model.optimize_parameters()
            if ((epoch_iter % opt.print_freq) == 0):
                losses = model.get_current_losses()
                t = (time.time() - iter_start_time)
                visualizer.print_current_losses(epoch, (float(epoch_iter) / len(dl)), total_batches, losses, t, t_data)
                visualizer.plot_current_losses(total_batches, losses)
            if ((epoch_iter % opt.save_latest_freq) == 0):
                logging.info(('saving the latest model (epoch %d, total_batches %d)' % (epoch, total_batches)))
                model.save_networks('latest', epoch, best_val_metric, best_val_ep)
                model.reset()
            iter_data_time = time.time()
        # ---- end-of-epoch validation ----
        model.eval()
        val_start_time = time.time()
        val_losses = validate(model, opt)
        visualizer.plot_current_losses(epoch, val_losses)
        logging.info('Printing validation losses:')
        visualizer.print_current_losses(epoch, 0.0, total_batches, val_losses, (time.time() - val_start_time), 0.0)
        model.train()
        model.reset()
        assert model.net_D.training
        if (val_losses[(model.val_metric + '_val')] > best_val_metric):
            # New best validation metric: record it and checkpoint.
            logging.info(('Updating best val mode at ep %d' % epoch))
            logging.info(('The previous values: ep %d, val %0.2f' % (best_val_ep, best_val_metric)))
            best_val_ep = epoch
            best_val_metric = val_losses[(model.val_metric + '_val')]
            logging.info(('The updated values: ep %d, val %0.2f' % (best_val_ep, best_val_metric)))
            model.save_networks('bestval', epoch, best_val_metric, best_val_ep)
            with open(os.path.join(model.save_dir, 'bestval_ep.txt'), 'a') as f:
                f.write(('ep: %d %s: %f\n' % (epoch, (model.val_metric + '_val'), best_val_metric)))
        elif (epoch > (best_val_ep + (5 * opt.patience))):
            # Early stopping: no improvement for 5 * patience epochs.
            logging.info(('Current epoch %d, last updated val at ep %d' % (epoch, best_val_ep)))
            logging.info('Stopping training...')
            break
        elif (best_val_metric == 1):
            logging.info('Reached perfect val accuracy metric')
            logging.info('Stopping training...')
            break
        elif (opt.max_epochs and (epoch > opt.max_epochs)):
            logging.info('Reached max epoch count')
            logging.info('Stopping training...')
            break
        logging.info(('Best val ep: %d' % best_val_ep))
        logging.info(('Best val metric: %0.2f' % best_val_metric))
        visualizer.save_final_plots()
        if (((epoch % opt.save_epoch_freq) == 0) and (epoch > 0)):
            logging.info(('saving the model at the end of epoch %d, total batches %d' % (epoch, total_batches)))
            model.save_networks('latest', epoch, best_val_metric, best_val_ep)
            model.save_networks(epoch, epoch, best_val_metric, best_val_ep)
        logging.info(('End of epoch %d \t Time Taken: %d sec' % (epoch, (time.time() - epoch_start_time))))
        model.update_learning_rate(metric=val_losses[(model.val_metric + '_val')])
        epoch += 1
        # Re-sample frames and rebuild the loader for the next epoch.
        dset.get32frames()
        dl = DataLoader(dset, batch_size=opt.batch_size, num_workers=opt.nThreads, pin_memory=False, shuffle=True)
    # Final checkpoint after training stops.
    visualizer.save_final_plots()
    model.save_networks('latest', epoch, best_val_metric, best_val_ep)
    model.save_networks(epoch, epoch, best_val_metric, best_val_ep)
    logging.info('Finished Training')
|
def validate(model, opt):
    """Run one pass over the I2G validation split.

    Returns an OrderedDict mapping '<loss_name>_val' to the sample-weighted
    average of that loss. The model must already be in eval mode.
    """
    logging.info('Starting evaluation loop ...')
    model.reset()
    assert (not model.net_D.training)
    val_dset = I2GDataset(opt, os.path.join(opt.real_im_path, 'val'), is_val=True)
    # Sample the frame subset used for this validation pass.
    val_dset.get32frames()
    val_dl = DataLoader(val_dset, batch_size=opt.batch_size, num_workers=opt.nThreads, pin_memory=False, shuffle=True)
    val_losses = OrderedDict(((name + '_val'), util.AverageMeter()) for name in model.loss_names)
    fake_label = opt.fake_class_id
    real_label = (1 - fake_label)
    val_start_time = time.time()
    device = opt.gpu_ids[0]
    for batch in val_dl:
        inputs = dict(ims=batch['img'].to(device),
                      masks=batch['mask'].to(device),
                      labels=batch['label'].to(device))
        model.reset()
        model.set_input(inputs)
        model.test(True)
        for name, value in model.get_current_losses().items():
            val_losses[name + '_val'].update(value, n=len(inputs['labels']))
    # Collapse each meter to its average value.
    for name, meter in val_losses.items():
        val_losses[name] = meter.avg
    return val_losses
|
class TqdmLoggingHandler(logging.Handler):
    """Logging handler that writes through tqdm.tqdm.write so log lines do
    not corrupt an active tqdm progress bar."""

    def __init__(self, level=logging.NOTSET):
        # Fix: plain super() instead of super(self.__class__, self), which
        # recurses infinitely if a subclass instance reaches this __init__.
        super().__init__(level)

    def emit(self, record):
        """Format the record and print it above any active progress bar."""
        try:
            msg = self.format(record)
            tqdm.tqdm.write(msg)
            self.flush()
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception:
            # Narrowed from a bare `except:`; defer to logging's standard
            # error handling instead of silently swallowing BaseExceptions.
            self.handleError(record)
|
class MultiLineFormatter(logging.Formatter):
    """Formatter that re-applies the format prefix to every line of a
    multi-line message, so continuation lines stay aligned in the log."""

    def __init__(self, fmt=None, datefmt=None, style='%'):
        # Only %-style formats are supported (self._fmt is used directly).
        assert (style == '%')
        super(MultiLineFormatter, self).__init__(fmt, datefmt, style)
        # Format string applied to each continuation line.
        self.multiline_fmt = fmt

    def format(self, record):
        """Format `record`, prefixing every line of the message (and of any
        exception text) with the configured format.

        This is mostly the same as logging.Formatter.format except for the
        splitlines() handling; the code is copied rather than delegated to
        avoid making logging a bottleneck.
        """
        record.message = record.getMessage()
        if self.usesTime():
            record.asctime = self.formatTime(record, self.datefmt)
        if ('\n' in record.message):
            # Format the first line normally, then each remaining line with
            # the same prefix.
            splitted = record.message.splitlines()
            output = (self._fmt % dict(record.__dict__, message=splitted.pop(0)))
            output += (' \n' + '\n'.join(((self.multiline_fmt % dict(record.__dict__, message=line)) for line in splitted)))
        else:
            output = (self._fmt % record.__dict__)
        if record.exc_info:
            # Cache the traceback text once, like logging.Formatter does.
            if (not record.exc_text):
                record.exc_text = self.formatException(record.exc_info)
        if record.exc_text:
            output += ' \n'
            try:
                output += '\n'.join(((self.multiline_fmt % dict(record.__dict__, message=line)) for (index, line) in enumerate(record.exc_text.splitlines())))
            except UnicodeError:
                # NOTE(review): this fallback calls .decode on exc_text, which
                # only exists on bytes -- looks like a Python 2 leftover that
                # cannot trigger on Python 3 str; confirm before relying on it.
                output += '\n'.join(((self.multiline_fmt % dict(record.__dict__, message=line)) for (index, line) in enumerate(record.exc_text.decode(sys.getfilesystemencoding(), 'replace').splitlines())))
        return output
|
def handle_exception(exc_type, exc_value, exc_traceback):
    """sys.excepthook replacement: log uncaught exceptions, but let
    KeyboardInterrupt fall through to the default hook."""
    if not issubclass(exc_type, KeyboardInterrupt):
        logging.error('Uncaught exception', exc_info=(exc_type, exc_value, exc_traceback))
        return
    sys.__excepthook__(exc_type, exc_value, exc_traceback)
|
def configure(logging_file, log_level=logging.INFO, level_prefix='', prefix='', write_to_stdout=True, append=True):
    """Configure root logging with a tqdm-aware console handler and an
    optional file handler, replacing any existing handlers.

    Returns the root logger. Messages about the log file itself are deferred
    until the handlers are installed so they land in the log.
    """
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    # Route uncaught exceptions through the logging system.
    sys.excepthook = handle_exception

    handlers = []
    deferred = []  # (log_fn, message) pairs emitted once handlers exist
    if write_to_stdout:
        handlers.append(TqdmLoggingHandler())
    if logging_file is not None:
        deferred.append((logging.info, 'Logging to {}'.format(logging_file)))
        if append:
            if os.path.isfile(logging_file):
                deferred.append((logging.warning, 'Log file already exists, will append'))
            handlers.append(logging.FileHandler(logging_file))
        else:
            deferred.append((logging.warning, 'Creating {} with mode write'.format(logging_file)))
            handlers.append(logging.FileHandler(logging_file, mode='w'))

    formatter = MultiLineFormatter('{}%(asctime)s [{}%(levelname)-5s] %(message)s'.format(prefix, level_prefix), '%Y-%m-%d %H:%M:%S')
    # Replace any previously installed handlers wholesale.
    root.handlers = []
    for handler in handlers:
        handler.setFormatter(formatter)
        root.addHandler(handler)
    root.setLevel(log_level)
    for log_fn, message in deferred:
        log_fn(message)
    return root
|
@contextlib.contextmanager
def disable(level):
    """Context manager that disables logging up to `level` inside the block.

    Fixes two defects in the original: (1) the previous state was restored
    from the root logger's *effective level* rather than the manager's prior
    `disable` value, permanently suppressing messages after exit; (2) no
    try/finally, so an exception in the body left logging disabled.
    """
    prev_disable = logging.root.manager.disable
    logging.disable(level)
    try:
        yield
    finally:
        logging.disable(prev_disable)
|
class Options():
    """Base option parser: parses CLI args, optionally merges values from a
    yaml config file (CLI-specified options win), and can persist the
    resolved options to opt.txt / opt.yml."""

    def __init__(self):
        self.parser = parser = argparse.ArgumentParser()
        self.parser.add_argument('config_file', nargs='?', type=argparse.FileType(mode='r'))
        self.parser.add_argument('--overwrite_config', action='store_true', help='overwrite config files if they exist')

    def print_options(self, opt):
        """Pretty-print options (flagging non-default values) and save them
        to <expr_dir>/opt.txt and <expr_dir>/opt.yml.

        Raises AssertionError if the files already exist and
        --overwrite_config was not given.
        """
        opt_dict = OrderedDict()
        message = ''
        message += '----------------- Options ---------------\n'
        # Bugfix: grouped_k was referenced without ever being initialized,
        # so any argparse.Namespace value raised NameError.
        grouped_k = []
        for (k, v) in sorted(vars(opt).items()):
            if (type(v) == argparse.Namespace):
                # Nested option groups are collected but not serialized.
                grouped_k.append((k, v))
                continue
            comment = ''
            default = self.parser.get_default(k)
            if (v != default):
                comment = ('\t[default: %s]' % str(default))
            message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
            opt_dict[k] = v
        message += '----------------- End -------------------'
        print(message)
        # Save alongside checkpoints when a checkpoint dir is configured.
        if (hasattr(opt, 'checkpoints_dir') and hasattr(opt, 'name')):
            if (opt.name != ''):
                expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
            else:
                expr_dir = os.path.join(opt.checkpoints_dir)
        else:
            expr_dir = './'
        os.makedirs(expr_dir, exist_ok=True)
        file_name = os.path.join(expr_dir, 'opt.txt')
        if (not opt.overwrite_config):
            assert (not os.path.isfile(file_name)), 'config file exists, use --overwrite_config'
        with open(file_name, 'wt') as opt_file:
            opt_file.write(message)
            opt_file.write('\n')
        file_name = os.path.join(expr_dir, 'opt.yml')
        if (not opt.overwrite_config):
            assert (not os.path.isfile(file_name)), 'config file exists, use --overwrite_config'
        with open(file_name, 'wt') as opt_file:
            # Never persist overwrite_config=True, so a saved config cannot
            # silently clobber files when reloaded.
            opt_dict['overwrite_config'] = False
            yaml.dump(opt_dict, opt_file, default_flow_style=False)

    def parse(self, print_opt=True):
        """Parse sys.argv, overlaying yaml config values for any option the
        user did not pass explicitly on the command line."""
        opt = self.parser.parse_args()
        if opt.config_file:
            # Explicit Loader: yaml.load without one is deprecated and unsafe
            # on untrusted input.
            data = yaml.load(opt.config_file, Loader=yaml.FullLoader)
        else:
            data = {}
        # Map each option string (e.g. '--lr') to its dest name.
        option_strings = {}
        for action_group in self.parser._action_groups:
            for action in action_group._group_actions:
                for option in action.option_strings:
                    option_strings[option] = action.dest
        specified_options = set([option_strings[x] for x in sys.argv if (x in option_strings)])
        args = {}
        for group in self.parser._action_groups:
            # Fix: Python 3.10+ renamed the default group title from
            # 'optional arguments' to 'options'.
            assert (group.title in ['positional arguments', 'optional arguments', 'options'])
            group_dict = {a.dest: (data[a.dest] if ((a.dest in data) and (a.dest not in specified_options)) else getattr(opt, a.dest, None)) for a in group._group_actions}
            args.update(group_dict)
        opt = argparse.Namespace(**args)
        # The open file handle is consumed above and should not leak into opt.
        delattr(opt, 'config_file')
        if print_opt:
            self.print_options(opt)
        self.opt = opt
        return opt
|
def verbose(verbose):
    """Set the module-wide default verbosity.

    Pass True to show progress bars, False to silence them, or a custom
    progress-wrapping callable to use instead of tqdm.
    """
    global default_verbosity
    default_verbosity = verbose
|
def post(**kwargs):
    """Show `key=value` status on the right of the innermost progress bar.

    Does nothing when no progress bar is currently visible.
    """
    bar = innermost_tqdm()
    if bar:
        bar.set_postfix(**kwargs)
|
def desc(desc):
    """Change the left-hand description of the innermost progress bar.

    Does nothing when no progress bar is currently visible.
    """
    bar = innermost_tqdm()
    if bar:
        bar.set_description(str(desc))
|
def descnext(desc):
    """Queue a description to be used by the next progress loop.

    No-op when verbosity is off or tqdm is unavailable.
    """
    global next_description
    if ((not default_verbosity) or (tqdm is None)):
        return
    next_description = desc
|
def print(*args):
    """
    When within a progress loop, will print above the progress loop.
    """
    import builtins  # local import: this module shadows the builtin name
    global next_description
    next_description = None
    if default_verbosity:
        msg = ' '.join((str(s) for s in args))
        if (tqdm is None):
            # Bugfix: the bare name `print` resolves to this function (the
            # module shadows the builtin), so the original recursed forever.
            builtins.print(msg)
        else:
            tqdm.write(msg)
|
def tqdm_terminal(it, *args, **kwargs):
    """Wrap `it` in a tqdm bar tuned for resizable terminals.

    Uses dynamic column width and plain-ascii drawing; only the innermost
    bar is cleared on completion.
    """
    return tqdm(it, *args, dynamic_ncols=True, ascii=True,
                leave=(not innermost_tqdm()), **kwargs)
|
def in_notebook():
    """Return True if running inside a Jupyter notebook kernel."""
    try:
        # get_ipython only exists inside IPython sessions.
        shell_name = get_ipython().__class__.__name__
    except NameError:
        return False
    # ZMQInteractiveShell is the notebook kernel; terminal IPython and
    # anything else count as "not a notebook".
    return shell_name == 'ZMQInteractiveShell'
|
def innermost_tqdm():
    """Return the innermost active tqdm progress bar, or None."""
    instances = getattr(tqdm, '_instances', None)
    if not instances:
        return None
    # The innermost bar is the one with the greatest screen position.
    return max(instances, key=(lambda bar: bar.pos))
|
def __call__(x, *args, **kwargs):
    """Wrap an iterable in a progress bar when verbosity allows.

    Returns `x` unchanged when verbosity is off or tqdm is missing. If
    `default_verbosity` is a callable it is used as the wrapper; otherwise a
    notebook- or terminal-appropriate tqdm is chosen. A description queued
    via descnext() is applied to this (and only this) loop.
    """
    global default_verbosity, next_description
    if ((tqdm is None) or (not default_verbosity)):
        return x
    if (default_verbosity == True):
        fn = (tqdm_notebook if in_notebook() else tqdm_terminal)
    else:
        fn = default_verbosity
    if (next_description is not None):
        kwargs = dict(kwargs, desc=next_description)
        next_description = None
    return fn(x, *args, **kwargs)
|
class CallableModule(types.ModuleType):
    """Module subclass that copies the current module's namespace and
    forwards calls to the module-level __call__, so the module itself can be
    used as a progress-wrapping function."""

    def __init__(self):
        super().__init__(__name__)
        self.__dict__.update(sys.modules[__name__].__dict__)

    def __call__(self, x, *args, **kwargs):
        return __call__(x, *args, **kwargs)
|
def exit_if_job_done(directory, redo=False, force=False, verbose=True):
    """Exit the process if another job holds this directory's lock or has
    already finished it; with redo/force, clear the done marker instead."""
    if pidfile_taken(os.path.join(directory, 'lockfile.pid'), force=force, verbose=verbose):
        sys.exit(0)
    donefile = os.path.join(directory, 'done.txt')
    if not os.path.isfile(donefile):
        return
    with open(donefile) as f:
        msg = f.read()
    if not (redo or force):
        if verbose:
            print('%s %s' % (donefile, msg))
        sys.exit(0)
    if verbose:
        print('Removing %s %s' % (donefile, msg))
    os.remove(donefile)
|
def mark_job_done(directory):
    """Write done.txt into `directory`, recording pid, host, screen session
    (STY), and wall-clock time of completion."""
    stamp = 'done by %d@%s %s at %s' % (
        os.getpid(), socket.gethostname(), os.getenv('STY', ''), time.strftime('%c'))
    with open(os.path.join(directory, 'done.txt'), 'w') as f:
        f.write(stamp)
|
def pidfile_taken(path, verbose=False, force=False):
    """
    Usage. To grab an exclusive lock for the remaining duration of the
    current process (and exit if another process already has the lock),
    do this:

    if pidfile_taken('job_423/lockfile.pid', verbose=True):
        sys.exit(0)

    To do a batch of jobs, just run a script that does them all on
    each available machine, sharing a network filesystem. When each
    job grabs a lock, then this will automatically distribute the
    jobs so that each one is done just once on one machine.

    Returns None when the lock was acquired, otherwise a string
    describing the current holder.
    """
    try:
        os.makedirs(os.path.dirname(path), exist_ok=True)
        # O_CREAT|O_EXCL makes creation atomic: exactly one process wins.
        fd = os.open(path, ((os.O_CREAT | os.O_EXCL) | os.O_RDWR))
    except OSError as e:
        if (e.errno == errno.EEXIST):
            # Another process holds (or held) the lock; try to read who.
            conflicter = 'race'
            try:
                with open(path, 'r') as lockfile:
                    conflicter = (lockfile.read().strip() or 'empty')
            except:
                # Best-effort read: the holder may delete the file meanwhile.
                pass
            if force:
                if verbose:
                    print(('Removing %s from %s' % (path, conflicter)))
                os.remove(path)
                # Retry once without force so we don't loop stealing locks.
                return pidfile_taken(path, verbose=verbose, force=False)
            if verbose:
                print(('%s held by %s' % (path, conflicter)))
            return conflicter
        else:
            raise
    # Lock acquired: record our identity and arrange cleanup at exit.
    lockfile = os.fdopen(fd, 'r+')
    atexit.register(delete_pidfile, lockfile, path)
    lockfile.write(('%d@%s %s\n' % (os.getpid(), socket.gethostname(), os.getenv('STY', ''))))
    lockfile.flush()
    # fsync so the identity is visible to other hosts on network filesystems.
    os.fsync(lockfile)
    return None
|
def delete_pidfile(lockfile, path):
    """
    Runs at exit after pidfile_taken succeeds: best-effort close of the
    lock file handle and removal of the lock file itself.
    """
    if (lockfile is not None):
        try:
            lockfile.close()
        except Exception:
            # Narrowed from bare `except:` so SystemExit/KeyboardInterrupt
            # during interpreter shutdown are not swallowed.
            pass
    try:
        os.unlink(path)
    except Exception:
        # The file may already be gone (e.g. removed by a forcing process).
        pass
|
def blocks(obj, space=''):
    """Render `obj` as inline-block HTML for IPython display."""
    markup = space.join(blocks_tags(obj))
    return IPython.display.HTML(markup)
|
def rows(obj, space=''):
    """Render `obj` as an HTML table (one row per item) for IPython display."""
    markup = space.join(rows_tags(obj))
    return IPython.display.HTML(markup)
|
def rows_tags(obj):
    """Return a list of HTML tag strings laying `obj` out as a table, one
    table row per item; dicts are rendered as (key, value) rows."""
    if isinstance(obj, dict):
        obj = obj.items()
    tags = ['<table style="display:inline-table">']
    for row in obj:
        tags.append('<tr style="padding:0">')
        for item in row:
            tags.append(('<td style="text-align:left; vertical-align:top;' + 'padding:1px">'))
            tags.extend(blocks_tags(item))
            tags.append('</td>')
        tags.append('</tr>')
    tags.append('</table>')
    return tags
|
def blocks_tags(obj):
    """Recursively convert `obj` into a list of HTML tag strings.

    PIL images become <img> tags; scalars/strings become escaped <div>s;
    IPython HTML objects pass through; dicts become (key, value) pairs;
    other iterables become either an inline block or -- when the first item
    is a tuple -- an HTML table with one row per item.
    """
    results = []
    if isinstance(obj, PIL.Image.Image):
        results.append(pil_to_html(obj))
    elif isinstance(obj, (str, int, float)):
        results.append('<div>')
        results.append(html_module.escape(str(obj)))
        results.append('</div>')
    elif isinstance(obj, IPython.display.HTML):
        # Trust pre-built HTML as-is.
        results.append(obj.data)
    elif isinstance(obj, dict):
        # Render a dict as a sequence of (key, value) rows.
        results.extend(blocks_tags([(k, v) for (k, v) in obj.items()]))
    elif hasattr(obj, '__iter__'):
        (blockstart, blockend, tstart, tend, rstart, rend, cstart, cend) = [('<div style="display:inline-block;text-align:center;line-height:1;' + 'vertical-align:top;padding:1px">'), '</div>', '<table style="display:inline-table">', '</table>', '<tr style="padding:0">', '</tr>', '<td style="text-align:left; vertical-align:top; padding:1px">', '</td>']
        needs_end = False
        table_mode = False
        for (i, line) in enumerate(obj):
            if (i == 0):
                # Decide block vs table layout from the first item only.
                needs_end = True
                if isinstance(line, tuple):
                    table_mode = True
                    results.append(tstart)
                else:
                    results.append(blockstart)
            if table_mode:
                results.append(rstart)
                if ((not isinstance(line, str)) and hasattr(line, '__iter__')):
                    # Iterable row: one cell per element.
                    for cell in line:
                        results.append(cstart)
                        results.extend(blocks_tags(cell))
                        results.append(cend)
                else:
                    # Scalar row: single cell.
                    results.append(cstart)
                    results.extend(blocks_tags(line))
                    results.append(cend)
                results.append(rend)
            else:
                results.extend(blocks_tags(line))
        if needs_end:
            # Close whichever container was opened for the first item.
            results.append(((table_mode and tend) or blockend))
    return results
|
def pil_to_b64(img, format='png'):
    """Encode a PIL image as a base64 string (no data-URI prefix)."""
    with io.BytesIO() as buffered:
        img.save(buffered, format=format)
        raw = buffered.getvalue()
    return base64.b64encode(raw).decode('utf-8')
|
def pil_to_url(img, format='png'):
    """Return a data-URI embedding the image in base64."""
    encoded = pil_to_b64(img, format)
    return 'data:image/%s;base64,%s' % (format, encoded)
|
def pil_to_html(img, margin=1):
    """Return an <img> tag embedding the image as a data URI."""
    style_attr = (' style="margin:%dpx"' % margin)
    return ('<img src="%s"%s>' % (pil_to_url(img), style_attr))
|
def a(x, cols=None):
    """Append `x` to the global display buffer; flush automatically once
    `cols` items have accumulated (never flushes when cols is None)."""
    global g_buffer
    if g_buffer is None:
        g_buffer = []
    g_buffer.append(x)
    if cols is not None and len(g_buffer) >= cols:
        flush()
|
def reset():
    """Discard any buffered display items."""
    global g_buffer
    g_buffer = []
|
def flush(*args, **kwargs):
    """Display the buffered items (if any) and clear the buffer."""
    global g_buffer
    if g_buffer is None:
        return
    pending, g_buffer = g_buffer, None
    display(blocks(pending, *args, **kwargs))
|
def show(x=None, *args, **kwargs):
    """Flush any buffered items, then display `x` if one was given."""
    flush(*args, **kwargs)
    if x is None:
        return
    display(blocks(x, *args, **kwargs))
|
class CallableModule(types.ModuleType):
    """Module subclass that copies the current module's namespace and
    forwards calls to show(), so the module itself is callable."""

    def __init__(self):
        super().__init__(__name__)
        self.__dict__.update(sys.modules[__name__].__dict__)

    def __call__(self, x=None, *args, **kwargs):
        show(x, *args, **kwargs)
|
class LinePlotter(object):
    """Accumulates scalar (x, y) pairs for one tag, mirrors them to a
    summary writer, and can dump the whole series to an .npz file."""

    def __init__(self, writer, tag):
        # writer: object exposing add_scalar(tag, value, step, walltime)
        # (e.g. a tensorboard SummaryWriter).
        self.writer = writer
        self.tag = tag

    def plot(self, x, data, walltime=None):
        """Record one (x, data) point and forward it to the writer."""
        # History is created lazily so pre-existing instances without it
        # (e.g. restored state) still work.
        if (not hasattr(self, 'plot_data')):
            self.plot_data = {'X': [], 'Y': []}
        self.plot_data['X'].append(x)
        self.plot_data['Y'].append(data)
        self.writer.add_scalar(self.tag, data, x, walltime)

    def save_final_plot(self, save_dir):
        """Save the accumulated series to <save_dir>/<tag>.npz, with '/' in
        the tag replaced by '_'. No-op if nothing was plotted."""
        save_path = os.path.join(save_dir, '{}'.format(self.tag.replace('/', '_')))
        # Bugfix: the original called os.makedirs(save_path), creating a
        # spurious empty directory named like the .npz target; only the
        # parent directory needs to exist.
        os.makedirs(save_dir, exist_ok=True)
        if hasattr(self, 'plot_data'):
            save_data = dict(X=np.array(self.plot_data['X']), Y=np.array(self.plot_data['Y']))
            np.savez((save_path + '.npz'), **save_data)
            # Log the actual file written (with its extension).
            logging.info('Saved to {}.npz'.format(save_path))
|
class ImageGridPlotter(object):
    """Sends model visuals to a summary writer, either as one combined image
    grid (grid=True) or as one image entry per label."""

    def __init__(self, writer, ncols, grid=False):
        self.ncols = ncols  # maximum images per grid row
        self.writer = writer  # object exposing add_image / add_text
        self.grid = grid  # True: single combined grid; False: per-label images

    def plot(self, visuals, niter=0):
        """Plot `visuals` (ordered dict of label -> image batch) at step
        `niter`. Only the first image of each batch is shown."""
        ncols = self.ncols
        ncols = min(ncols, len(visuals))
        if self.grid:
            images = []
            # Labels string mirrors the grid layout: '|' between columns,
            # '||' at row breaks.
            labels = '|'
            idx = 0
            for (label, im) in visuals.items():
                images.append(im[0])
                labels += (label + '|')
                idx += 1
                if (((idx % ncols) == 0) and (idx > 0)):
                    labels += '||'
            # Pad the last row with blank (white) images so the grid is full.
            blank_image = torch.ones_like(images[0])
            while ((idx % ncols) != 0):
                images.append(blank_image)
                idx += 1
                labels += ' |'
            self.writer.add_text('Visuals Labels', labels, niter)
            x = vutils.make_grid(images, normalize=True, nrow=ncols)
            self.writer.add_image('Visuals', x, niter)
        else:
            # One writer entry per label.
            for (label, im) in visuals.items():
                x = vutils.make_grid([im[0]], normalize=True)
                self.writer.add_image(label, x, niter)
|
def remove_prefix(s, prefix):
    """Return `s` with `prefix` stripped from the front when present,
    otherwise `s` unchanged."""
    return s[len(prefix):] if s.startswith(prefix) else s
|
def get_subset_dict(in_dict, keys):
    """Return an OrderedDict restricted to `keys` (in that order).

    When `keys` is empty, the original dict object itself is returned
    (not a copy) — callers rely on that aliasing.
    """
    if not len(keys):
        return in_dict
    return OrderedDict((k, in_dict[k]) for k in keys)
|
def datestring():
    """Current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    now = time.localtime()
    return time.strftime('%Y-%m-%d %H:%M:%S', now)
|
def format_str_one(v, float_prec=6, int_pad=1):
    """Format a single value for logging.

    1-element tensors are unwrapped first; floats get `float_prec`
    decimals; ints are zero-padded to `int_pad` digits (skip padding
    when int_pad is falsy); everything else goes through str().
    """
    if isinstance(v, torch.Tensor) and v.numel() == 1:
        v = v.item()
    if isinstance(v, float):
        return format(v, '.{}f'.format(float_prec))
    if isinstance(v, int) and int_pad:
        return format(v, '0{}d'.format(int_pad))
    return str(v)
|
def format_str(*args, format_opts=None, **kwargs):
    """Join formatted values with tab characters.

    Positional args are formatted via format_str_one; keyword args are
    rendered as 'key: value'. `format_opts` (dict or None) is forwarded
    as keyword options to format_str_one for every value.

    Fixed: `format_opts` previously used a mutable default argument
    (`{}`), a shared-state hazard; it is now a None sentinel.
    """
    opts = {} if format_opts is None else format_opts
    pieces = [format_str_one(arg, **opts) for arg in args]
    for (k, v) in kwargs.items():
        pieces.append('{}: {}'.format(k, format_str_one(v, **opts)))
    return '\t'.join(pieces)
|
def complete_device(device):
    """Normalize a device spec to a fully-specified torch.device.

    Falls back to CPU when CUDA is unavailable, and fills in the
    current CUDA device index when one was not given.
    """
    if not torch.cuda.is_available():
        return torch.device('cpu')
    dev = torch.device(device) if type(device) == str else device
    if dev.type == 'cuda' and dev.index is None:
        dev = torch.device(dev.type, torch.cuda.current_device())
    return dev
|
def check_timestamp(checkpoint_path, timestamp_path):
    """Return True when the recorded timestamp is missing or stale.

    Compares the checkpoint file's formatted mtime against the first
    line of `timestamp_path`; True means a reload is needed.
    """
    if not os.path.isfile(timestamp_path):
        print('No timestamp found')
        return True
    mtime = os.path.getmtime(checkpoint_path)
    current = datetime.fromtimestamp(mtime).strftime('%Y-%m-%d %H:%M:%S')
    with open(timestamp_path) as f:
        recorded = f.readlines()[0].strip()
    if recorded == current:
        print('Timestamp is correct')
        return False
    print('Timestamp out of date')
    return True
|
def update_timestamp(checkpoint_path, timestamp_path):
    """Record the checkpoint file's last-modified time (formatted) into
    the file at `timestamp_path`, overwriting any previous content."""
    stamp = datetime.fromtimestamp(os.path.getmtime(checkpoint_path)).strftime('%Y-%m-%d %H:%M:%S')
    with open(timestamp_path, 'w') as out:
        out.write('%s' % stamp)
|
class AverageMeter(object):
    """Tracks the most recent value and a running weighted average."""
    def __init__(self):
        self.reset()
    def reset(self):
        """Clear all statistics back to zero."""
        self.val = 0    # most recent value
        self.sum = 0    # weighted sum of values
        self.count = 0  # total weight seen so far
        self.avg = 0    # running average = sum / count
    def update(self, val, n=1):
        """Record `val` with weight `n` and refresh the average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
|
class Visualizer():
    """Tensorboard-backed training visualizer: one LinePlotter per loss
    name plus an ImageGridPlotter for qualitative results."""
    def __init__(self, opt, loss_names, visual_names=None):
        from . import tensorboard_utils as tb_utils
        self.name = opt.name
        self.opt = opt
        self.visual_names = visual_names
        tb_path = os.path.join('runs', self.name)
        if os.path.isdir(tb_path):
            logging.info(('Found existing tensorboard history at %s' % tb_path))
            if (not opt.overwrite_config):
                logging.info('Use --overwrite_config to write to existing tensorboard history')
                # NOTE(review): hard process exit inside a constructor;
                # callers cannot recover from this path.
                exit(0)
        self.writer = SummaryWriter(logdir=tb_path)
        self.plotters = []
        # One line plotter per loss; the first '_' becomes '/' so
        # losses group by prefix in the tensorboard UI.
        for name in loss_names:
            setattr(self, (name + '_plotter'), tb_utils.LinePlotter(self.writer, name.replace('_', '/', 1)))
            self.plotters.append(getattr(self, (name + '_plotter')))
        self.imgrid = tb_utils.ImageGridPlotter(self.writer, ncols=5, grid=True)
    def display_current_results(self, visuals, epoch):
        """Send the current visuals dict to the image-grid plotter."""
        self.imgrid.plot(visuals, epoch)
    def plot_current_losses(self, niter, losses):
        """Forward each named loss value to its dedicated line plotter."""
        for (k, v) in losses.items():
            plotter = getattr(self, (k + '_plotter'))
            plotter.plot(niter, v)
    def print_current_losses(self, epoch, iters, total_steps, losses, t, t_data, prefix=''):
        """Log a one-line summary of timing and loss values."""
        message = ('(epoch: %d, iters: %.3f, time: %.3f, data: %.3f) ' % (epoch, iters, t, t_data))
        message += prefix
        message += ' '
        for (k, v) in losses.items():
            message += ('%s: %.3f, ' % (k, v))
        logging.info(('%s' % message))
        logging.info(('Total batches: %0.2f k\n' % (total_steps / 1000)))
    def save_final_plots(self):
        """Persist every line plotter's history under the checkpoint dir."""
        save_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name, 'visualize')
        for plotter in self.plotters:
            plotter.save_final_plot(save_dir)
|
def init_matrix(data):
    """Prepare the TWED dynamic-programming matrix in place.

    The first row and first column act as boundary conditions (infinite
    cost), except the origin which costs 0. Returns the same object.
    """
    for i in range(len(data)):
        data[i][0] = float('inf')
    for j in range(len(data[0])):
        data[0][j] = float('inf')
    data[0][0] = 0
    return data

def LpDist(time_pt_1, time_pt_2):
    """L1 distance between two samples: |a - b| for int scalars,
    element-wise sum(|a - b|) otherwise (e.g. numpy vectors)."""
    if ((type(time_pt_1) == int) and (type(time_pt_2) == int)):
        return abs(time_pt_1 - time_pt_2)
    else:
        return sum(abs(time_pt_1 - time_pt_2))

def TWED(t1, t2, lam, nu):
    """Time Warp Edit Distance (Marteau, 2009) between two time series.

    Parameters:
        t1, t2 -- sequences of scalars or numpy vectors
        lam    -- deletion penalty (lambda)
        nu     -- stiffness coefficient penalizing timestamp gaps

    Timestamps are the 1-based sample indices. Returns the distance
    accumulated at the bottom-right DP cell.

    Fixed two deviations from the standard recurrence:
      * insertion/deletion previously computed nu*(dt + lam), scaling
        lambda by nu; lambda is now added outside the nu term.
      * the match term previously measured the distance between the
        *timestamps* of the previous samples; it now uses the previous
        *data* points, as the definition requires.
    """
    t1_data = t1
    t2_data = t2
    result = [([0] * len(t2_data)) for row in range(len(t1_data))]
    result = init_matrix(result)
    n = len(t1_data)
    m = len(t2_data)
    t1_time = range(1, (len(t1_data) + 1))
    t2_time = range(1, (len(t2_data) + 1))
    assert (len(t1_time) == n)
    assert (len(t2_time) == m)
    for i in range(1, n):
        for j in range(1, m):
            # delete in t1 (insertion into the alignment)
            insertion = (result[(i - 1)][j] + LpDist(t1_data[(i - 1)], t1_data[i])
                         + (nu * (t1_time[i] - t1_time[(i - 1)])) + lam)
            # delete in t2
            deletion = (result[i][(j - 1)] + LpDist(t2_data[(j - 1)], t2_data[j])
                        + (nu * (t2_time[j] - t2_time[(j - 1)])) + lam)
            # match current and previous sample pairs
            match = (result[(i - 1)][(j - 1)]
                     + LpDist(t1_data[i], t2_data[j])
                     + (nu * abs((t1_time[i] - t2_time[j])))
                     + LpDist(t1_data[(i - 1)], t2_data[(j - 1)])
                     + (nu * abs((t1_time[(i - 1)] - t2_time[(j - 1)]))))
            result[i][j] = min(insertion, deletion, match)
    return result[(n - 1)][(m - 1)]
|
class HyperParams():
    """Container for experiment hyper-parameter configurations."""
    def __init__(self):
        pass
    def get_uniwarp_config(self, argv):
        """Return the default UniWarp configuration dict.

        `argv` is currently unused; kept for interface compatibility.
        """
        config = {}
        config['optimizer:num_epochs'] = 1000000
        config['model:num_batch_pairs'] = 100
        config['uniwarp:length'] = 1024
        config['uniwarp:rnn_encoder_layers'] = [256, 128, 64]
        config['uniwarp:warp_nn_layers'] = [64, 16, 1]
        config['uniwarp:eta'] = 0.0001
        config['uniwarp:max_grad_norm'] = 10.0
        config['uniwarp:lambda'] = 0.0
        config['uniwarp:cnn_encoder_layers'] = [1024, 256, 64]
        config['uniwarp:cnn_kernel_lengths'] = [5, 5, 3]
        config['uniwarp:cnn_strides'] = [2, 1, 1]
        config['uniwarp:dropout_rate'] = 0.05
        config['uniwarp:enable_batch_normalization'] = True
        config['dataset:num_channels'] = 1
        return config
    @staticmethod
    def restore(file_path):
        """Load a configuration dict from a JSON file.

        Fixed: the original lacked `self`/@staticmethod (so instance
        calls passed the instance as `file_path`) and passed the path
        string itself to json.loads instead of reading the file.
        """
        with open(file_path) as f:
            return json.load(f)
|
class Inference_Experiments():
    """Inference-time evaluations for a trained similarity model:
    1-NN classification, pairwise distance matrices, and test
    accuracy / loss over sampled pairs.

    Parameters:
        model_type   -- 'SiameseRNN', 'WarpedSiameseRNN', 'CNNSim', or
                        'CNNWarpedSim'
        model_file   -- TF checkpoint file to restore
        dataset_path -- path forwarded to Dataset.load_multivariate
    """
    def __init__(self, model_type, model_file, dataset_path):
        self.model_type = model_type
        self.model_file = model_file
        self.dataset_path = dataset_path
        hp = HyperParams()
        self.config = hp.get_uniwarp_config(None)
        self.ds = Dataset()
        self.ds.load_multivariate(dataset_path)
        # Adapt the generic config to this dataset's geometry.
        self.config['uniwarp:length'] = self.ds.series_length
        self.config['dataset:num_channels'] = self.ds.num_channels
        self.model = None
        if (model_type == 'SiameseRNN'):
            self.model = rnn_models.SiameseRNN(config=self.config)
        elif (model_type == 'WarpedSiameseRNN'):
            self.model = rnn_models.WarpedSiameseRNN(config=self.config)
        elif (model_type == 'CNNSim'):
            self.model = cnn_models.CNNSim(config=self.config)
        elif (model_type == 'CNNWarpedSim'):
            self.model = cnn_models.CNNWarpedSim(config=self.config)
        else:
            # NOTE(review): self.model stays None here, so the
            # create_model() call below raises AttributeError.
            print('Test - No model of type', model_type)
        self.model.create_model()
        self.saver = tf.train.Saver()
        # Pair batches are interleaved: rows (2i, 2i+1) form pair i.
        self.X_batch = np.zeros(((2 * self.config['model:num_batch_pairs']), self.config['uniwarp:length'], self.config['dataset:num_channels']))
        self.true_sim_batch = np.zeros((self.config['model:num_batch_pairs'],))
        print('Model has', self.model.num_model_parameters(), 'parameters')
    def infer_dataset(self, start_pct, chunk_pct):
        """1-NN classify the test slice [start_pct, start_pct+chunk_pct)
        (fractions of the test set) against the training set, printing
        the running accuracy."""
        start_range = int((start_pct * self.ds.num_test_instances))
        stop_range = int(((start_pct + chunk_pct) * self.ds.num_test_instances))
        if (stop_range > self.ds.num_test_instances):
            stop_range = self.ds.num_test_instances
        with tf.Session() as sess:
            self.saver.restore(sess, self.model_file)
            (correct, num_infers) = (0, 0)
            # Placeholder; only printed in the summary line below.
            time = (- 1)
            for idx_test in range(start_range, stop_range):
                max_similarity = 0
                max_similarity_idx = 0
                # Compare against the train set one batch at a time;
                # the final window is shifted back so it stays full.
                for idx in range(0, self.ds.num_train_instances, self.config['model:num_batch_pairs']):
                    start_idx = idx
                    if ((idx + self.config['model:num_batch_pairs']) >= self.ds.num_train_instances):
                        start_idx = (self.ds.num_train_instances - self.config['model:num_batch_pairs'])
                    for i in range(self.config['model:num_batch_pairs']):
                        self.X_batch[(2 * i)] = self.ds.X_test[idx_test]
                        self.X_batch[((2 * i) + 1)] = self.ds.X_train[(start_idx + i)]
                    sim = sess.run(self.model.pred_similarities, feed_dict={self.model.X_batch: self.X_batch, self.model.is_training: False})
                    for i in range(self.config['model:num_batch_pairs']):
                        if (sim[i] >= max_similarity):
                            max_similarity = sim[i]
                            max_similarity_idx = (start_idx + i)
                if np.array_equal(self.ds.Y_test[idx_test], self.ds.Y_train[max_similarity_idx]):
                    correct += 1
                num_infers += 1
                print(idx_test, (correct / num_infers))
            # BUG FIX: `dataset_path` was an undefined free variable here.
            print(num_infers, correct, time, self.dataset_path)
    def test_pairwise_similarities(self, n, folder_path):
        """Compute an n x n distance matrix over the first n test series
        and save distances and labels as .npy files in `folder_path`."""
        num_test_series = n
        dists = np.zeros((num_test_series, num_test_series))
        with tf.Session() as sess:
            self.saver.restore(sess, self.model_file)
            pairs_list = []
            for i in np.arange(0, num_test_series, 1):
                for j in np.arange(0, num_test_series, 1):
                    pairs_list.append((i, j))
            num_pairs = len(pairs_list)
            batch_start_pair_idx = 0
            print('Num pairs:', len(pairs_list))
            while (batch_start_pair_idx < num_pairs):
                # The tail batch repeats the last pair to stay full.
                for i in range(self.config['model:num_batch_pairs']):
                    j = (batch_start_pair_idx + i)
                    if (j >= num_pairs):
                        j = (num_pairs - 1)
                    self.X_batch[(2 * i)] = self.ds.X_test[pairs_list[j][0]]
                    self.X_batch[((2 * i) + 1)] = self.ds.X_test[pairs_list[j][1]]
                sim = sess.run(self.model.pred_similarities, feed_dict={self.model.X_batch: self.X_batch, self.model.is_training: False})
                for i in range(self.config['model:num_batch_pairs']):
                    j = (batch_start_pair_idx + i)
                    if (j >= num_pairs):
                        j = (num_pairs - 1)
                    # Distance = 1 - predicted similarity.
                    dists[pairs_list[j][0]][pairs_list[j][1]] = (1.0 - sim[i])
                batch_start_pair_idx += self.config['model:num_batch_pairs']
            print(dists.shape)
            np.save(os.path.join(folder_path, (((self.model.name + '_') + self.ds.dataset_name) + '_dists.npy')), dists)
            np.save(os.path.join(folder_path, (((self.model.name + '_') + self.ds.dataset_name) + '_labels.npy')), self.ds.Y_test[:num_test_series])
    def pairwise_test_accuracy(self, num_test_batches):
        """Average binary accuracy (threshold 0.5) over sampled balanced
        batches of positive/negative test pairs."""
        test_acc = 0
        with tf.Session() as sess:
            self.saver.restore(sess, self.model_file)
            for i in range(num_test_batches):
                batch_pairs_idxs = []
                batch_true_similarities = []
                for j in range((self.config['model:num_batch_pairs'] // 2)):
                    pos_idxs = self.ds.draw_test_pair(True)
                    batch_pairs_idxs.append(pos_idxs[0])
                    batch_pairs_idxs.append(pos_idxs[1])
                    batch_true_similarities.append(1.0)
                    neg_idxs = self.ds.draw_test_pair(False)
                    batch_pairs_idxs.append(neg_idxs[0])
                    batch_pairs_idxs.append(neg_idxs[1])
                    batch_true_similarities.append(0.0)
                X_batch = np.take(a=self.ds.X_test, indices=batch_pairs_idxs, axis=0)
                sim_batch = np.asarray(batch_true_similarities)
                pred_similarities = sess.run(self.model.pred_similarities, feed_dict={self.model.X_batch: X_batch, self.model.true_similarities: sim_batch, self.model.is_training: False})
                pred_label = np.where((pred_similarities >= 0.5), 1, 0)
                test_acc += sklearn.metrics.accuracy_score(sim_batch, pred_label)
                print(i, (test_acc / (i + 1)))
            print((test_acc / num_test_batches))
    def transductive_test_loss(self, num_test_batches=100):
        """Average model loss over sampled balanced test-pair batches.

        Fixed: `num_test_batches` was previously an undefined free
        variable (NameError at runtime); it is now a parameter with a
        default of 100 batches.
        """
        test_loss = 0
        with tf.Session() as sess:
            self.saver.restore(sess, self.model_file)
            for i in range(num_test_batches):
                batch_pairs_idxs = []
                batch_true_similarities = []
                for j in range((self.config['model:num_batch_pairs'] // 2)):
                    pos_idxs = self.ds.draw_test_pair(True)
                    batch_pairs_idxs.append(pos_idxs[0])
                    batch_pairs_idxs.append(pos_idxs[1])
                    batch_true_similarities.append(1.0)
                    neg_idxs = self.ds.draw_test_pair(False)
                    batch_pairs_idxs.append(neg_idxs[0])
                    batch_pairs_idxs.append(neg_idxs[1])
                    batch_true_similarities.append(0.0)
                X_batch = np.take(a=self.ds.X_test, indices=batch_pairs_idxs, axis=0)
                sim_batch = np.asarray(batch_true_similarities)
                batch_loss = sess.run(self.model.loss, feed_dict={self.model.X_batch: X_batch, self.model.true_similarities: sim_batch, self.model.is_training: False})
                test_loss += batch_loss
                print(i, (test_loss / (i + 1)))
            print((test_loss / num_test_batches))
|
class Optimizer():
    """Training driver: each 'epoch' samples one balanced batch of
    positive/negative series pairs and applies a single gradient step."""
    def __init__(self, config, dataset, sim_model):
        self.config = config
        self.dataset = dataset
        self.num_epochs = self.config['optimizer:num_epochs']
        self.sim_model = sim_model
        # Keep up to 100 periodic checkpoints around.
        self.saver = tf.train.Saver(max_to_keep=100)
    def optimize(self):
        """Run the full training loop, logging and checkpointing every
        100 epochs."""
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            loss = 0
            freq = 100  # logging / checkpoint interval, in epochs
            for epoch_idx in range(self.num_epochs):
                batch_true_similarities = []
                batch_pairs_idxs = []
                # Interleave positive and negative pairs: the model
                # reads rows (2i, 2i+1) of X_batch as the i-th pair.
                for i in range((self.config['model:num_batch_pairs'] // 2)):
                    pos_idxs = self.dataset.draw_pair(True)
                    batch_pairs_idxs.append(pos_idxs[0])
                    batch_pairs_idxs.append(pos_idxs[1])
                    batch_true_similarities.append(1.0)
                    neg_idxs = self.dataset.draw_pair(False)
                    batch_pairs_idxs.append(neg_idxs[0])
                    batch_pairs_idxs.append(neg_idxs[1])
                    batch_true_similarities.append(0.0)
                pair_loss = self.update_model(sess, batch_pairs_idxs, batch_true_similarities)
                loss += pair_loss
                if ((epoch_idx % freq) == 0):
                    # Report the mean loss over the last `freq` epochs
                    # (the very first report is the single epoch-0 loss).
                    if (epoch_idx > 0):
                        loss /= freq
                    print('DS', epoch_idx, self.dataset.dataset_name, loss)
                    self.saver.save(sess, (((('./saved_models/' + self.sim_model.name) + '_') + self.dataset.dataset_name) + '.ckpt'), global_step=(epoch_idx // freq))
                    loss = 0
    def update_model(self, sess, batch_pairs_idxs, batch_true_similarities):
        """Evaluate the batch loss, then apply one gradient step.

        Returns the loss measured *before* the update (is_training=False),
        so the value reflects the model prior to this step.
        """
        X_batch = np.take(a=self.dataset.X_train, indices=batch_pairs_idxs, axis=0)
        sim_batch = np.asarray(batch_true_similarities)
        pair_loss = sess.run(self.sim_model.loss, feed_dict={self.sim_model.X_batch: X_batch, self.sim_model.true_similarities: sim_batch, self.sim_model.is_training: False})
        sess.run(self.sim_model.update_rule, feed_dict={self.sim_model.X_batch: X_batch, self.sim_model.true_similarities: sim_batch, self.sim_model.is_training: True})
        return pair_loss
|
class AbstractSimModel():
    """Base class for pairwise time-series similarity models, built in
    TF1 graph style. Subclasses supply the encoder, the similarity
    head, and the pair distance; create_model() wires them together
    with the optimization routine."""
    def __init__(self, config):
        self.config = config
        self.minus_one_constant = tf.constant((- 1.0), dtype=tf.float32)
        self.sequence_length = self.config['uniwarp:length']
        # Pairs are interleaved along the batch axis: rows (2i, 2i+1)
        # form the i-th pair, hence 2 * num_batch_pairs rows.
        self.X_batch = tf.placeholder(shape=((2 * self.config['model:num_batch_pairs']), self.config['uniwarp:length'], self.config['dataset:num_channels']), dtype=tf.float32)
        self.true_similarities = tf.placeholder(shape=(self.config['model:num_batch_pairs'],), dtype=tf.float32)
        self.pair_dists = None
        self.h = (None, None)
        # Filled in by subclass hooks / create_optimization_routine().
        (self.loss, self.pred_similarities, self.update_rule) = (None, None, None)
        self.reg_penalty = tf.constant(self.config['uniwarp:lambda'], dtype=tf.float32)
        self.name = 'AbstractSingleSimModel'
        self.is_training = tf.placeholder(tf.bool)
        # Optional extra penalty a subclass may set before the
        # optimization routine is created.
        self.additional_loss = None
    def num_model_parameters(self):
        """Total count of scalar trainable parameters in the graph.

        NOTE(review): `dim.value` is TF1-only; under TF2 shape dims are
        plain ints and this would fail.
        """
        total_parameters = 0
        for variable in tf.trainable_variables():
            shape = variable.get_shape()
            variable_parameters = 1
            for dim in shape:
                variable_parameters *= dim.value
            total_parameters += variable_parameters
        return total_parameters
    def create_encoder(self):
        # Abstract hook: subclasses must override.
        print('ERROR: Encoder left undefined')
        pass
    def create_similarity(self):
        # Abstract hook: subclasses must override.
        print('ERROR: Similarity left undefined')
        pass
    def dist_pair(self, pair_ixd):
        # Abstract hook: subclasses must override.
        print('ERROR: Distance of pairs left undefined')
        pass
    def create_optimization_routine(self):
        """Build the log loss (plus optional weighted penalty), global
        gradient clipping, and the Adam update op."""
        with tf.variable_scope('OptimizationRoutines'):
            self.loss = tf.losses.log_loss(self.true_similarities, self.pred_similarities)
            if (self.additional_loss is not None):
                print('Adding penalty term', self.additional_loss)
                self.loss += (self.reg_penalty * self.additional_loss)
            trainable_vars = tf.trainable_variables()
            # Ensure UPDATE_OPS (e.g. batch-norm statistics) run before
            # each gradient application.
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            with tf.control_dependencies(update_ops):
                (clipped_grads, _) = tf.clip_by_global_norm(tf.gradients(self.loss, trainable_vars), self.config['uniwarp:max_grad_norm'])
                self.update_rule = tf.train.AdamOptimizer(self.config['uniwarp:eta']).apply_gradients(zip(clipped_grads, trainable_vars))
    def create_model(self):
        """Assemble encoder, similarity head, and optimizer, in order."""
        self.create_encoder()
        self.create_similarity()
        self.create_optimization_routine()
|
class BaseArgs():
    '''
    Arguments for data, model, and checkpoints.

    Subclasses (TrainArgs / TestArgs) set is_train/split and register
    phase-specific options before parse() is invoked.
    '''
    def __init__(self):
        (self.is_train, self.split) = (None, None)
        self.parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        # Machine / data-loading options.
        self.parser.add_argument('--n_workers', type=int, default=8, help='number of threads')
        self.parser.add_argument('--gpus', type=str, default='0', help='visible GPU ids, separated by comma')
        self.parser.add_argument('--dset_dir', type=str, default=os.path.join(os.environ['HOME'], 'slowbro'))
        self.parser.add_argument('--dset_name', type=str, default='moving_mnist')
        # NOTE: image_size and n_channels are overridden per-dataset in
        # parse() below.
        self.parser.add_argument('--image_size', type=int, nargs='+', default=[64, 64])
        self.parser.add_argument('--n_frames_input', type=int, default=10)
        self.parser.add_argument('--n_frames_output', type=int, default=10)
        self.parser.add_argument('--num_objects', type=int, nargs='+', default=[2], help='Max number of digits in Moving MNIST videos.')
        # Model architecture options.
        self.parser.add_argument('--model', type=str, default='crop', help='Model name')
        self.parser.add_argument('--n_components', type=int, default=2)
        self.parser.add_argument('--image_latent_size', type=int, default=256, help='Output size of image encoder')
        self.parser.add_argument('--content_latent_size', type=int, default=128, help='Size of content vector')
        self.parser.add_argument('--pose_latent_size', type=int, default=3, help='Size of pose vector')
        self.parser.add_argument('--hidden_size', type=int, default=64, help='Hidden size of LSTM')
        self.parser.add_argument('--ngf', type=int, default=8, help='number of channels in encoder and decoder')
        self.parser.add_argument('--stn_scale_prior', type=float, default=3, help='The scale of the spatial transformer prior.')
        self.parser.add_argument('--independent_components', type=int, default=0, help='Baseline: (if set to 1) independent prediction of each component.')
        # Checkpointing / logging options.
        self.parser.add_argument('--ckpt_dir', type=str, default=os.path.join(os.environ['HOME'], 'slowbro', 'ckpt'), help='the directory that contains all checkpoints')
        self.parser.add_argument('--ckpt_name', type=str, default='ckpt', help='checkpoint name')
        self.parser.add_argument('--log_every', type=int, default=400, help='log every x steps')
        self.parser.add_argument('--save_every', type=int, default=50, help='save every x epochs')
        self.parser.add_argument('--evaluate_every', type=int, default=(- 1), help='evaluate on val set every x epochs')
    def parse(self):
        '''Parse argv into opt, derive dataset/checkpoint paths, and
        return (opt, log) where log is a printable list of settings.'''
        opt = self.parser.parse_args()
        assert ((opt.n_frames_input > 0) and (opt.n_frames_output > 0))
        (opt.is_train, opt.split) = (self.is_train, self.split)
        opt.dset_path = os.path.join(opt.dset_dir, opt.dset_name)
        # NOTE(review): lr_init and batch_size are registered only by
        # TrainArgs, so this branch assumes the matching subclass.
        if opt.is_train:
            ckpt_name = '{:s}_NC{:d}_lr{:.01e}_bt{:d}_{:s}'.format(opt.model, opt.n_components, opt.lr_init, opt.batch_size, opt.ckpt_name)
        else:
            ckpt_name = opt.ckpt_name
        opt.ckpt_path = os.path.join(opt.ckpt_dir, opt.dset_name, ckpt_name)
        # Dataset-specific geometry overrides.
        if (opt.dset_name == 'moving_mnist'):
            opt.n_channels = 1
            opt.image_size = (64, 64)
        elif (opt.dset_name == 'bouncing_balls'):
            opt.n_channels = 1
            opt.image_size = (128, 128)
        else:
            raise NotImplementedError
        if (opt.model == 'crop'):
            opt.pose_latent_size = 3
        else:
            raise NotImplementedError
        log = ['Arguments: ']
        for (k, v) in sorted(vars(opt).items()):
            log.append('{}: {}'.format(k, v))
        return (opt, log)
|
class TestArgs(BaseArgs):
    '''
    Arguments for testing (val split, no training options).
    '''
    def __init__(self):
        super(TestArgs, self).__init__()
        self.is_train = False
        self.split = 'val'
        add = self.parser.add_argument
        add('--batch_size', type=int, default=1, help='batch size')
        add('--which_epochs', type=int, nargs='+', default=[(- 1)], help='which epochs to evaluate, -1 to load latest checkpoint')
        add('--save_visuals', type=int, default=0, help='Save results to tensorboard')
        add('--save_all_results', type=int, default=0, help='Save results to tensorboard')
|
class TrainArgs(BaseArgs):
    '''
    Arguments specific for training.
    '''
    def __init__(self):
        super(TrainArgs, self).__init__()
        self.is_train = True
        self.split = 'train'
        add = self.parser.add_argument
        add('--batch_size', type=int, default=4, help='batch size per gpu')
        add('--n_epochs', type=int, default=50, help='total # of epochs')
        add('--n_iters', type=int, default=0, help='total # of iterations')
        add('--start_epoch', type=int, default=0, help='starting epoch')
        add('--lr_init', type=float, default=0.001, help='initial learning rate')
        add('--lr_decay', type=int, default=1, choices=[0, 1], help='whether to decay learning rate')
        add('--load_ckpt_dir', type=str, default='', help='directory of checkpoint')
        add('--load_ckpt_epoch', type=int, default=0, help='epoch to load checkpoint')
        add('--when_to_predict_only', type=float, default=0, help='when to set predict_loss_only to True.')
|
def make_dataset(root, is_train):
    """Load the bouncing-balls trajectory array for the requested split.

    Reads `dataset_info.npy` from the split-specific folder under
    `root` (50000 training clips vs 2000 eval clips).
    """
    folder = 'balls_n4_t60_ex50000' if is_train else 'balls_n4_t60_ex2000'
    return np.load(os.path.join(root, folder, 'dataset_info.npy'))
|
class BouncingBalls(data.Dataset):
    '''
    Bouncing balls dataset.

    Renders stored ball trajectories into binary frames on the fly and
    splits each clip into input/output frame ranges.
    '''
    def __init__(self, root, is_train, n_frames_input, n_frames_output, image_size, transform=None, return_positions=False):
        super(BouncingBalls, self).__init__()
        self.n_frames = (n_frames_input + n_frames_output)
        self.dataset = make_dataset(root, is_train)
        self.size = image_size
        # Trajectories appear to use an 800-unit source coordinate
        # frame; positions and radii are rescaled to `image_size`.
        # TODO(review): confirm the 800-unit source resolution.
        self.scale = (self.size / 800)
        self.radius = int((60 * self.scale))
        self.root = root
        self.is_train = is_train
        self.n_frames_input = n_frames_input
        self.n_frames_output = n_frames_output
        self.transform = transform
        self.return_positions = return_positions
    def __getitem__(self, idx):
        '''Return (input, output) frames for clip `idx`, plus normalized
        ball positions when return_positions is set.'''
        traj = self.dataset[idx]
        (vid_len, n_balls) = traj.shape[:2]
        # Random temporal crop when training, deterministic start for eval.
        if self.is_train:
            start = random.randint(0, (vid_len - self.n_frames))
        else:
            start = 0
        n_channels = 1
        images = np.zeros([self.n_frames, self.size, self.size, n_channels], np.uint8)
        positions = []
        for fid in range(self.n_frames):
            xy = []
            for bid in range(n_balls):
                ball = traj[((start + fid), bid)]
                (x, y) = (int(round((self.scale * ball[0]))), int(round((self.scale * ball[1]))))
                # ball[3] presumably is a per-ball radius multiplier —
                # verify against the dataset generator.
                images[fid] = cv2.circle(images[fid], (x, y), int((self.radius * ball[3])), 255, (- 1))
                # Centers normalized to [0, 1).
                xy.append([(x / self.size), (y / self.size)])
            positions.append(xy)
        if (self.transform is not None):
            images = self.transform(images)
        input = images[:self.n_frames_input]
        if (self.n_frames_output > 0):
            output = images[self.n_frames_input:]
        else:
            output = []
        if (not self.return_positions):
            return (input, output)
        else:
            # Shape (n_frames, n_balls, 2).
            positions = np.array(positions)
            return (input, output, positions)
    def __len__(self):
        return len(self.dataset)
|
def get_data_loader(opt):
    """Build a DataLoader for the dataset named by `opt.dset_name`.

    Shuffling and pinned memory follow the training/eval split; raises
    NotImplementedError for unknown dataset names.
    """
    name = opt.dset_name
    if name == 'moving_mnist':
        transform = transforms.Compose([vtransforms.ToTensor()])
        dset = MovingMNIST(opt.dset_path, opt.is_train, opt.n_frames_input, opt.n_frames_output, opt.num_objects, transform)
    elif name == 'bouncing_balls':
        transform = transforms.Compose([vtransforms.Scale(opt.image_size), vtransforms.ToTensor()])
        dset = BouncingBalls(opt.dset_path, opt.is_train, opt.n_frames_input, opt.n_frames_output, opt.image_size[0], transform)
    else:
        raise NotImplementedError
    return data.DataLoader(dset, batch_size=opt.batch_size, shuffle=opt.is_train, num_workers=opt.n_workers, pin_memory=True)
|
def get_model(opt):
    """Instantiate the model named by `opt.model`, wire up training,
    and initialize its weights. Only 'crop' (DDPAE) is supported."""
    if opt.model != 'crop':
        raise NotImplementedError
    model = DDPAE(opt)
    model.setup_training()
    model.initialize_weights()
    return model
|
class ImageDecoder(nn.Module):
    '''
    Decode images from vectors. Similar structure as DCGAN.
    '''
    def __init__(self, input_size, n_channels, ngf, n_layers, activation='tanh'):
        super(ImageDecoder, self).__init__()
        # Widest feature depth, halved after every upsampling stage.
        width = ngf * 2 ** (n_layers - 2)
        stages = [nn.ConvTranspose2d(input_size, width, 4, 1, 0, bias=False),
                  nn.BatchNorm2d(width),
                  nn.ReLU(True)]
        for _ in range(n_layers - 2):
            stages += [nn.ConvTranspose2d(width, width // 2, 4, 2, 1, bias=False),
                       nn.BatchNorm2d(width // 2),
                       nn.ReLU(True)]
            width //= 2
        stages.append(nn.ConvTranspose2d(width, n_channels, 4, 2, 1, bias=False))
        if activation == 'tanh':
            stages.append(nn.Tanh())
        elif activation == 'sigmoid':
            stages.append(nn.Sigmoid())
        else:
            raise NotImplementedError
        self.main = nn.Sequential(*stages)
    def forward(self, x):
        # Promote flat latent vectors to 1x1 spatial feature maps.
        if x.dim() == 2:
            x = x.view(*x.size(), 1, 1)
        return self.main(x)
|
class ImageEncoder(nn.Module):
    '''
    Encodes images. Similar structure as DCGAN.
    '''
    def __init__(self, n_channels, output_size, ngf, n_layers):
        super(ImageEncoder, self).__init__()
        width = ngf
        stages = [nn.Conv2d(n_channels, width, 4, 2, 1, bias=False),
                  nn.LeakyReLU(0.2, inplace=True)]
        for _ in range(n_layers - 2):
            stages += [nn.Conv2d(width, width * 2, 4, 2, 1, bias=False),
                       nn.BatchNorm2d(width * 2),
                       nn.LeakyReLU(0.2, inplace=True)]
            width *= 2
        stages.append(nn.Conv2d(width, output_size, 4, 1, 0, bias=False))
        self.main = nn.Sequential(*stages)
    def forward(self, x):
        out = self.main(x)
        # Collapse the trailing 1x1 spatial dims -> (batch, output_size).
        return out.squeeze(3).squeeze(2)
|
def build(is_train, tb_dir=None):
    """
    Parse arguments, setup logger and tensorboardX directory.

    Returns (opt, logger, vis); vis is None when tb_dir is None.
    """
    (opt, log) = (args.TrainArgs().parse() if is_train else args.TestArgs().parse())
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus
    os.makedirs(opt.ckpt_path, exist_ok=True)
    # Fixed seeds for reproducibility across runs.
    torch.manual_seed(666)
    torch.cuda.manual_seed_all(666)
    np.random.seed(666)
    random.seed(666)
    logger = Logger(opt.ckpt_path, opt.split)
    if (tb_dir is not None):
        tb_path = os.path.join(opt.ckpt_path, tb_dir)
        # NOTE(review): Visualizer is called with a single path argument
        # here — confirm this matches the Visualizer constructor in use.
        vis = Visualizer(tb_path)
    else:
        vis = None
    logger.print(log)
    return (opt, logger, vis)
|
class Logger():
    '''
    Logger to write logs to file.

    Attaches both a file handler (ckpt_path/<name>.log, truncated) and
    a stdout handler to the root logger at INFO level.
    '''
    def __init__(self, ckpt_path, name='train'):
        self.logger = logging.getLogger()
        self.logger.setLevel(logging.INFO)
        fmt = logging.Formatter('%(asctime)s %(message)s', datefmt=blue('[%Y-%m-%d,%H:%M:%S]'))
        handlers = (logging.FileHandler(os.path.join(ckpt_path, '{}.log'.format(name)), 'w'),
                    logging.StreamHandler(sys.stdout))
        for handler in handlers:
            handler.setLevel(logging.INFO)
            handler.setFormatter(fmt)
            self.logger.addHandler(handler)
    def print(self, log):
        """Log a message at INFO; list entries are joined on '\\n - '."""
        msg = '\n - '.join(log) if isinstance(log, list) else log
        self.logger.info(msg)
|
def to_numpy(array):
    '''
    :param array: Variable, GPU tensor, or CPU tensor
    :return: numpy
    '''
    if isinstance(array, np.ndarray):
        return array
    # Unwrap the underlying tensor from an autograd Variable.
    if isinstance(array, torch.autograd.Variable):
        array = array.data
    return array.cpu().numpy() if array.is_cuda else array.numpy()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.