code
stringlengths
17
6.64M
class BaseOptions():
    """Defines, registers and parses the command-line options shared by
    training and testing.  Subclasses set ``self.isTrain`` and add their
    own flags in ``initialize``."""

    def __init__(self):
        self.parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        self.initialized = False

    def initialize(self):
        """Register every option common to train and test runs."""
        add = self.parser.add_argument
        # --- data ---
        add('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
        add('--batchSize', type=int, default=1, help='input batch size')
        add('--max_items_A', type=int, default=-1, help='max number of items for domain A, -1 indicates no maximum')
        add('--max_items_B', type=int, default=-1, help='max number of items for domain B, -1 indicates no maximum')
        add('--start', type=int, default=0, help='starting index of items of domain A, after sorting')
        add('--loadSize', type=int, default=286, help='scale images to this size')
        add('--fineSize', type=int, default=256, help='then crop to this size')
        add('--input_nc', type=int, default=3, help='# of input image channels')
        add('--output_nc', type=int, default=3, help='# of output image channels')
        # --- networks ---
        add('--ngf', type=int, default=64, help='# of gen filters in first conv layer')
        add('--ndf', type=int, default=64, help='# of discrim filters in first conv layer')
        add('--which_model_netD', type=str, default='basic', help='selects model to use for netD')
        add('--which_model_netG', type=str, default='resnet_9blocks', help='selects model to use for netG and netED')
        add('--n_layers_D', type=int, default=3, help='only used if which_model_netD==n_layers')
        add('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
        add('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
        add('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single]')
        add('--model', type=str, default='ost', help='chooses which model to use. ost, autoencoder, test.')
        add('--which_direction', type=str, default='AtoB', help='AtoB or BtoA')
        add('--nThreads', default=2, type=int, help='# threads for loading data')
        add('--load_dir', type=str, default='./checkpoints', help='models are loaded here')
        add('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
        add('--norm', type=str, default='instance', help='instance normalization or batch normalization')
        add('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
        # --- display / visdom ---
        add('--display_winsize', type=int, default=256, help='display window size')
        add('--display_id', type=int, default=1, help='window id of the web display')
        add('--display_server', type=str, default='http://localhost', help='visdom server of the web display')
        add('--display_port', type=int, default=8097, help='visdom port of the web display')
        # --- augmentation / preprocessing ---
        add('--no_dropout', action='store_true', help='no dropout for the generator')
        add('--resize_or_crop', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop|crop|scale_width|scale_width_and_crop]')
        add('--no_flip_and_rotation', action='store_true', help='if specified, do not flip and rotate the images for data augmentation')
        add('--rotation_degree', type=int, default=7, help='rotation degree used for augmentation')
        add('--init_type', type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]')
        add('--A', type=str, default='A', help='used to exchange dataset A for B by setting the value to B')
        add('--B', type=str, default='B', help='used to exchange dataset B for A by setting the value to A')
        # --- architecture sharing ---
        add('--n_downsampling', type=int, default=2, help='number of downsampling/upsampling convolutional/deconvolutional layers')
        add('--num_unshared', type=int, default=1, help='number of unshared encoder/decoder layers, not including input and final layers')
        add('--num_res_blocks_unshared', type=int, default=0, help='number of unshared resnet blocks')
        add('--num_res_blocks_shared', type=int, default=6, help='number of shared resnet blocks')
        self.initialized = True

    def parse(self):
        """Parse sys.argv, select GPUs, echo the options, persist them to
        ``<checkpoints_dir>/<name>/opt.txt`` and return the namespace."""
        if not self.initialized:
            self.initialize()
        self.opt = self.parser.parse_args()
        self.opt.isTrain = self.isTrain  # set by the TrainOptions/TestOptions subclass
        # Turn the comma-separated gpu id string into a list of non-negative ints.
        str_ids = self.opt.gpu_ids.split(',')
        self.opt.gpu_ids = []
        for str_id in str_ids:
            gpu_id = int(str_id)
            if gpu_id >= 0:
                self.opt.gpu_ids.append(gpu_id)
        if len(self.opt.gpu_ids) > 0:
            torch.cuda.set_device(self.opt.gpu_ids[0])
        args = vars(self.opt)
        print('------------ Options -------------')
        for k, v in sorted(args.items()):
            print('%s: %s' % (str(k), str(v)))
        print('-------------- End ----------------')
        # Persist the options next to the checkpoints for reproducibility.
        expr_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name)
        util.mkdirs(expr_dir)
        file_name = os.path.join(expr_dir, 'opt.txt')
        with open(file_name, 'wt') as opt_file:
            opt_file.write('------------ Options -------------\n')
            for k, v in sorted(args.items()):
                opt_file.write('%s: %s\n' % (str(k), str(v)))
            opt_file.write('-------------- End ----------------\n')
        return self.opt
class TestOptions(BaseOptions):
    """Test-time options: everything in BaseOptions plus inference-only flags."""

    def initialize(self):
        BaseOptions.initialize(self)
        add = self.parser.add_argument
        add('--ntest', type=int, default=float('inf'), help='# of test examples.')
        add('--results_dir', type=str, default='./results/', help='saves results here.')
        add('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images')
        add('--phase', type=str, default='train', help='train, val, test, etc')
        add('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
        add('--how_many', type=int, default=50, help='how many test images to run')
        self.isTrain = False
class TrainOptions(BaseOptions):
    """Training options: everything in BaseOptions plus optimizer,
    scheduling, logging and loss-weight flags."""

    def initialize(self):
        BaseOptions.initialize(self)
        add = self.parser.add_argument
        # --- logging frequencies ---
        add('--display_freq', type=int, default=100, help='frequency of showing training results on screen')
        add('--display_single_pane_ncols', type=int, default=0, help='if positive, display all images in a single visdom web panel with certain number of images per row.')
        add('--update_html_freq', type=int, default=1000, help='frequency of saving training results to html')
        add('--print_freq', type=int, default=100, help='frequency of showing training results on console')
        add('--save_latest_freq', type=int, default=10000, help='frequency of saving the latest results')
        add('--save_epoch_freq', type=int, default=10, help='frequency of saving checkpoints at the end of epochs')
        # --- resuming ---
        add('--continue_train', action='store_true', help='continue training: load the latest model')
        add('--epoch_count', type=int, default=1, help='the starting epoch count, we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>, ...')
        add('--phase', type=str, default='train', help='train, val, test, etc')
        add('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
        # --- optimization ---
        add('--niter', type=int, default=60, help='# of iter at starting learning rate')
        add('--niter_decay', type=int, default=20, help='# of iter to linearly decay learning rate to zero')
        add('--beta1', type=float, default=0.5, help='momentum term of adam')
        add('--lr', type=float, default=0.0002, help='initial learning rate for adam')
        add('--no_lsgan', action='store_true', help='do *not* use least square GAN, if false, use vanilla GAN')
        # --- loss weights ---
        add('--kl_lambda', type=float, default=0.1, help='weight for kl loss')
        add('--lambda_A', type=float, default=10.0, help='weight for cycle loss (A -> B -> A)')
        add('--lambda_B', type=float, default=10.0, help='weight for cycle loss (B -> A -> B)')
        add('--pool_size', type=int, default=50, help='the size of image buffer that stores previously generated images')
        add('--dont_load_pretrained_autoencoder', action='store_true', help='do not load pretrained autoencoder')
        add('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')
        add('--lr_policy', type=str, default='lambda', help='learning rate policy: lambda|step|plateau')
        add('--lr_decay_iters', type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations')
        self.isTrain = True
class GetData(object):
    """Download CycleGAN or Pix2Pix data.

    Args:
        technique: one of 'cyclegan' or 'pix2pix'.
        verbose: if True, print progress messages.

    Example:
        >>> from util.get_data import GetData
        >>> gd = GetData(technique='cyclegan')
        >>> new_data_path = gd.get(save_path='./datasets')  # options will be displayed.
    """

    def __init__(self, technique='cyclegan', verbose=True):
        url_dict = {
            'pix2pix': 'https://people.eecs.berkeley.edu/~tinghuiz/projects/pix2pix/datasets',
            'cyclegan': 'https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets',
        }
        # Unknown techniques leave self.url as None.
        self.url = url_dict.get(technique.lower())
        self._verbose = verbose

    def _print(self, text):
        # Honor the verbosity flag for all informational output.
        if self._verbose:
            print(text)

    @staticmethod
    def _get_options(r):
        """Scrape the archive links (.zip / tar.gz) from the dataset index page."""
        soup = BeautifulSoup(r.text, 'lxml')
        return [h.text for h in soup.find_all('a', href=True)
                if h.text.endswith(('.zip', 'tar.gz'))]

    def _present_options(self):
        """List the available archives and return the one the user picks."""
        r = requests.get(self.url)
        options = self._get_options(r)
        print('Options:\n')
        for i, o in enumerate(options):
            print('{0}: {1}'.format(i, o))
        choice = input('\nPlease enter the number of the dataset above you wish to download:')
        return options[int(choice)]

    def _download_data(self, dataset_url, save_path):
        """Download the archive to save_path, unpack it there, delete the archive."""
        if not isdir(save_path):
            os.makedirs(save_path)
        base = basename(dataset_url)
        temp_save_path = join(save_path, base)
        with open(temp_save_path, 'wb') as f:
            r = requests.get(dataset_url)
            f.write(r.content)
        if base.endswith('.tar.gz'):
            obj = tarfile.open(temp_save_path)
        elif base.endswith('.zip'):
            obj = ZipFile(temp_save_path, 'r')
        else:
            raise ValueError('Unknown File Type: {0}.'.format(base))
        self._print('Unpacking Data...')
        obj.extractall(save_path)
        obj.close()
        os.remove(temp_save_path)

    def get(self, save_path, dataset=None):
        """Download a dataset.

        Args:
            save_path: directory to save the data to.
            dataset: a specific dataset to download (must include the file
                extension).  If None, options are presented interactively.

        Returns:
            The absolute path to the downloaded data.
        """
        selected_dataset = self._present_options() if dataset is None else dataset
        save_path_full = join(save_path, selected_dataset.split('.')[0])
        if isdir(save_path_full):
            # Already present: skip the network round trip entirely.
            warn("\n'{0}' already exists. Voiding Download.".format(save_path_full))
        else:
            self._print('Downloading Data...')
            url = '{0}/{1}'.format(self.url, selected_dataset)
            self._download_data(url, save_path=save_path)
        return abspath(save_path_full)
class HTML():
    """Minimal HTML page builder (dominate-based) used to dump image grids
    and captions under ``web_dir`` (images go to ``web_dir/images``).

    Args:
        web_dir: directory where index.html and the images/ subdir live.
        title: page title.
        reflesh: auto-reload interval in seconds; 0 disables (parameter name
            kept misspelled for backward compatibility with existing callers).
    """

    def __init__(self, web_dir, title, reflesh=0):
        self.title = title
        self.web_dir = web_dir
        self.img_dir = os.path.join(self.web_dir, 'images')
        if not os.path.exists(self.web_dir):
            os.makedirs(self.web_dir)
        if not os.path.exists(self.img_dir):
            os.makedirs(self.img_dir)
        self.doc = dominate.document(title=title)
        if reflesh > 0:
            with self.doc.head:
                # BUG FIX: the auto-reload directive is spelled 'refresh';
                # the original http_equiv='reflesh' was silently ignored by browsers.
                meta(http_equiv='refresh', content=str(reflesh))

    def get_image_dir(self):
        """Return the directory where page images should be written."""
        return self.img_dir

    def add_header(self, str):
        """Append an <h3> header with the given text."""
        with self.doc:
            h3(str)

    def add_table(self, border=1):
        """Start a new fixed-layout table and remember it in self.t."""
        self.t = table(border=border, style='table-layout: fixed;')
        self.doc.add(self.t)

    def add_images(self, ims, txts, links, width=400):
        """Add one table row of images with captions.

        Args:
            ims: image filenames (relative to the images/ dir).
            txts: caption per image.
            links: click-through target per image (relative to images/).
            width: display width in pixels.
        """
        self.add_table()
        with self.t:
            with tr():
                for im, txt, link in zip(ims, txts, links):
                    with td(style='word-wrap: break-word;', halign='center', valign='top'):
                        with p():
                            with a(href=os.path.join('images', link)):
                                img(style='width:%dpx' % width, src=os.path.join('images', im))
                            br()
                            p(txt)

    def save(self):
        """Render the document to <web_dir>/index.html."""
        html_file = '%s/index.html' % self.web_dir
        # Context manager guarantees the handle is closed even if render fails.
        with open(html_file, 'wt') as f:
            f.write(self.doc.render())
class ImagePool():
    """History buffer of previously generated images.

    With probability 0.5 a query returns an older generated image instead of
    the fresh one, which stabilizes discriminator training.  A pool size of 0
    disables buffering entirely.
    """

    def __init__(self, pool_size):
        self.pool_size = pool_size
        if self.pool_size > 0:
            self.num_imgs = 0
            self.images = []

    def query(self, images):
        """Return a batch mixing fresh images with buffered ones (see class doc)."""
        if self.pool_size == 0:
            # Buffering disabled: pass the batch straight through.
            return Variable(images)
        selected = []
        for image in images:
            image = torch.unsqueeze(image, 0)
            if self.num_imgs < self.pool_size:
                # Pool not yet full: store and return the fresh image.
                self.num_imgs += 1
                self.images.append(image)
                selected.append(image)
            elif random.uniform(0, 1) > 0.5:
                # Swap: hand back a random old image, keep the new one.
                random_id = random.randint(0, self.pool_size - 1)
                old = self.images[random_id].clone()
                self.images[random_id] = image
                selected.append(old)
            else:
                selected.append(image)
        return Variable(torch.cat(selected, 0))
def tensor2im(image_tensor, imtype=np.uint8):
    """Convert the first image of a (B, C, H, W) tensor in [-1, 1] into an
    (H, W, 3) numpy array of dtype ``imtype`` in [0, 255].

    Single-channel images are tiled to 3 channels.
    """
    arr = image_tensor[0].cpu().float().numpy()
    if arr.shape[0] == 1:
        arr = np.tile(arr, (3, 1, 1))
    # CHW -> HWC, then rescale [-1, 1] -> [0, 255].
    arr = (np.transpose(arr, (1, 2, 0)) + 1) / 2.0 * 255.0
    return arr.astype(imtype)
def diagnose_network(net, name='network'):
    """Print ``name`` followed by the mean absolute gradient over all
    parameters of ``net`` that currently hold a gradient (0.0 if none do)."""
    grad_mean = 0.0
    count = 0
    for param in net.parameters():
        if param.grad is not None:
            grad_mean += torch.mean(torch.abs(param.grad.data))
            count += 1
    if count > 0:
        grad_mean = grad_mean / count
    print(name)
    print(grad_mean)
def save_image(image_numpy, image_path):
    """Write an (H, W, C) uint8 numpy array to ``image_path`` via PIL."""
    Image.fromarray(image_numpy).save(image_path)
def print_numpy(x, val=True, shp=False):
    """Print summary statistics (and optionally the shape) of array ``x``."""
    x = x.astype(np.float64)
    if shp:
        print('shape,', x.shape)
    if val:
        flat = x.flatten()
        print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f'
              % (np.mean(flat), np.min(flat), np.max(flat), np.median(flat), np.std(flat)))
def mkdirs(paths):
    """Create every directory in ``paths`` (a single path string or a list
    of path strings), delegating to :func:`mkdir` for each."""
    if not isinstance(paths, list):
        paths = [paths]
    for path in paths:
        mkdir(path)
def mkdir(path):
    """Create directory ``path`` (including parents) if it does not exist.

    Uses ``exist_ok=True`` instead of the original exists-then-makedirs
    pattern, which could raise if another process created the directory
    between the check and the call (TOCTOU race).
    """
    os.makedirs(path, exist_ok=True)
class Visualizer():
    """Routes training visuals and losses to three sinks: a visdom server
    (when ``opt.display_id > 0``), a static HTML page (when ``opt.isTrain``
    and html output is enabled), and a plain-text loss log."""

    def __init__(self, opt):
        self.display_id = opt.display_id
        self.use_html = opt.isTrain and (not opt.no_html)
        self.win_size = opt.display_winsize
        self.name = opt.name
        self.opt = opt
        self.saved = False
        if self.display_id > 0:
            import visdom
            self.vis = visdom.Visdom(server=opt.display_server, port=opt.display_port)
        if self.use_html:
            self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')
            self.img_dir = os.path.join(self.web_dir, 'images')
            print('create web directory %s...' % self.web_dir)
            util.mkdirs([self.web_dir, self.img_dir])
        # Always open the loss log, appending a session header.
        self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
        with open(self.log_name, 'a') as log_file:
            now = time.strftime('%c')
            log_file.write('================ Training Loss (%s) ================\n' % now)

    def reset(self):
        """Forget that the current epoch's visuals were already saved to HTML."""
        self.saved = False

    def display_current_results(self, visuals, epoch, save_result):
        """Show ``visuals`` (label -> HWC numpy image) in visdom and, when
        HTML output is enabled, persist them and rebuild the web page."""
        if self.display_id > 0:
            ncols = self.opt.display_single_pane_ncols
            if ncols > 0:
                # Single-pane mode: one image grid plus an HTML label table.
                h, w = next(iter(visuals.values())).shape[:2]
                table_css = ('<style>\n table {border-collapse: separate; border-spacing:4px; white-space:nowrap; text-align:center}\n table td {width: %dpx; height: %dpx; padding: 4px; outline: 4px solid black}\n </style>' % (w, h))
                title = self.name
                label_html = ''
                label_html_row = ''
                nrows = int(np.ceil(len(visuals.items()) / ncols))  # NOTE(review): unused, kept as-is
                images = []
                idx = 0
                for label, image_numpy in visuals.items():
                    label_html_row += '<td>%s</td>' % label
                    images.append(image_numpy.transpose([2, 0, 1]))
                    idx += 1
                    if idx % ncols == 0:
                        label_html += '<tr>%s</tr>' % label_html_row
                        label_html_row = ''
                # Pad the final row with white placeholder images.
                white_image = np.ones_like(image_numpy.transpose([2, 0, 1])) * 255
                while idx % ncols != 0:
                    images.append(white_image)
                    label_html_row += '<td></td>'
                    idx += 1
                if label_html_row != '':
                    label_html += '<tr>%s</tr>' % label_html_row
                self.vis.images(images, nrow=ncols, win=self.display_id + 1,
                                padding=2, opts=dict(title=title + ' images'))
                label_html = '<table>%s</table>' % label_html
                self.vis.text(table_css + label_html, win=self.display_id + 2,
                              opts=dict(title=title + ' labels'))
            else:
                # One visdom window per image.
                idx = 1
                for label, image_numpy in visuals.items():
                    self.vis.image(image_numpy.transpose([2, 0, 1]),
                                   opts=dict(title=label), win=self.display_id + idx)
                    idx += 1
        if self.use_html and (save_result or not self.saved):
            self.saved = True
            # Write the current epoch's images, then rebuild the whole page
            # (newest epoch first).
            for label, image_numpy in visuals.items():
                img_path = os.path.join(self.img_dir, 'epoch%.3d_%s.png' % (epoch, label))
                util.save_image(image_numpy, img_path)
            webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, reflesh=1)
            for n in range(epoch, 0, -1):
                webpage.add_header('epoch [%d]' % n)
                ims, txts, links = [], [], []
                for label, image_numpy in visuals.items():
                    img_path = 'epoch%.3d_%s.png' % (n, label)
                    ims.append(img_path)
                    txts.append(label)
                    links.append(img_path)
                webpage.add_images(ims, txts, links, width=self.win_size)
            webpage.save()

    def plot_current_errors(self, epoch, counter_ratio, opt, errors):
        """Append the current losses and redraw the visdom line plot."""
        if not hasattr(self, 'plot_data'):
            self.plot_data = {'X': [], 'Y': [], 'legend': list(errors.keys())}
        self.plot_data['X'].append(epoch + counter_ratio)
        self.plot_data['Y'].append([errors[k] for k in self.plot_data['legend']])
        self.vis.line(
            X=np.stack([np.array(self.plot_data['X'])] * len(self.plot_data['legend']), 1),
            Y=np.array(self.plot_data['Y']),
            opts={'title': self.name + ' loss over time',
                  'legend': self.plot_data['legend'],
                  'xlabel': 'epoch',
                  'ylabel': 'loss'},
            win=self.display_id)

    def print_current_errors(self, epoch, i, errors, t, t_data):
        """Print the current losses to the console and append them to the log."""
        message = ('(epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (epoch, i, t, t_data))
        for k, v in errors.items():
            message += '%s: %.3f ' % (k, v)
        print(message)
        with open(self.log_name, 'a') as log_file:
            log_file.write('%s\n' % message)

    def save_images(self, webpage, visuals, image_path, aspect_ratio=1.0, index=None, split=1):
        """Save ``visuals`` to the webpage's image dir and add a row for them.

        The row name is derived from ``image_path``; when ``index`` is given
        it is prefixed (or replaces the name when ``split == 0``).
        """
        image_dir = webpage.get_image_dir()
        short_path = ntpath.basename(image_path[0])
        name = os.path.splitext(short_path)[0]
        if index is not None:
            name_splits = name.split('_')
            if split == 0:
                name = str(index)
            else:
                name = str(index) + '_' + name_splits[split]
        webpage.add_header(name)
        ims, txts, links = [], [], []
        for label, im in visuals.items():
            image_name = '%s_%s.png' % (name, label)
            save_path = os.path.join(image_dir, image_name)
            h, w, _ = im.shape
            # Resize along one axis so the displayed aspect ratio is honored.
            if aspect_ratio > 1.0:
                im = imresize(im, (h, int(w * aspect_ratio)), interp='bicubic')
            if aspect_ratio < 1.0:
                im = imresize(im, (int(h / aspect_ratio), w), interp='bicubic')
            util.save_image(im, save_path)
            ims.append(image_name)
            txts.append(label)
            links.append(image_name)
        webpage.add_images(ims, txts, links, width=self.win_size)
def get_loader(config):
    """Builds and returns Dataloader for MNIST and SVHN dataset."""
    # Training transform: optional augmentation, then scale/normalize to [-1, 1].
    train_tfms = []
    if config.use_augmentation:
        train_tfms.append(transforms.RandomHorizontalFlip())
        train_tfms.append(transforms.RandomRotation(0.1))
    train_tfms.extend([
        transforms.Scale(config.image_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    transform_train = transforms.Compose(train_tfms)
    # Test transform: deterministic, no augmentation.
    transform_test = transforms.Compose([
        transforms.Scale(config.image_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    svhn = datasets.SVHN(root=config.svhn_path, download=True, transform=transform_train, split='train')
    mnist = datasets.MNIST(root=config.mnist_path, download=True, transform=transform_train, train=True)
    svhn_test = datasets.SVHN(root=config.svhn_path, download=True, transform=transform_test, split='test')
    mnist_test = datasets.MNIST(root=config.mnist_path, download=True, transform=transform_test, train=False)
    svhn_loader = torch.utils.data.DataLoader(dataset=svhn, batch_size=config.batch_size,
                                              shuffle=config.shuffle, num_workers=config.num_workers)
    mnist_loader = torch.utils.data.DataLoader(dataset=mnist, batch_size=config.batch_size,
                                               shuffle=config.shuffle, num_workers=config.num_workers)
    svhn_test_loader = torch.utils.data.DataLoader(dataset=svhn_test, batch_size=config.batch_size,
                                                   shuffle=False, num_workers=config.num_workers)
    mnist_test_loader = torch.utils.data.DataLoader(dataset=mnist_test, batch_size=config.batch_size,
                                                    shuffle=False, num_workers=config.num_workers)
    return (svhn_loader, mnist_loader, svhn_test_loader, mnist_test_loader)
def str2bool(v):
    """Parse a user-supplied flag string: True iff ``v`` is 'true' (any case).

    BUG FIX: the original ``v.lower() in 'true'`` was a *substring* test, so
    inputs like 'ru', 't' and even '' were wrongly treated as True.
    """
    return v.lower() == 'true'
def main(config):
    """Build the data loaders, prepare output directories, and run training."""
    svhn_loader, mnist_loader, svhn_test_loader, mnist_test_loader = get_loader(config)
    solver = Solver(config, svhn_loader, mnist_loader)
    cudnn.benchmark = True
    # Make sure both output directories exist before anything is written.
    for directory in (config.model_path, config.sample_path):
        if not os.path.exists(directory):
            os.makedirs(directory)
    if config.mode == 'train':
        solver.train()
def str2bool(v):
    """Parse a user-supplied flag string: True iff ``v`` is 'true' (any case).

    BUG FIX: the original ``v.lower() in 'true'`` was a *substring* test, so
    inputs like 'ru', 't' and even '' were wrongly treated as True.
    """
    return v.lower() == 'true'
def main(config):
    """Build loaders, prepare output dirs and file logging, then train or sample."""
    svhn_loader, mnist_loader, svhn_test_loader, mnist_test_loader = get_loader(config)
    solver = Solver(config, svhn_loader, mnist_loader)
    cudnn.benchmark = True
    # Make sure both output directories exist before anything is written.
    for directory in (config.model_path, config.sample_path):
        if not os.path.exists(directory):
            os.makedirs(directory)
    # Log file is named after the max_items setting, inside config.log_path.
    base = config.log_path
    filename = os.path.join(base, str(config.max_items))
    if not os.path.isdir(base):
        os.mkdir(base)
    logging.basicConfig(filename=filename, level=logging.DEBUG)
    if config.mode == 'train':
        solver.train()
    elif config.mode == 'sample':
        solver.sample()
def str2bool(v):
    """Parse a user-supplied flag string: True iff ``v`` is 'true' (any case).

    BUG FIX: the original ``v.lower() in 'true'`` was a *substring* test, so
    inputs like 'ru', 't' and even '' were wrongly treated as True.
    """
    return v.lower() == 'true'
def main(config):
    """Build loaders, prepare output dirs and file logging, then train or sample."""
    svhn_loader, mnist_loader, svhn_test_loader, mnist_test_loader = get_loader(config)
    solver = Solver(config, svhn_loader, mnist_loader)
    cudnn.benchmark = True
    # Make sure both output directories exist before anything is written.
    for directory in (config.model_path, config.sample_path):
        if not os.path.exists(directory):
            os.makedirs(directory)
    # Log file is named after the max_items setting, inside config.log_path.
    base = config.log_path
    filename = os.path.join(base, str(config.max_items))
    if not os.path.isdir(base):
        os.mkdir(base)
    logging.basicConfig(filename=filename, level=logging.DEBUG)
    if config.mode == 'train':
        solver.train()
    elif config.mode == 'sample':
        solver.sample()
def deconv(c_in, c_out, k_size, stride=2, pad=1, bn=True):
    """Transposed-conv block: ConvTranspose2d (no bias) + optional BatchNorm2d."""
    modules = [nn.ConvTranspose2d(c_in, c_out, k_size, stride, pad, bias=False)]
    if bn:
        modules.append(nn.BatchNorm2d(c_out))
    return nn.Sequential(*modules)
def conv(c_in, c_out, k_size, stride=2, pad=1, bn=True):
    """Conv block: Conv2d (no bias) + optional BatchNorm2d."""
    modules = [nn.Conv2d(c_in, c_out, k_size, stride, pad, bias=False)]
    if bn:
        modules.append(nn.BatchNorm2d(c_out))
    return nn.Sequential(*modules)
class G11(nn.Module):
    """Generator with unshared input/output layers (1-channel MNIST path vs
    3-channel SVHN path, selected by the ``svhn`` flag) around a shared core."""

    def __init__(self, conv_dim=64):
        super(G11, self).__init__()
        # Unshared input layers.
        self.conv1 = conv(1, conv_dim, 4)
        self.conv1_svhn = conv(3, conv_dim, 4)
        # Shared encoder / residual-style core.
        self.conv2 = conv(conv_dim, conv_dim * 2, 4)
        res_dim = conv_dim * 2
        self.conv3 = conv(res_dim, res_dim, 3, 1, 1)
        self.conv4 = conv(res_dim, res_dim, 3, 1, 1)
        self.deconv1 = deconv(conv_dim * 2, conv_dim, 4)
        # Unshared output layers.
        self.deconv2 = deconv(conv_dim, 1, 4, bn=False)
        self.deconv2_svhn = deconv(conv_dim, 3, 4, bn=False)

    def forward(self, x, svhn=False):
        """Full pass: identical to encode followed by decode."""
        return self.decode(self.encode(x, svhn=svhn), svhn=svhn)

    def encode(self, x, svhn=False):
        """Map an image to the shared latent feature map (conv1..conv3)."""
        first = self.conv1_svhn if svhn else self.conv1
        out = F.leaky_relu(first(x), 0.05)
        out = F.leaky_relu(self.conv2(out), 0.05)
        out = F.leaky_relu(self.conv3(out), 0.05)
        return out

    def decode(self, out, svhn=False):
        """Map the shared latent feature map back to an image (conv4..deconv2)."""
        out = F.leaky_relu(self.conv4(out), 0.05)
        out = F.leaky_relu(self.deconv1(out), 0.05)
        last = self.deconv2_svhn if svhn else self.deconv2
        return F.tanh(last(out))

    def encode_params(self):
        """Parameters of the encoder half (both unshared inputs + shared layers)."""
        params = list(self.conv1_svhn.parameters()) + list(self.conv1.parameters())
        params += list(self.conv2.parameters())
        params += list(self.conv3.parameters())
        return params

    def decode_params(self):
        """Parameters of the decoder half (both unshared outputs + shared layers)."""
        params = list(self.deconv2_svhn.parameters()) + list(self.deconv2.parameters())
        params += list(self.deconv1.parameters())
        params += list(self.conv4.parameters())
        return params

    def unshared_parameters(self):
        """Parameters of the domain-specific (unshared) layers only."""
        return (list(self.deconv2_svhn.parameters()) + list(self.conv1_svhn.parameters())
                + list(self.deconv2.parameters()) + list(self.conv1.parameters()))
class G22(nn.Module):
    """Generator with unshared input/output layers (3-channel SVHN path vs
    1-channel MNIST path, selected by the ``mnist`` flag) around a shared core."""

    def __init__(self, conv_dim=64):
        super(G22, self).__init__()
        # Unshared input layers.
        self.conv1 = conv(3, conv_dim, 4)
        self.conv1_mnist = conv(1, conv_dim, 4)
        # Shared encoder / residual-style core.
        self.conv2 = conv(conv_dim, conv_dim * 2, 4)
        res_dim = conv_dim * 2
        self.conv3 = conv(res_dim, res_dim, 3, 1, 1)
        self.conv4 = conv(res_dim, res_dim, 3, 1, 1)
        self.deconv1 = deconv(conv_dim * 2, conv_dim, 4)
        # Unshared output layers.
        self.deconv2 = deconv(conv_dim, 3, 4, bn=False)
        self.deconv2_mnist = deconv(conv_dim, 1, 4, bn=False)

    def forward(self, x, mnist=False):
        """Full pass: identical to encode followed by decode."""
        return self.decode(self.encode(x, mnist=mnist), mnist=mnist)

    def encode(self, x, mnist=False):
        """Map an image to the shared latent feature map (conv1..conv3)."""
        first = self.conv1_mnist if mnist else self.conv1
        out = F.leaky_relu(first(x), 0.05)
        out = F.leaky_relu(self.conv2(out), 0.05)
        out = F.leaky_relu(self.conv3(out), 0.05)
        return out

    def decode(self, out, mnist=False):
        """Map the shared latent feature map back to an image (conv4..deconv2)."""
        out = F.leaky_relu(self.conv4(out), 0.05)
        out = F.leaky_relu(self.deconv1(out), 0.05)
        last = self.deconv2_mnist if mnist else self.deconv2
        return F.tanh(last(out))

    def encode_params(self):
        """Parameters of the encoder half (both unshared inputs + shared layers)."""
        params = list(self.conv1_mnist.parameters()) + list(self.conv1.parameters())
        params += list(self.conv2.parameters())
        params += list(self.conv3.parameters())
        return params

    def decode_params(self):
        """Parameters of the decoder half (both unshared outputs + shared layers)."""
        params = list(self.deconv2_mnist.parameters()) + list(self.deconv2.parameters())
        params += list(self.deconv1.parameters())
        params += list(self.conv4.parameters())
        return params

    def unshared_parameters(self):
        """Parameters of the domain-specific (unshared) layers only."""
        return (list(self.deconv2_mnist.parameters()) + list(self.conv1_mnist.parameters())
                + list(self.deconv2.parameters()) + list(self.conv1.parameters()))
class D1(nn.Module):
    """Discriminator for mnist (1-channel input); outputs 11 logits when
    ``use_labels`` is set (10 classes + fake), otherwise a single score."""

    def __init__(self, conv_dim=64, use_labels=False):
        super(D1, self).__init__()
        self.conv1 = conv(1, conv_dim, 4, bn=False)
        self.conv2 = conv(conv_dim, conv_dim * 2, 4)
        self.conv3 = conv(conv_dim * 2, conv_dim * 4, 4)
        n_out = 11 if use_labels else 1
        # 1x1 spatial output acting as a fully-connected head.
        self.fc = conv(conv_dim * 4, n_out, 4, 1, 0, False)

    def forward(self, x_0):
        h = F.leaky_relu(self.conv1(x_0), 0.05)
        h = F.leaky_relu(self.conv2(h), 0.05)
        h = F.leaky_relu(self.conv3(h), 0.05)
        return self.fc(h).squeeze()
class D2(nn.Module):
    """Discriminator for svhn (3-channel input); outputs 11 logits when
    ``use_labels`` is set (10 classes + fake), otherwise a single score."""

    def __init__(self, conv_dim=64, use_labels=False):
        super(D2, self).__init__()
        self.conv1 = conv(3, conv_dim, 4, bn=False)
        self.conv2 = conv(conv_dim, conv_dim * 2, 4)
        self.conv3 = conv(conv_dim * 2, conv_dim * 4, 4)
        n_out = 11 if use_labels else 1
        # 1x1 spatial output acting as a fully-connected head.
        self.fc = conv(conv_dim * 4, n_out, 4, 1, 0, False)

    def forward(self, x_0):
        h = F.leaky_relu(self.conv1(x_0), 0.05)
        h = F.leaky_relu(self.conv2(h), 0.05)
        h = F.leaky_relu(self.conv3(h), 0.05)
        return self.fc(h).squeeze()
def relu6(x):
    """ReLU capped at 6 (MobileNet activation), via the Keras backend."""
    return K.relu(x, max_value=6)
def load_model(input_shape=(224, 224, 3), n_veid=576, Mode='train', Weights_path='./weights'):
    """Build a MobileNet backbone with a Gaussian (z_mean / z_log_var) head
    for vehicle re-identification.

    Modes: 'inference' returns ``(model, model_name)`` with z_mean as output;
    'train' / 'resume_training' return ``(model, model_name, target_feature)``
    with the classification head, a KL + cross-entropy loss attached, and
    pretrained weights loaded when available.
    """
    alpha = 1.0
    depth_multiplier = 1
    dropout = 0.001
    gauss_size = 1024
    input_layer = Input(shape=input_shape)
    y = Input(shape=[n_veid])  # one-hot labels, consumed only by the loss
    # MobileNet v1 feature extractor.
    x = _conv_block(input_layer, 32, alpha, strides=(2, 2))
    x = _depthwise_conv_block(x, 64, alpha, depth_multiplier, block_id=1)
    x = _depthwise_conv_block(x, 128, alpha, depth_multiplier, strides=(2, 2), block_id=2)
    x = _depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=3)
    x = _depthwise_conv_block(x, 256, alpha, depth_multiplier, strides=(2, 2), block_id=4)
    x = _depthwise_conv_block(x, 256, alpha, depth_multiplier, block_id=5)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, strides=(2, 2), block_id=6)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=7)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=8)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=9)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=10)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=11)
    x = _depthwise_conv_block(x, 1024, alpha, depth_multiplier, strides=(2, 2), block_id=12)
    x = _depthwise_conv_block(x, 1024, alpha, depth_multiplier, block_id=13)
    x = GlobalAveragePooling2D()(x)
    hidden = Dropout(dropout, name='dropout')(x)
    # Gaussian embedding head; classification is done on z_mean.
    z_mean = Dense(gauss_size, name='z_mean')(hidden)
    z_log_var = Dense(gauss_size, name='z_log_var')(hidden)
    y_pred = Dense(n_veid, activation='softmax')(z_mean)
    if Mode == 'inference':
        model = Model(inputs=[input_layer], outputs=[z_mean])
        # NOTE(review): `model_name` is not defined in this function; it is
        # presumably a module-level global — confirm before relying on it.
        model_weights_path = os.path.join(Weights_path, model_name)
        try:
            model.load_weights(model_weights_path, by_name=True)
            print('successfully loaded weights...')
        except:  # NOTE(review): bare except silently swallows load failures
            pass
        return (model, model_name)
    else:
        model = Model(inputs=[input_layer, y], outputs=[y_pred])
        if Mode == 'train':
            baseline = 'mobilenet_vehicleID.h5'
            weight_path = os.path.join(Weights_path, baseline)
            model.load_weights(weight_path, by_name=True)
            print('successfully loaded VehicleID weights.')
        elif Mode == 'resume_training':
            # NOTE(review): `folder_model_weights_path` is also undefined here
            # (and Weights_path is ignored) — verify against the module globals.
            model_weights_path = os.path.join(folder_model_weights_path, model_name)
            try:
                model.load_weights(model_weights_path, by_name=True)
                print('successfully loaded weights to resum training.')
            except:  # NOTE(review): bare except silently swallows load failures
                pass
        # Combined objective: softmax cross-entropy plus 0.1-weighted KL term
        # pulling (z_mean, z_log_var) toward a standard normal.
        kl_loss = (-0.5) * K.sum(((1 + z_log_var) - K.square(z_mean)) - K.exp(z_log_var), axis=-1)
        cls_loss = K.categorical_crossentropy(y, y_pred)
        combined_loss = K.mean(cls_loss + (kl_loss * 0.1))
        model.add_loss(combined_loss)
        opt = Adam(lr=1e-05, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
        model.compile(optimizer=opt, metrics=['accuracy'])
        model.summary()
        target_feature = 'z_mean'
        print('Model is ready ....')
        return (model, model_name, target_feature)
def _conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1)):
    """Standard MobileNet stem: ZeroPad -> Conv2D (no bias) -> BN -> ReLU6."""
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    filters = int(filters * alpha)  # width multiplier
    x = ZeroPadding2D(padding=(1, 1), name='conv1_pad')(inputs)
    x = Conv2D(filters, kernel, padding='valid', use_bias=False,
               strides=strides, name='conv1')(x)
    x = BatchNormalization(axis=channel_axis, name='conv1_bn')(x)
    return Activation(relu6, name='conv1_relu')(x)
def _depthwise_conv_block(inputs, pointwise_conv_filters, alpha, depth_multiplier=1,
                          strides=(1, 1), block_id=1):
    """MobileNet depthwise-separable block:
    ZeroPad -> DepthwiseConv -> BN -> ReLU6 -> 1x1 Conv -> BN -> ReLU6."""
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    pointwise_conv_filters = int(pointwise_conv_filters * alpha)  # width multiplier
    x = ZeroPadding2D(padding=(1, 1), name='conv_pad_%d' % block_id)(inputs)
    x = DepthwiseConv2D((3, 3), padding='valid', depth_multiplier=depth_multiplier,
                        strides=strides, use_bias=False,
                        name='conv_dw_%d' % block_id)(x)
    x = BatchNormalization(axis=channel_axis, name='conv_dw_%d_bn' % block_id)(x)
    x = Activation(relu6, name='conv_dw_%d_relu' % block_id)(x)
    x = Conv2D(pointwise_conv_filters, (1, 1), padding='same', use_bias=False,
               strides=(1, 1), name='conv_pw_%d' % block_id)(x)
    x = BatchNormalization(axis=channel_axis, name='conv_pw_%d_bn' % block_id)(x)
    return Activation(relu6, name='conv_pw_%d_relu' % block_id)(x)
def display_output(output):
    """Print *output* on top of the previously printed console line."""
    # '\x1b[F' is the ANSI "cursor to previous line" escape sequence.
    cursor_up = '\x1b[F'
    sys.stdout.write(cursor_up)
    print(output)
def eval(args):
    """Build the encoders/decoder, optionally load a checkpoint, and save a
    grid of chosen A<->B translations via save_chosen_imgs.

    NOTE: shadows the builtin eval(); the name is kept for the existing CLI callers.
    """
    latent_size = int(args.resize / 64)  # spatial size of the bottleneck feature map
    e_common = E_common(args.sep, latent_size)
    e_separate_A = E_separate_A(args.sep, latent_size)
    e_separate_B = E_separate_B(args.sep, latent_size)
    decoder = Decoder(latent_size)
    if torch.cuda.is_available():
        e_common = e_common.cuda()
        e_separate_A = e_separate_A.cuda()
        e_separate_B = e_separate_B.cuda()
        decoder = decoder.cuda()
    # BUG FIX: _iter was previously unbound when args.load == '', raising
    # NameError at the save_chosen_imgs call below; default to iteration 0.
    _iter = 0
    if args.load != '':
        save_file = os.path.join(args.load, 'checkpoint')
        _iter = load_model_for_eval(save_file, e_common, e_separate_A, e_separate_B, decoder)
    e_common = e_common.eval()
    e_separate_A = e_separate_A.eval()
    e_separate_B = e_separate_B.eval()
    decoder = decoder.eval()
    if (not os.path.exists(args.out)) and (args.out != ''):
        os.mkdir(args.out)
    save_chosen_imgs(args, e_common, e_separate_A, e_separate_B, decoder, _iter,
                     [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], False)
class E_common(nn.Module):
    """Encoder for the domain-shared ("common") part of the latent code.

    Six stride-2 convolution stages map a 3-channel image down to a
    (dim - 2*sep, size, size) feature map, which forward() returns flattened.
    """

    def __init__(self, sep, size, dim=512):
        super(E_common, self).__init__()
        self.sep = sep    # channels reserved (per domain) for the separate encoders
        self.size = size  # spatial size of the final feature map
        self.dim = dim
        self.layer1 = []
        self.layer2 = []
        self.layer3 = []
        self.layer4 = []
        self.layer5 = []
        self.layer6 = []
        # Length of the flattened common code.
        self.z_dim_size = (((self.dim - (2 * self.sep)) * self.size) * self.size)
        self.layer1.append(SpectralNorm(nn.Conv2d(3, 32, 4, 2, 1)))
        self.layer1.append(nn.InstanceNorm2d(32))
        self.layer1.append(nn.LeakyReLU(0.2, inplace=True))
        self.l1 = nn.Sequential(*self.layer1)
        self.layer2.append(SpectralNorm(nn.Conv2d(32, 64, 4, 2, 1)))
        # BUG FIX: this norm was appended to self.layer1 (after l1 had already
        # been built), so stage 2 silently ran without instance normalization.
        # InstanceNorm2d has no parameters by default, so checkpoints still load.
        self.layer2.append(nn.InstanceNorm2d(64))
        self.layer2.append(nn.LeakyReLU(0.2, inplace=True))
        self.l2 = nn.Sequential(*self.layer2)
        self.layer3.append(SpectralNorm(nn.Conv2d(64, 128, 4, 2, 1)))
        self.layer3.append(nn.InstanceNorm2d(128))
        self.layer3.append(nn.LeakyReLU(0.2, inplace=True))
        self.l3 = nn.Sequential(*self.layer3)
        self.layer4.append(SpectralNorm(nn.Conv2d(128, 256, 4, 2, 1)))
        self.layer4.append(nn.InstanceNorm2d(256))
        self.layer4.append(nn.LeakyReLU(0.2, inplace=True))
        self.l4 = nn.Sequential(*self.layer4)
        self.layer5.append(SpectralNorm(nn.Conv2d(256, (512 - self.sep), 4, 2, 1)))
        self.layer5.append(nn.InstanceNorm2d((512 - self.sep)))
        self.layer5.append(nn.LeakyReLU(0.2, inplace=True))
        self.l5 = nn.Sequential(*self.layer5)
        self.layer6.append(SpectralNorm(nn.Conv2d((512 - self.sep), (512 - (2 * self.sep)), 4, 2, 1)))
        self.layer6.append(nn.InstanceNorm2d((512 - (2 * self.sep))))
        self.layer6.append(nn.LeakyReLU(0.2, inplace=True))
        self.l6 = nn.Sequential(*self.layer6)

    def forward(self, net):
        """Encode an image batch to a flat (N, z_dim_size) common code."""
        out = self.l1(net)
        out = self.l2(out)
        out = self.l3(out)
        out = self.l4(out)
        out = self.l5(out)
        out = self.l6(out)
        out = out.view((- 1), self.z_dim_size)
        return out
class E_separate_A(nn.Module):
    """Encoder for the domain-A-specific ("separate") part of the latent code."""

    def __init__(self, sep, size):
        super(E_separate_A, self).__init__()
        self.sep = sep    # number of domain-specific channels in the last stage
        self.size = size  # spatial size of the final feature map
        self.layer1 = []
        self.layer2 = []
        self.layer3 = []
        self.layer4 = []
        self.layer5 = []
        self.layer6 = []
        self.layer1.append(SpectralNorm(nn.Conv2d(3, 32, 4, 2, 1)))
        self.layer1.append(nn.InstanceNorm2d(32))
        self.layer1.append(nn.LeakyReLU(0.2, inplace=True))
        self.l1 = nn.Sequential(*self.layer1)
        self.layer2.append(SpectralNorm(nn.Conv2d(32, 64, 4, 2, 1)))
        self.layer2.append(nn.InstanceNorm2d(64))
        self.layer2.append(nn.LeakyReLU(0.2, inplace=True))
        self.l2 = nn.Sequential(*self.layer2)
        self.layer3.append(SpectralNorm(nn.Conv2d(64, 128, 4, 2, 1)))
        self.layer3.append(nn.InstanceNorm2d(128))
        self.layer3.append(nn.LeakyReLU(0.2, inplace=True))
        self.l3 = nn.Sequential(*self.layer3)
        self.layer4.append(SpectralNorm(nn.Conv2d(128, 256, 4, 2, 1)))
        self.layer4.append(nn.InstanceNorm2d(256))
        self.layer4.append(nn.LeakyReLU(0.2, inplace=True))
        self.l4 = nn.Sequential(*self.layer4)
        self.layer5.append(SpectralNorm(nn.Conv2d(256, 512, 4, 2, 1)))
        # BUG FIX: this norm was declared with self.sep features although the
        # stage outputs 512 channels. Numerically harmless because affine=False
        # (num_features is unused in that case), but wrong and misleading.
        self.layer5.append(nn.InstanceNorm2d(512))
        self.layer5.append(nn.LeakyReLU(0.2, inplace=True))
        self.l5 = nn.Sequential(*self.layer5)
        self.layer6.append(SpectralNorm(nn.Conv2d(512, self.sep, 4, 2, 1)))
        # BUG FIX: likewise this stage outputs self.sep channels, not 512.
        self.layer6.append(nn.InstanceNorm2d(self.sep))
        self.layer6.append(nn.LeakyReLU(0.2, inplace=True))
        self.l6 = nn.Sequential(*self.layer6)

    def forward(self, net):
        """Encode an image batch to a flat (N, sep*size*size) domain-A code."""
        out = self.l1(net)
        out = self.l2(out)
        out = self.l3(out)
        out = self.l4(out)
        out = self.l5(out)
        out = self.l6(out)
        out = out.view((- 1), ((self.sep * self.size) * self.size))
        return out
class E_separate_B(nn.Module):
    """Encoder for the domain-B-specific ("separate") part of the latent code."""

    def __init__(self, sep, size):
        super(E_separate_B, self).__init__()
        self.sep = sep    # number of domain-specific channels in the last stage
        self.size = size  # spatial size of the final feature map
        self.layer1 = []
        self.layer2 = []
        self.layer3 = []
        self.layer4 = []
        self.layer5 = []
        self.layer6 = []
        self.layer1.append(SpectralNorm(nn.Conv2d(3, 32, 4, 2, 1)))
        self.layer1.append(nn.InstanceNorm2d(32))
        self.layer1.append(nn.LeakyReLU(0.2, inplace=True))
        self.l1 = nn.Sequential(*self.layer1)
        self.layer2.append(SpectralNorm(nn.Conv2d(32, 64, 4, 2, 1)))
        self.layer2.append(nn.InstanceNorm2d(64))
        self.layer2.append(nn.LeakyReLU(0.2, inplace=True))
        self.l2 = nn.Sequential(*self.layer2)
        self.layer3.append(SpectralNorm(nn.Conv2d(64, 128, 4, 2, 1)))
        self.layer3.append(nn.InstanceNorm2d(128))
        self.layer3.append(nn.LeakyReLU(0.2, inplace=True))
        self.l3 = nn.Sequential(*self.layer3)
        self.layer4.append(SpectralNorm(nn.Conv2d(128, 256, 4, 2, 1)))
        self.layer4.append(nn.InstanceNorm2d(256))
        self.layer4.append(nn.LeakyReLU(0.2, inplace=True))
        self.l4 = nn.Sequential(*self.layer4)
        self.layer5.append(SpectralNorm(nn.Conv2d(256, 512, 4, 2, 1)))
        # BUG FIX: this norm was declared with self.sep features although the
        # stage outputs 512 channels. Numerically harmless because affine=False
        # (num_features is unused in that case), but wrong and misleading.
        self.layer5.append(nn.InstanceNorm2d(512))
        self.layer5.append(nn.LeakyReLU(0.2, inplace=True))
        self.l5 = nn.Sequential(*self.layer5)
        self.layer6.append(SpectralNorm(nn.Conv2d(512, self.sep, 4, 2, 1)))
        # BUG FIX: likewise this stage outputs self.sep channels, not 512.
        self.layer6.append(nn.InstanceNorm2d(self.sep))
        self.layer6.append(nn.LeakyReLU(0.2, inplace=True))
        self.l6 = nn.Sequential(*self.layer6)

    def forward(self, net):
        """Encode an image batch to a flat (N, sep*size*size) domain-B code."""
        out = self.l1(net)
        out = self.l2(out)
        out = self.l3(out)
        out = self.l4(out)
        out = self.l5(out)
        out = self.l6(out)
        out = out.view((- 1), ((self.sep * self.size) * self.size))
        return out
class Decoder(nn.Module):
    """Upsamples a flat (dim*size*size) latent back to a 3-channel image in [-1, 1].

    Six stride-2 transposed-convolution stages mirror the encoders; the last
    stage uses tanh instead of norm + ReLU.
    """

    def __init__(self, size, dim=512):
        super(Decoder, self).__init__()
        self.size = size  # spatial size of the input feature map
        self.dim = dim    # channel count of the input feature map
        self.layer1 = [SpectralNorm(nn.ConvTranspose2d(512, 512, 4, 2, 1)),
                       nn.InstanceNorm2d(512),
                       nn.ReLU(inplace=True)]
        self.l1 = nn.Sequential(*self.layer1)
        self.layer2 = [SpectralNorm(nn.ConvTranspose2d(512, 256, 4, 2, 1)),
                       nn.InstanceNorm2d(256),
                       nn.ReLU(inplace=True)]
        self.l2 = nn.Sequential(*self.layer2)
        self.layer3 = [SpectralNorm(nn.ConvTranspose2d(256, 128, 4, 2, 1)),
                       nn.InstanceNorm2d(128),
                       nn.ReLU(inplace=True)]
        self.l3 = nn.Sequential(*self.layer3)
        self.layer4 = [SpectralNorm(nn.ConvTranspose2d(128, 64, 4, 2, 1)),
                       nn.InstanceNorm2d(64),
                       nn.ReLU(inplace=True)]
        self.l4 = nn.Sequential(*self.layer4)
        self.layer5 = [SpectralNorm(nn.ConvTranspose2d(64, 32, 4, 2, 1)),
                       nn.InstanceNorm2d(32),
                       nn.ReLU(inplace=True)]
        self.l5 = nn.Sequential(*self.layer5)
        # Final stage: map to RGB and squash into [-1, 1].
        self.layer6 = [nn.ConvTranspose2d(32, 3, 4, 2, 1),
                       nn.Tanh()]
        self.l6 = nn.Sequential(*self.layer6)

    def forward(self, net):
        """Reshape the flat latent to (N, dim, size, size) and decode to an image."""
        out = net.view((- 1), self.dim, self.size, self.size)
        for stage in (self.l1, self.l2, self.l3, self.l4, self.l5, self.l6):
            out = stage(out)
        return out
class Disc(nn.Module):
    """Probability classifier over the flattened common code (domain A vs B)."""

    def __init__(self, sep, size, dim=512):
        super(Disc, self).__init__()
        self.sep = sep
        self.size = size
        self.dim = dim
        # Flattened length of the common code fed to the classifier.
        feat_len = (dim - 2 * self.sep) * self.size * self.size
        self.classify = nn.Sequential(
            nn.Linear(feat_len, dim),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(dim, 1),
            nn.Sigmoid(),
        )

    def forward(self, net):
        """Return one probability per input sample, as a flat 1-D tensor."""
        feat_len = (self.dim - 2 * self.sep) * self.size * self.size
        flat = net.view(-1, feat_len)
        scores = self.classify(flat)
        return scores.view(-1)
def l2normalize(v, eps=1e-12):
    """Scale tensor *v* to unit L2 norm; *eps* guards against division by zero."""
    denom = v.norm() + eps
    return v / denom
class SpectralNorm(nn.Module):
    """Wraps *module* and normalizes its weight by its largest singular value.

    The raw weight is stored as `<name>_bar`; `<name>_u` / `<name>_v` hold the
    running left/right singular-vector estimates, refined by power iteration
    on every forward pass (spectral normalization for GANs).
    """

    def __init__(self, module, name='weight', power_iterations=1):
        # module: the layer to wrap; name: the parameter to normalize;
        # power_iterations: number of power-iteration steps per forward pass.
        super(SpectralNorm, self).__init__()
        self.module = module
        self.name = name
        self.power_iterations = power_iterations
        # Only re-register the parameters on first wrap (idempotent).
        if (not self._made_params()):
            self._make_params()

    def _update_u_v(self):
        """Run power iteration and set module.<name> = w_bar / sigma."""
        u = getattr(self.module, (self.name + '_u'))
        v = getattr(self.module, (self.name + '_v'))
        w = getattr(self.module, (self.name + '_bar'))
        # Treat the weight as a (height x -1) matrix for the iteration.
        height = w.data.shape[0]
        for _ in range(self.power_iterations):
            v.data = l2normalize(torch.mv(torch.t(w.view(height, (- 1)).data), u.data))
            u.data = l2normalize(torch.mv(w.view(height, (- 1)).data, v.data))
        # sigma approximates the spectral norm (largest singular value) of w.
        sigma = u.dot(w.view(height, (- 1)).mv(v))
        setattr(self.module, self.name, (w / sigma.expand_as(w)))

    def _made_params(self):
        """Return True when the u/v/bar parameters already exist on the module."""
        try:
            u = getattr(self.module, (self.name + '_u'))
            v = getattr(self.module, (self.name + '_v'))
            w = getattr(self.module, (self.name + '_bar'))
            return True
        except AttributeError:
            return False

    def _make_params(self):
        """Replace module.<name> with <name>_bar plus fixed u/v estimate vectors."""
        w = getattr(self.module, self.name)
        height = w.data.shape[0]
        width = w.view(height, (- 1)).data.shape[1]
        # u/v are kept out of the gradient computation (requires_grad=False).
        u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False)
        v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False)
        u.data = l2normalize(u.data)
        v.data = l2normalize(v.data)
        w_bar = Parameter(w.data)
        # Remove the original parameter so the normalized tensor can take its name.
        del self.module._parameters[self.name]
        self.module.register_parameter((self.name + '_u'), u)
        self.module.register_parameter((self.name + '_v'), v)
        self.module.register_parameter((self.name + '_bar'), w_bar)

    def forward(self, *args):
        # Refresh the normalized weight, then delegate to the wrapped module.
        self._update_u_v()
        return self.module.forward(*args)
def _write_image_list(out_path, root, names):
    """Write one root-joined image path per line (no trailing newline) to *out_path*."""
    with open(out_path, 'w') as f:
        f.write('\n'.join(os.path.join(root, name) for name in names))


def preprocess_celeba(args):
    """Split CelebA images into domain A/B train/test path lists.

    Reads the CelebA attribute file (args.attributes), assigns each image to
    domain A or B according to args.config, and writes testA/testB/trainA/
    trainB .txt lists (full paths under args.root) into args.dest. The first
    args.num_test_imgs of each domain become the test split.
    """
    if not os.path.exists(args.dest):
        os.mkdir(args.dest)
    allA = []
    allB = []
    with open(args.attributes) as f:
        lines = f.readlines()
    # The first two lines of the attribute file are a header (count + column names).
    for line in lines[2:]:
        line = line.split()
        if args.config == 'beard_glasses':
            if male_no_5_oclock(line) and beard(line) and (not glasses(line)):
                allA.append(line[0])
            elif male_no_5_oclock(line) and (not beard(line)) and glasses(line):
                allB.append(line[0])
        elif args.config == 'beard_smile':
            if male_no_5_oclock(line) and beard(line) and (not smile(line)):
                allA.append(line[0])
            elif male_no_5_oclock(line) and (not beard(line)) and smile(line):
                allB.append(line[0])
        elif args.config == 'smile_glasses':
            if smile(line) and (not glasses(line)):
                allA.append(line[0])
            elif (not smile(line)) and glasses(line):
                allB.append(line[0])
        elif args.config == 'male_female':
            # Attribute 21 is the Male flag (+1 male / -1 female).
            if int(line[21]) == 1:
                allA.append(line[0])
            else:
                allB.append(line[0])
        elif args.config == 'blond_black':
            if blonde_hair(line) and (not hat(line)):
                allA.append(line[0])
            elif black_hair(line) and (not hat(line)):
                allB.append(line[0])
    testA = allA[:args.num_test_imgs]
    testB = allB[:args.num_test_imgs]
    trainA = allA[args.num_test_imgs:]
    trainB = allB[args.num_test_imgs:]
    _write_image_list(os.path.join(args.dest, 'testA.txt'), args.root, testA)
    _write_image_list(os.path.join(args.dest, 'testB.txt'), args.root, testB)
    _write_image_list(os.path.join(args.dest, 'trainA.txt'), args.root, trainA)
    _write_image_list(os.path.join(args.dest, 'trainB.txt'), args.root, trainB)
def male_no_5_oclock(line):
    """True for male faces without 5-o'clock shadow (CelebA attribute row)."""
    is_male = int(line[21]) == 1
    no_shadow = int(line[1]) == -1
    return is_male and no_shadow
def beard(line):
    """True when the CelebA row shows facial hair (goatee, mustache, or not 'No_Beard')."""
    has_goatee = int(line[23]) == 1
    has_mustache = int(line[17]) == 1
    not_beardless = int(line[25]) == -1
    return has_goatee or has_mustache or not_beardless
def glasses(line):
    """True when the CelebA eyeglasses attribute is set."""
    flag = int(line[16])
    return flag == 1
def smile(line):
    """True when the CelebA smiling attribute is set."""
    flag = int(line[32])
    return flag == 1
def blonde_hair(line):
    """True when the CelebA blond-hair attribute is set."""
    flag = int(line[10])
    return flag == 1
def black_hair(line):
    """True when the CelebA black-hair attribute is set."""
    flag = int(line[9])
    return flag == 1
def _write_listing(out_path, img_dir, names):
    """Write one img_dir-joined path per line (no trailing newline) to *out_path*."""
    with open(out_path, 'w') as f:
        f.write('\n'.join(os.path.join(img_dir, name) for name in names))


def preprocess_folders(args):
    """Build trainA/trainB/testA/testB path lists from a folder-per-domain dataset.

    Lists every file under args.root/{testA,testB,trainA,trainB} and writes the
    full paths into matching .txt files under args.dest.
    """
    if not os.path.exists(args.dest):
        os.mkdir(args.dest)
    for subset in ('testA', 'testB', 'trainA', 'trainB'):
        img_dir = os.path.join(args.root, subset)
        # BUG FIX: the written paths previously joined args.root directly with
        # the bare filename, dropping the subset folder and therefore pointing
        # at files that do not exist; join with the subset directory instead.
        _write_listing(os.path.join(args.dest, subset + '.txt'), img_dir, os.listdir(img_dir))
def train(args):
    """Main training loop for the content-disentanglement autoencoder + discriminator.

    Trains a shared ("common") encoder, two domain-specific encoders, a decoder,
    and a discriminator over the common code. Periodically logs losses, saves
    sample grids, and checkpoints everything to args.out.
    """
    if not os.path.exists(args.out):
        os.makedirs(args.out)
    _iter = 0
    (domA_train, domB_train) = get_train_dataset(args)
    size = (args.resize // 64)  # spatial size of the bottleneck feature map
    dim = 512
    e_common = E_common(args.sep, size, dim=dim)
    e_separate_A = E_separate_A(args.sep, size)
    e_separate_B = E_separate_B(args.sep, size)
    decoder = Decoder(size, dim=dim)
    disc = Disc(args.sep, size, dim=dim)
    # Discriminator targets: domain A -> 1, domain B -> 0.
    A_label = torch.full((args.bs,), 1)
    B_label = torch.full((args.bs,), 0)
    # Constant fillers used in place of the "other" domain's separate code.
    zero_encoding = torch.full((args.bs, ((args.sep * size) * size)), 0)
    one_encoding = torch.full((args.bs, ((args.sep * size) * size)), 1)
    l1 = nn.L1Loss()
    bce = nn.BCELoss()
    if torch.cuda.is_available():
        e_common = e_common.cuda()
        e_separate_A = e_separate_A.cuda()
        e_separate_B = e_separate_B.cuda()
        decoder = decoder.cuda()
        disc = disc.cuda()
        A_label = A_label.cuda()
        B_label = B_label.cuda()
        zero_encoding = zero_encoding.cuda()
        one_encoding = one_encoding.cuda()
        l1 = l1.cuda()
        bce = bce.cuda()
    ae_params = (list(e_common.parameters()) + list(e_separate_A.parameters())
                 + list(e_separate_B.parameters()) + list(decoder.parameters()))
    ae_optimizer = optim.Adam(ae_params, lr=args.lr, betas=(0.5, 0.999))
    # BUG FIX: disc.parameters() returns a one-shot generator; the Adam
    # constructor consumed it, so the later clip_grad_norm_(disc_params, 5)
    # iterated an empty sequence and never clipped anything. Materialize it.
    disc_params = list(disc.parameters())
    disc_optimizer = optim.Adam(disc_params, lr=args.disclr, betas=(0.5, 0.999))
    if args.load != '':
        save_file = os.path.join(args.load, 'checkpoint')
        _iter = load_model(save_file, e_common, e_separate_A, e_separate_B,
                           decoder, ae_optimizer, disc, disc_optimizer)
    e_common = e_common.train()
    e_separate_A = e_separate_A.train()
    e_separate_B = e_separate_B.train()
    decoder = decoder.train()
    disc = disc.train()
    logger = Logger(args.out)
    print('Started training...')
    while True:
        # Fresh loaders each epoch so shuffling re-randomizes the pairing.
        domA_loader = torch.utils.data.DataLoader(domA_train, batch_size=args.bs,
                                                  shuffle=True, num_workers=6)
        domB_loader = torch.utils.data.DataLoader(domB_train, batch_size=args.bs,
                                                  shuffle=True, num_workers=6)
        if _iter >= args.iters:
            break
        for (domA_img, domB_img) in zip(domA_loader, domB_loader):
            # Skip ragged final batches; the fixed-size label/filler tensors
            # above assume exactly args.bs samples.
            if (domA_img.size(0) != args.bs) or (domB_img.size(0) != args.bs):
                break
            domA_img = Variable(domA_img)
            domB_img = Variable(domB_img)
            if torch.cuda.is_available():
                domA_img = domA_img.cuda()
                domB_img = domB_img.cuda()
            domA_img = domA_img.view(((- 1), 3, args.resize, args.resize))
            domB_img = domB_img.view(((- 1), 3, args.resize, args.resize))
            # ---- autoencoder step ----
            ae_optimizer.zero_grad()
            A_common = e_common(domA_img)
            A_separate_A = e_separate_A(domA_img)
            A_separate_B = e_separate_B(domA_img)
            # Full latent = [common | A-specific | B-specific-slot].
            if args.no_flag:
                A_encoding = torch.cat([A_common, A_separate_A, A_separate_A], dim=1)
            else:
                A_encoding = torch.cat([A_common, A_separate_A, zero_encoding], dim=1)
            B_common = e_common(domB_img)
            B_separate_A = e_separate_A(domB_img)
            B_separate_B = e_separate_B(domB_img)
            if args.one_encoding:
                B_encoding = torch.cat([B_common, B_separate_B, one_encoding], dim=1)
            elif args.no_flag:
                B_encoding = torch.cat([B_common, B_separate_B, B_separate_B], dim=1)
            else:
                B_encoding = torch.cat([B_common, zero_encoding, B_separate_B], dim=1)
            A_decoding = decoder(A_encoding)
            B_decoding = decoder(B_encoding)
            A_reconstruction_loss = l1(A_decoding, domA_img)
            B_reconstruction_loss = l1(B_decoding, domB_img)
            # Push the "wrong domain" separate codes towards zero.
            A_separate_B_loss = l1(A_separate_B, zero_encoding)
            B_separate_A_loss = l1(B_separate_A, zero_encoding)
            logger.add_value('A_recon', A_reconstruction_loss)
            logger.add_value('B_recon', B_reconstruction_loss)
            logger.add_value('A_sep_B', A_separate_B_loss)
            logger.add_value('B_sep_A', B_separate_A_loss)
            loss = 0
            if args.reconweight > 0:
                loss += (args.reconweight * (A_reconstruction_loss + B_reconstruction_loss))
            if args.zeroweight > 0:
                loss += (args.zeroweight * (A_separate_B_loss + B_separate_A_loss))
            if args.discweight > 0:
                # Adversarial term: make common codes indistinguishable to disc.
                preds_A = disc(A_common)
                preds_B = disc(B_common)
                distribution_adverserial_loss = (args.discweight
                                                 * (bce(preds_A, B_label) + bce(preds_B, B_label)))
                logger.add_value('distribution_adverserial', distribution_adverserial_loss)
                loss += distribution_adverserial_loss
            loss.backward()
            torch.nn.utils.clip_grad_norm_(ae_params, 5)
            ae_optimizer.step()
            # ---- discriminator step ----
            if args.discweight > 0:
                disc_optimizer.zero_grad()
                A_common = e_common(domA_img)
                B_common = e_common(domB_img)
                disc_A = disc(A_common)
                disc_B = disc(B_common)
                loss = (bce(disc_A, A_label) + bce(disc_B, B_label))
                logger.add_value('dist_disc', loss)
                loss.backward()
                torch.nn.utils.clip_grad_norm_(disc_params, 5)
                disc_optimizer.step()
            if (_iter % args.progress_iter) == 0:
                print(('Outfile: %s <<>> Iteration %d' % (args.out, _iter)))
            if (_iter % args.log_iter) == 0:
                logger.log(_iter)
                logger.reset()
            if (_iter % args.display_iter) == 0:
                # Temporarily switch to eval mode for the sample grids.
                e_common = e_common.eval()
                e_separate_A = e_separate_A.eval()
                e_separate_B = e_separate_B.eval()
                decoder = decoder.eval()
                save_imgs(args, e_common, e_separate_A, e_separate_B, decoder, _iter, size=size, BtoA=True)
                save_imgs(args, e_common, e_separate_A, e_separate_B, decoder, _iter, size=size, BtoA=False)
                save_stripped_imgs(args, e_common, e_separate_A, e_separate_B, decoder, _iter, size=size, A=True)
                save_stripped_imgs(args, e_common, e_separate_A, e_separate_B, decoder, _iter, size=size, A=False)
                e_common = e_common.train()
                e_separate_A = e_separate_A.train()
                e_separate_B = e_separate_B.train()
                decoder = decoder.train()
            if (_iter % args.save_iter) == 0:
                save_file = os.path.join(args.out, 'checkpoint')
                save_model(save_file, e_common, e_separate_A, e_separate_B,
                           decoder, ae_optimizer, disc, disc_optimizer, _iter)
            _iter += 1
def get_test_dataset(args, crop=None, resize=None):
    """Return (domain A, domain B) test datasets with deterministic preprocessing."""
    crop = args.crop if crop is None else crop
    resize = args.resize if resize is None else resize
    # Center-crop -> resize -> tensor scaled to [-1, 1] (0.5 mean/std per channel).
    preprocess = transforms.Compose([
        transforms.CenterCrop(crop),
        transforms.Resize(resize),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    domA_test = CustomDataset(os.path.join(args.root, 'testA.txt'), transform=preprocess)
    domB_test = CustomDataset(os.path.join(args.root, 'testB.txt'), transform=preprocess)
    return (domA_test, domB_test)
def get_train_dataset(args, crop=None, resize=None):
    """Return (domain A, domain B) train datasets with flip augmentation."""
    crop = args.crop if crop is None else crop
    resize = args.resize if resize is None else resize
    # Same preprocessing as the test datasets plus a random horizontal flip.
    preprocess = transforms.Compose([
        transforms.CenterCrop(crop),
        transforms.Resize(resize),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    domA_train = CustomDataset(os.path.join(args.root, 'trainA.txt'), transform=preprocess)
    domB_train = CustomDataset(os.path.join(args.root, 'trainB.txt'), transform=preprocess)
    return (domA_train, domB_train)
def save_imgs(args, e_common, e_separate_A, e_separate_B, decoder, iters, size, BtoA=True, num_offsets=1):
    """Save a translation grid (B->A when BtoA, else A->B) as a PNG.

    The grid's first row shows the source-domain images (with a blank corner
    cell); each following row shows one target image followed by its
    translations against every source image. One grid is saved per offset
    window of args.num_display test images.
    """
    (test_domA, test_domB) = get_test_imgs(args)
    for k in range(num_offsets):
        exps = []
        # Header row: blank corner cell + the source-domain images.
        for i in range((k * args.num_display), ((k + 1) * args.num_display)):
            with torch.no_grad():
                if (i == (k * args.num_display)):
                    filler = test_domB[i].unsqueeze(0).clone()
                    exps.append(filler.fill_(0))
                if BtoA:
                    exps.append(test_domB[i].unsqueeze(0))
                else:
                    exps.append(test_domA[i].unsqueeze(0))
        if BtoA:
            for i in range((k * args.num_display), ((k + 1) * args.num_display)):
                # Row label: the domain-A image providing the separate code.
                exps.append(test_domA[i].unsqueeze(0))
                separate_A = e_separate_A(test_domA[i].unsqueeze(0))
                for j in range((k * args.num_display), ((k + 1) * args.num_display)):
                    with torch.no_grad():
                        # Combine B's common content with A's separate style.
                        common_B = e_common(test_domB[j].unsqueeze(0))
                        zero_encoding = torch.full((1, ((args.sep * size) * size)), 0)
                        if torch.cuda.is_available():
                            zero_encoding = zero_encoding.cuda()
                        if args.no_flag:
                            BA_encoding = torch.cat([common_B, separate_A, separate_A], dim=1)
                            BA_decoding = decoder(BA_encoding)
                            exps.append(BA_decoding)
                        else:
                            BA_encoding = torch.cat([common_B, separate_A, zero_encoding], dim=1)
                            BA_decoding = decoder(BA_encoding)
                            exps.append(BA_decoding)
        else:
            for i in range((k * args.num_display), ((k + 1) * args.num_display)):
                # Row label: the domain-B image providing the separate code.
                exps.append(test_domB[i].unsqueeze(0))
                separate_B = e_separate_B(test_domB[i].unsqueeze(0))
                for j in range((k * args.num_display), ((k + 1) * args.num_display)):
                    with torch.no_grad():
                        # Combine A's common content with B's separate style.
                        common_A = e_common(test_domA[j].unsqueeze(0))
                        zero_encoding = torch.full((1, ((args.sep * size) * size)), 0)
                        one_encoding = torch.full((1, ((args.sep * size) * size)), 1)
                        if torch.cuda.is_available():
                            zero_encoding = zero_encoding.cuda()
                            one_encoding = one_encoding.cuda()
                        # Latent layout depends on the training configuration flags.
                        if args.one_encoding:
                            AB_encoding = torch.cat([common_A, separate_B, one_encoding], dim=1)
                        elif args.no_flag:
                            AB_encoding = torch.cat([common_A, separate_B, separate_B], dim=1)
                        else:
                            AB_encoding = torch.cat([common_A, zero_encoding, separate_B], dim=1)
                        AB_decoding = decoder(AB_encoding)
                        exps.append(AB_decoding)
        with torch.no_grad():
            exps = torch.cat(exps, 0)
        if BtoA:
            vutils.save_image(exps, ('%s/experiments_%06d_%d-BtoA.png' % (args.out, iters, k)), normalize=True, nrow=(args.num_display + 1))
        else:
            vutils.save_image(exps, ('%s/experiments_%06d_%d-AtoB.png' % (args.out, iters, k)), normalize=True, nrow=(args.num_display + 1))
def get_test_imgs(args, crop=None, resize=None):
    """Return the first test batch (up to 64 images) from each domain."""
    domA_test, domB_test = get_test_dataset(args, crop=crop, resize=resize)
    loader_A = torch.utils.data.DataLoader(domA_test, batch_size=64,
                                           shuffle=False, num_workers=6)
    loader_B = torch.utils.data.DataLoader(domB_test, batch_size=64,
                                           shuffle=False, num_workers=6)
    # Only the first batch of each loader is needed.
    batch_A = next(iter(loader_A))
    batch_B = next(iter(loader_B))
    if torch.cuda.is_available():
        batch_A = batch_A.cuda()
        batch_B = batch_B.cuda()
    return (batch_A, batch_B)
def save_model(out_file, e_common, e_separate_A, e_separate_B, decoder, ae_opt, disc, disc_opt, iters):
    """Serialize every network/optimizer state plus the iteration counter to *out_file*."""
    named = {
        'e_common': e_common,
        'e_separate_A': e_separate_A,
        'e_separate_B': e_separate_B,
        'decoder': decoder,
        'ae_opt': ae_opt,
        'disc': disc,
        'disc_opt': disc_opt,
    }
    checkpoint = {key: obj.state_dict() for key, obj in named.items()}
    checkpoint['iters'] = iters
    torch.save(checkpoint, out_file)
    return
def load_model(load_path, e_common, e_separate_A, e_separate_B, decoder, ae_opt, disc, disc_opt):
    """Restore all states written by save_model; return the stored iteration."""
    state = torch.load(load_path)
    targets = {
        'e_common': e_common,
        'e_separate_A': e_separate_A,
        'e_separate_B': e_separate_B,
        'decoder': decoder,
        'ae_opt': ae_opt,
        'disc': disc,
        'disc_opt': disc_opt,
    }
    for key, obj in targets.items():
        obj.load_state_dict(state[key])
    return state['iters']
def load_model_for_eval(load_path, e_common, e_separate_A, e_separate_B, decoder):
    """Restore only the generator networks (no optimizers/discriminator) for inference."""
    state = torch.load(load_path)
    for key, net in (('e_common', e_common), ('e_separate_A', e_separate_A),
                     ('e_separate_B', e_separate_B), ('decoder', decoder)):
        net.load_state_dict(state[key])
    return state['iters']
def edges_loader(path, train=True):
    """Load one half of a 512x256 side-by-side (A|B) paired image.

    The file holds domain A in the left 256x256 half and domain B in the
    right half; *train* selects the left (A) half, otherwise the right (B).
    """
    image = Image.open(path).convert('RGB')
    image_A = image.crop((0, 0, 256, 256))
    # BUG FIX: the right half's crop box was (0, 256, 512, 256) — a zero-height
    # region, since PIL boxes are (left, upper, right, lower). Crop the actual
    # right half instead.
    image_B = image.crop((256, 0, 512, 256))
    if train:
        return image_A
    else:
        return image_B
def default_loader(path):
    """Open the image at *path* with PIL, forcing 3-channel RGB."""
    image = Image.open(path)
    return image.convert('RGB')
class Logger():
    """Accumulates named scalar values and appends one line per log() call to log.txt."""

    def __init__(self, path):
        self.full_path = '%s/log.txt' % path
        # Open-then-close once to truncate any previous log file.
        self.log_file = open(self.full_path, 'w+')
        self.log_file.close()
        self.map = {}

    def add_value(self, tag, value):
        """Record (or overwrite) the latest value for *tag*."""
        self.map[tag] = value

    def log(self, iter):
        """Append a single line with the iteration number and all recorded values."""
        pieces = ['iter: %7d' % iter]
        pieces.extend('\t %s: %10.7f' % (tag, val) for (tag, val) in self.map.items())
        self.log_file = open(self.full_path, 'a')
        self.log_file.write(''.join(pieces) + '\n')
        self.log_file.close()

    def reset(self):
        """Forget all recorded values."""
        self.map = {}
class CustomDataset(data.Dataset):
    """Dataset backed by a text file listing one image path per line."""

    def __init__(self, path, transform=None, return_paths=False, loader=default_loader):
        super(CustomDataset, self).__init__()
        with open(path) as f:
            imgs = [line.replace('\n', '') for line in f.readlines()]
        if not imgs:
            message = ((('Found 0 images in: ' + path) + '\nSupported image extensions are: ') + ','.join(IMG_EXTENSIONS))
            raise RuntimeError(message)
        self.imgs = imgs
        self.transform = transform
        self.return_paths = return_paths
        self.loader = loader

    def __getitem__(self, index):
        """Load (and optionally transform) the image at *index*."""
        img_path = self.imgs[index]
        sample = self.loader(img_path)
        if self.transform is not None:
            sample = self.transform(sample)
        # Optionally pair the sample with its source path.
        return (sample, img_path) if self.return_paths else sample

    def __len__(self):
        return len(self.imgs)
def default_flist_reader(flist):
    """Read an image-list file: one whitespace-stripped image path per line."""
    with open(flist, 'r') as rf:
        return [line.strip() for line in rf.readlines()]
def is_image_file(filename):
    """True when *filename* ends with one of the known image extensions."""
    for extension in IMG_EXTENSIONS:
        if filename.endswith(extension):
            return True
    return False
def save_stripped_imgs(args, e_common, e_separate_A, e_separate_B, decoder, iters, size, A=True):
    """Save a grid visualizing the "stripped" latent components of one domain.

    For each of the first args.num_display test images (domain A when *A*,
    else B), the grid shows the original followed by decodings of partial
    latents (common-only / A-separate-only / B-separate-only, depending on
    the training configuration flags).
    """
    (test_domA, test_domB) = get_test_imgs(args)
    exps = []
    # Constant fillers substituted for the missing separate-code slots.
    zero_encoding = torch.full((1, ((args.sep * size) * size)), 0)
    one_encoding = torch.full((1, ((args.sep * size) * size)), 1)
    if torch.cuda.is_available():
        zero_encoding = zero_encoding.cuda()
        one_encoding = one_encoding.cuda()
    for i in range(args.num_display):
        if A:
            image = test_domA[i]
        else:
            image = test_domB[i]
        exps.append(image.unsqueeze(0))
        common = e_common(image.unsqueeze(0))
        # Zero tensor with the same shape as the common code, used to blank it out.
        content_zero_encoding = torch.full(common.size(), 0)
        if torch.cuda.is_available():
            content_zero_encoding = content_zero_encoding.cuda()
        separate_A = e_separate_A(image.unsqueeze(0))
        separate_B = e_separate_B(image.unsqueeze(0))
        # Decode partial latents; which combinations exist depends on the flags.
        if args.one_encoding:
            exps.append(decoder(torch.cat([content_zero_encoding, separate_A, zero_encoding], dim=1)))
            exps.append(decoder(torch.cat([content_zero_encoding, separate_B, one_encoding], dim=1)))
        elif args.no_flag:
            exps.append(decoder(torch.cat([content_zero_encoding, separate_A, separate_A], dim=1)))
            exps.append(decoder(torch.cat([content_zero_encoding, separate_B, separate_B], dim=1)))
        else:
            exps.append(decoder(torch.cat([common, zero_encoding, zero_encoding], dim=1)))
            exps.append(decoder(torch.cat([content_zero_encoding, separate_A, zero_encoding], dim=1)))
            exps.append(decoder(torch.cat([content_zero_encoding, zero_encoding, separate_B], dim=1)))
    with torch.no_grad():
        exps = torch.cat(exps, 0)
    if A:
        vutils.save_image(exps, ('%s/experiments_%06d-Astripped.png' % (args.out, iters)), normalize=True, nrow=args.num_display)
    else:
        vutils.save_image(exps, ('%s/experiments_%06d-Bstripped.png' % (args.out, iters)), normalize=True, nrow=args.num_display)
def save_chosen_imgs(args, e_common, e_separate_A, e_separate_B, decoder, iters, listA, listB, BtoA=True):
    """Save a translation grid for hand-picked test indices (listA x listB).

    Header row shows the source-domain images; each following row shows one
    target image and its translations against every source image.
    """
    (test_domA, test_domB) = get_test_imgs(args)
    exps = []
    # Header row: blank corner cell + the chosen source-domain images.
    for i in range(args.num_display):
        with torch.no_grad():
            if (i == 0):
                filler = test_domB[i].unsqueeze(0).clone()
                exps.append(filler.fill_(0))
            if BtoA:
                exps.append(test_domB[listB[i]].unsqueeze(0))
            else:
                exps.append(test_domA[listA[i]].unsqueeze(0))
    if BtoA:
        for i in listA:
            # Row label: the domain-A image providing the separate code.
            exps.append(test_domA[i].unsqueeze(0))
            separate_A = e_separate_A(test_domA[i].unsqueeze(0))
            for j in listB:
                with torch.no_grad():
                    # Combine B's common content with A's separate style.
                    common_B = e_common(test_domB[j].unsqueeze(0))
                    zero_encoding = torch.full((1, ((args.sep * (args.resize // 64)) * (args.resize // 64))), 0)
                    if torch.cuda.is_available():
                        zero_encoding = zero_encoding.cuda()
                    BA_encoding = torch.cat([common_B, separate_A, zero_encoding], dim=1)
                    BA_decoding = decoder(BA_encoding)
                    exps.append(BA_decoding)
    else:
        for i in listB:
            # Row label: the domain-B image providing the separate code.
            exps.append(test_domB[i].unsqueeze(0))
            separate_B = e_separate_B(test_domB[i].unsqueeze(0))
            for j in listA:
                with torch.no_grad():
                    # Combine A's common content with B's separate style.
                    common_A = e_common(test_domA[j].unsqueeze(0))
                    zero_encoding = torch.full((1, ((args.sep * (args.resize // 64)) * (args.resize // 64))), 0)
                    if torch.cuda.is_available():
                        zero_encoding = zero_encoding.cuda()
                    AB_encoding = torch.cat([common_A, zero_encoding, separate_B], dim=1)
                    AB_decoding = decoder(AB_encoding)
                    exps.append(AB_decoding)
    with torch.no_grad():
        exps = torch.cat(exps, 0)
    if BtoA:
        vutils.save_image(exps, ('%s/experiments_%06d-BtoA.png' % (args.out, iters)), normalize=True, nrow=(args.num_display + 1))
    else:
        vutils.save_image(exps, ('%s/experiments_%06d-AtoB.png' % (args.out, iters)), normalize=True, nrow=(args.num_display + 1))
def interpolate_fixed_common(args, e_common, e_separate_A, e_separate_B, decoder, imgA1, imgA2, imgB1, imgB2, content_img):
    """Save a 2-D interpolation grid with the common code held fixed.

    Columns interpolate the A-separate code between imgA1 and imgA2; rows
    interpolate the B-separate code between imgB1 and imgB2. The common
    content comes from test_domB[content_img].
    """
    (test_domA, test_domB) = get_test_imgs(args)
    exps = []
    common = e_common(test_domB[content_img].unsqueeze(0))
    a1 = e_separate_A(test_domA[imgA1].unsqueeze(0))
    a2 = e_separate_A(test_domA[imgA2].unsqueeze(0))
    b1 = e_separate_B(test_domB[imgB1].unsqueeze(0))
    b2 = e_separate_B(test_domB[imgB2].unsqueeze(0))
    with torch.no_grad():
        # Header row: endpoint A images in the corners, blanks elsewhere.
        # NOTE(review): the same `filler` tensor object is appended repeatedly;
        # this relies on it staying zero for the whole function.
        filler = test_domB[0].unsqueeze(0).clone()
        exps.append(filler.fill_(0))
        exps.append(test_domA[imgA1].unsqueeze(0))
        for i in range((args.num_display - 2)):
            exps.append(filler.fill_(0))
        exps.append(test_domA[imgA2].unsqueeze(0))
        for i in range(args.num_display):
            # Row label column: endpoint B images at top/bottom, blanks between.
            if (i == 0):
                exps.append(test_domB[imgB1].unsqueeze(0))
            elif (i == (args.num_display - 1)):
                exps.append(test_domB[imgB2].unsqueeze(0))
            else:
                exps.append(filler.fill_(0))
            for j in range(args.num_display):
                # Linear interpolation weights: j drives A, i drives B.
                cur_sep_A = (((float(j) / (args.num_display - 1)) * a2) + ((1 - (float(j) / (args.num_display - 1))) * a1))
                cur_sep_B = (((float(i) / (args.num_display - 1)) * b2) + ((1 - (float(i) / (args.num_display - 1))) * b1))
                encoding = torch.cat([common, cur_sep_A, cur_sep_B], dim=1)
                decoding = decoder(encoding)
                exps.append(decoding)
    with torch.no_grad():
        exps = torch.cat(exps, 0)
    vutils.save_image(exps, ('%s/interpolation_fixed_C.png' % args.out), normalize=True, nrow=(args.num_display + 1))
def interpolate_fixed_A(args, e_common, e_separate_A, e_separate_B, decoder, imgC1, imgC2, imgB1, imgB2, imgA):
    """Save a 2-D interpolation grid with the A-separate code held fixed.

    Columns interpolate the common code between imgC1 and imgC2; rows
    interpolate the B-separate code between imgB1 and imgB2. The fixed
    A-separate code comes from test_domA[imgA].
    """
    (test_domA, test_domB) = get_test_imgs(args)
    exps = []
    c1 = e_common(test_domB[imgC1].unsqueeze(0))
    c2 = e_common(test_domB[imgC2].unsqueeze(0))
    a = e_separate_A(test_domA[imgA].unsqueeze(0))
    b1 = e_separate_B(test_domB[imgB1].unsqueeze(0))
    b2 = e_separate_B(test_domB[imgB2].unsqueeze(0))
    with torch.no_grad():
        # Header row: endpoint content images in the corners, blanks elsewhere.
        filler = test_domB[0].unsqueeze(0).clone()
        exps.append(filler.fill_(0))
        exps.append(test_domB[imgC1].unsqueeze(0))
        for i in range((args.num_display - 2)):
            exps.append(filler.fill_(0))
        exps.append(test_domB[imgC2].unsqueeze(0))
        for i in range(args.num_display):
            # Row label column: endpoint B images at top/bottom, blanks between.
            if (i == 0):
                exps.append(test_domB[imgB1].unsqueeze(0))
            elif (i == (args.num_display - 1)):
                exps.append(test_domB[imgB2].unsqueeze(0))
            else:
                exps.append(filler.fill_(0))
            for j in range(args.num_display):
                # Linear interpolation weights: j drives the common code, i drives B.
                cur_common = (((float(j) / (args.num_display - 1)) * c2) + ((1 - (float(j) / (args.num_display - 1))) * c1))
                cur_sep_B = (((float(i) / (args.num_display - 1)) * b2) + ((1 - (float(i) / (args.num_display - 1))) * b1))
                encoding = torch.cat([cur_common, a, cur_sep_B], dim=1)
                decoding = decoder(encoding)
                exps.append(decoding)
    with torch.no_grad():
        exps = torch.cat(exps, 0)
    vutils.save_image(exps, ('%s/interpolation_fixed_A.png' % args.out), normalize=True, nrow=(args.num_display + 1))
def interpolate_fixed_B(args, e_common, e_separate_A, e_separate_B, decoder, imgC1, imgC2, imgA1, imgA2, imgB):
    # Save an image grid with (num_display + 1) columns: the header row shows
    # the two content anchor images, the first column shows the two A-domain
    # anchors, and each interior cell decodes a code built from a FIXED B
    # separate code, a common code interpolated between (c1, c2) across
    # columns, and an A separate code interpolated between (a1, a2) across rows.
    (test_domA, test_domB) = get_test_imgs(args)
    exps = []
    # Encode the two content anchors, the two A styles, and the fixed B style.
    c1 = e_common(test_domB[imgC1].unsqueeze(0))
    c2 = e_common(test_domB[imgC2].unsqueeze(0))
    a1 = e_separate_A(test_domA[imgA1].unsqueeze(0))
    a2 = e_separate_A(test_domA[imgA2].unsqueeze(0))
    b = e_separate_B(test_domB[imgB].unsqueeze(0))
    with torch.no_grad():
        # Blank (all-zero) padding cell; the same object is reused because it
        # always remains zero.
        filler = test_domB[0].unsqueeze(0).clone()
        # Header row: blank corner, content anchor C1, blanks, content anchor C2.
        exps.append(filler.fill_(0))
        exps.append(test_domB[imgC1].unsqueeze(0))
        for i in range((args.num_display - 2)):
            exps.append(filler.fill_(0))
        exps.append(test_domB[imgC2].unsqueeze(0))
        for i in range(args.num_display):
            # Row label column: A anchors on the first/last rows, blank otherwise.
            if (i == 0):
                exps.append(test_domA[imgA1].unsqueeze(0))
            elif (i == (args.num_display - 1)):
                exps.append(test_domA[imgA2].unsqueeze(0))
            else:
                exps.append(filler.fill_(0))
            for j in range(args.num_display):
                # Column index j blends the common codes, row index i blends
                # the A separate codes; the B separate code stays fixed.
                cur_common = (((float(j) / (args.num_display - 1)) * c2) + ((1 - (float(j) / (args.num_display - 1))) * c1))
                cur_sep_A = (((float(i) / (args.num_display - 1)) * a2) + ((1 - (float(i) / (args.num_display - 1))) * a1))
                encoding = torch.cat([cur_common, cur_sep_A, b], dim=1)
                decoding = decoder(encoding)
                exps.append(decoding)
    with torch.no_grad():
        exps = torch.cat(exps, 0)
    vutils.save_image(exps, ('%s/interpolation_fixed_B.png' % args.out), normalize=True, nrow=(args.num_display + 1))
class BUILD_NET_VGG16():
    # FCN-8s-style semantic segmentation network built on a pretrained VGG16
    # backbone, extended with a learned "relevance map" branch that gates the
    # first conv layer's activations with a region-of-interest (ROI) map.

    def __init__(self, vgg16_npy_path=None):
        # Pretrained VGG16 weights: a dict keyed by layer name, each entry
        # holding [filter_weights, biases].
        self.data_dict = np.load(vgg16_npy_path, encoding='latin1').item()
        print('npy file loaded')

    def build(self, rgb, ROIMap, NUM_CLASSES, keep_prob):
        """Build the ROI-gated VGG16 encoder + FCN-8s decoder graph.

        :param rgb: rgb image [batch, height, width, 3] values 0-255
        :param ROIMap: int map [batch, height, width, 1] marking the region of interest
        :param NUM_CLASSES: number of output segmentation classes
        :param keep_prob: dropout keep-probability placeholder
        """
        self.SumWeights = tf.constant(0.0, name='SumFiltersWeights')
        print('build model started')
        # VGG was trained on mean-subtracted BGR input, so reorder and shift.
        (red, green, blue) = tf.split(axis=3, num_or_size_splits=3, value=rgb)
        bgr = tf.concat(axis=3, values=[(blue - VGG_MEAN[0]), (green - VGG_MEAN[1]), (red - VGG_MEAN[2])])
        # conv1_1 is applied WITHOUT ReLU: its output is first gated by the
        # relevance map below, then activated.
        self.conv1_1 = self.conv_layer_NoRelu(bgr, 'conv1_1')
        # Freshly initialized 3x3 conv mapping the 1-channel ROI map to a
        # 64-channel relevance map (not part of the pretrained VGG weights).
        W = tf.Variable(tf.truncated_normal([3, 3, 1, 64], mean=0.0, stddev=0.01, dtype=tf.float32), name='W0')
        B = tf.Variable(tf.truncated_normal([64], mean=0.0, stddev=0.01, dtype=tf.float32), name='B0')
        self.RelevanceMap = tf.nn.bias_add(tf.nn.conv2d(tf.cast(ROIMap, tf.float32), W, [1, 1, 1, 1], padding='SAME'), B)
        # Elementwise gating of conv1_1 by the relevance map, then ReLU,
        # before continuing the standard VGG16 stack.
        self.conv1_2 = self.conv_layer(tf.nn.relu((self.conv1_1 * self.RelevanceMap)), 'conv1_2')
        self.pool1 = self.max_pool(self.conv1_2, 'pool1')
        self.conv2_1 = self.conv_layer(self.pool1, 'conv2_1')
        self.conv2_2 = self.conv_layer(self.conv2_1, 'conv2_2')
        self.pool2 = self.max_pool(self.conv2_2, 'pool2')
        self.conv3_1 = self.conv_layer(self.pool2, 'conv3_1')
        self.conv3_2 = self.conv_layer(self.conv3_1, 'conv3_2')
        self.conv3_3 = self.conv_layer(self.conv3_2, 'conv3_3')
        self.pool3 = self.max_pool(self.conv3_3, 'pool3')
        self.conv4_1 = self.conv_layer(self.pool3, 'conv4_1')
        self.conv4_2 = self.conv_layer(self.conv4_1, 'conv4_2')
        self.conv4_3 = self.conv_layer(self.conv4_2, 'conv4_3')
        self.pool4 = self.max_pool(self.conv4_3, 'pool4')
        self.conv5_1 = self.conv_layer(self.pool4, 'conv5_1')
        self.conv5_2 = self.conv_layer(self.conv5_1, 'conv5_2')
        self.conv5_3 = self.conv_layer(self.conv5_2, 'conv5_3')
        self.pool5 = self.max_pool(self.conv5_3, 'pool5')
        # VGG's fully connected layers recast as convolutions (FCN style),
        # with dropout between them; these weights are freshly initialized.
        W6 = utils.weight_variable([7, 7, 512, 4096], name='W6')
        b6 = utils.bias_variable([4096], name='b6')
        self.conv6 = utils.conv2d_basic(self.pool5, W6, b6)
        self.relu6 = tf.nn.relu(self.conv6, name='relu6')
        self.relu_dropout6 = tf.nn.dropout(self.relu6, keep_prob=keep_prob)
        W7 = utils.weight_variable([1, 1, 4096, 4096], name='W7')
        b7 = utils.bias_variable([4096], name='b7')
        self.conv7 = utils.conv2d_basic(self.relu_dropout6, W7, b7)
        self.relu7 = tf.nn.relu(self.conv7, name='relu7')
        self.relu_dropout7 = tf.nn.dropout(self.relu7, keep_prob=keep_prob)
        # 1x1 conv producing per-class score maps at 1/32 resolution.
        W8 = utils.weight_variable([1, 1, 4096, NUM_CLASSES], name='W8')
        b8 = utils.bias_variable([NUM_CLASSES], name='b8')
        self.conv8 = utils.conv2d_basic(self.relu_dropout7, W8, b8)
        # FCN-8s decoder: upsample by 2 and fuse with pool4 ...
        deconv_shape1 = self.pool4.get_shape()
        W_t1 = utils.weight_variable([4, 4, deconv_shape1[3].value, NUM_CLASSES], name='W_t1')
        b_t1 = utils.bias_variable([deconv_shape1[3].value], name='b_t1')
        self.conv_t1 = utils.conv2d_transpose_strided(self.conv8, W_t1, b_t1, output_shape=tf.shape(self.pool4))
        self.fuse_1 = tf.add(self.conv_t1, self.pool4, name='fuse_1')
        # ... upsample by 2 again and fuse with pool3 ...
        deconv_shape2 = self.pool3.get_shape()
        W_t2 = utils.weight_variable([4, 4, deconv_shape2[3].value, deconv_shape1[3].value], name='W_t2')
        b_t2 = utils.bias_variable([deconv_shape2[3].value], name='b_t2')
        self.conv_t2 = utils.conv2d_transpose_strided(self.fuse_1, W_t2, b_t2, output_shape=tf.shape(self.pool3))
        self.fuse_2 = tf.add(self.conv_t2, self.pool3, name='fuse_2')
        # ... then upsample by 8 back to the input resolution.
        shape = tf.shape(rgb)
        W_t3 = utils.weight_variable([16, 16, NUM_CLASSES, deconv_shape2[3].value], name='W_t3')
        b_t3 = utils.bias_variable([NUM_CLASSES], name='b_t3')
        # Prob holds the per-pixel class logits; Pred the argmax label map.
        self.Prob = utils.conv2d_transpose_strided(self.fuse_2, W_t3, b_t3, output_shape=[shape[0], shape[1], shape[2], NUM_CLASSES], stride=8)
        self.Pred = tf.argmax(self.Prob, dimension=3, name='Pred')
        print('FCN model built')

    def max_pool(self, bottom, name):
        # 2x2, stride-2 max pooling (halves spatial resolution).
        return tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)

    def conv_layer(self, bottom, name):
        # SAME conv using the pretrained VGG filter/bias for `name`, plus ReLU.
        with tf.variable_scope(name):
            filt = self.get_conv_filter(name)
            conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')
            conv_biases = self.get_bias(name)
            bias = tf.nn.bias_add(conv, conv_biases)
            relu = tf.nn.relu(bias)
            return relu

    def conv_layer_NoRelu(self, bottom, name):
        # Same as conv_layer but without the activation (caller applies it).
        with tf.variable_scope(name):
            filt = self.get_conv_filter(name)
            conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')
            conv_biases = self.get_bias(name)
            bias = tf.nn.bias_add(conv, conv_biases)
            return bias

    def fc_layer(self, bottom, name):
        # Fully connected layer from pretrained VGG weights (not used by build).
        with tf.variable_scope(name):
            shape = bottom.get_shape().as_list()
            dim = 1
            for d in shape[1:]:
                dim *= d
            x = tf.reshape(bottom, [(- 1), dim])
            weights = self.get_fc_weight(name)
            biases = self.get_bias(name)
            fc = tf.nn.bias_add(tf.matmul(x, weights), biases)
            return fc

    def get_conv_filter(self, name):
        # Pretrained conv filter as a Variable; its L2 norm is accumulated
        # into SumWeights (available for weight-decay style regularization).
        var = tf.Variable(self.data_dict[name][0], name=('filter_' + name))
        self.SumWeights += tf.nn.l2_loss(var)
        return var

    def get_bias(self, name):
        # Pretrained bias vector for layer `name`.
        return tf.Variable(self.data_dict[name][1], name=('biases_' + name))

    def get_fc_weight(self, name):
        # Pretrained fully connected weight matrix for layer `name`.
        return tf.Variable(self.data_dict[name][0], name=('weights_' + name))
def CheckVGG16(model_path):
    """Ensure the pretrained VGG16 .npy weight file is available.

    Attempts to download it into the first path component of `model_path`;
    if the file still does not exist afterwards, prints instructions for
    obtaining it manually.
    """
    target_dir = model_path.split('/')[0]
    TensorflowUtils.maybe_download_and_extract(target_dir, 'ftp://mi.eng.cam.ac.uk/pub/mttt2/models/vgg16.npy')
    if os.path.isfile(model_path):
        return
    # Download failed or landed elsewhere — tell the user how to fix it.
    for line in ('Error: Cant find pretrained vgg16 model for network initiation. Please download model from:',
                 'ftp://mi.eng.cam.ac.uk/pub/mttt2/models/vgg16.npy',
                 'Or from:',
                 'https://drive.google.com/file/d/0B6njwynsu2hXZWcwX0FKTGJKRWs/view?usp=sharing',
                 'and place in the path pointed by model_path'):
        print(line)
def main(argv=None):
    # Evaluate mean per-class IOU of the ROI-gated FCN on the validation set.
    # Relies on module-level configuration: model_path, Image_Dir, ROIMap_Dir,
    # Label_Dir, Batch_Size, logs_dir, NUM_CLASSES, Classes.
    tf.reset_default_graph()
    keep_prob = tf.placeholder(tf.float32, name='keep_probabilty')
    image = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='input_image')
    ROIMap = tf.placeholder(tf.int32, shape=[None, None, None, 1], name='ROIMap')
    # Build the network graph from pretrained VGG16 weights.
    Net = BuildNetVgg16.BUILD_NET_VGG16(vgg16_npy_path=model_path)
    Net.build(image, ROIMap, NUM_CLASSES, keep_prob)
    ValidReader = Data_Reader.Data_Reader(Image_Dir, ROIMap_Dir, GTLabelDir=Label_Dir, BatchSize=Batch_Size)
    sess = tf.Session()
    print('Setting up Saver...')
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())
    # A trained checkpoint is required for evaluation; abort if none exists.
    ckpt = tf.train.get_checkpoint_state(logs_dir)
    if (ckpt and ckpt.model_checkpoint_path):
        saver.restore(sess, ckpt.model_checkpoint_path)
        print('Model restored...')
    else:
        # NOTE(review): ckpt may be None here, making this print raise — the
        # error path assumes get_checkpoint_state returned an object.
        print((('ERROR NO TRAINED MODEL IN: ' + ckpt.model_checkpoint_path) + 'See TRAIN.py for training'))
        sys.exit()
    # Accumulate per-class intersection and union across all batches.
    Union = np.float64(np.zeros(len(Classes)))
    Intersection = np.float64(np.zeros(len(Classes)))
    fim = 0  # number of batches processed, for the progress printout
    print((('Start Evaluating intersection over union for ' + str(ValidReader.NumFiles)) + ' images'))
    while (ValidReader.itr < ValidReader.NumFiles):
        print((str(((fim * 100.0) / ValidReader.NumFiles)) + '%'))
        fim += 1
        (Images, ROIMaps, GTLabels) = ValidReader.ReadNextBatchClean()
        # Predicted label map with dropout disabled (keep_prob=1).
        PredictedLabels = sess.run(Net.Pred, feed_dict={image: Images, ROIMap: ROIMaps, keep_prob: 1.0})
        # GetIOU returns per-class IOU and per-class union (used as weight);
        # CIOU * CU recovers the raw intersection for running accumulation.
        (CIOU, CU) = IOU.GetIOU(PredictedLabels, GTLabels.squeeze(), len(Classes), Classes)
        Intersection += (CIOU * CU)
        Union += CU
    print('---------------------------Mean Prediction----------------------------------------')
    print('---------------------IOU=Intersection Over Inion----------------------------------')
    # Report IOU only for classes that actually appear (union > 0).
    for i in range(len(Classes)):
        if (Union[i] > 0):
            print(((Classes[i] + '\t') + str((Intersection[i] / Union[i]))))
def GetIOU(Pred, GT, NumClasses, ClassNames=(), DisplyResults=False):
    """Compute per-class intersection-over-union between two label maps.

    :param Pred: predicted integer label array (any shape, same as GT)
    :param GT: ground-truth integer label array
    :param NumClasses: number of classes (labels are 0..NumClasses-1)
    :param ClassNames: optional class names used only when DisplyResults is True
    :param DisplyResults: if True, print per-class IOU and pixel accuracy
    :return: (ClassIOU, ClassWeight) — IOU per class and its union (pixel count)
             usable as a weight when averaging across batches
    """
    # NOTE: default changed from a mutable [] to an immutable () — the list
    # default was a latent shared-mutable-default hazard; behavior is
    # unchanged since ClassNames is only read.
    ClassIOU = np.zeros(NumClasses)
    ClassWeight = np.zeros(NumClasses)
    for i in range(NumClasses):
        # Intersection: pixels predicted correctly AND belonging to class i.
        Intersection = np.float32(np.sum(((Pred == GT) * (GT == i))))
        Union = ((np.sum((GT == i)) + np.sum((Pred == i))) - Intersection)
        if (Union > 0):
            ClassIOU[i] = (Intersection / Union)
            ClassWeight[i] = Union
    if DisplyResults:
        for i in range(len(ClassNames)):
            print(((ClassNames[i] + ') ') + str(ClassIOU[i])))
        print(('Mean Classes IOU) ' + str(np.mean(ClassIOU))))
        print(('Image Predicition Accuracy)' + str((np.float32(np.sum((Pred == GT))) / GT.size))))
    return (ClassIOU, ClassWeight)
def main(argv=None):
    # Run inference with the ROI-gated FCN over a directory of images and save
    # predicted label maps plus overlay visualizations.  Relies on module-level
    # configuration: model_path, Image_Dir, ROIMap_Dir, logs_dir, Pred_Dir,
    # NUM_CLASSES, NameEnd, w.
    keep_prob = tf.placeholder(tf.float32, name='keep_probabilty')
    image = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='input_image')
    ROIMap = tf.placeholder(tf.int32, shape=[None, None, None, 1], name='ROIMap')
    Net = BuildNetVgg16.BUILD_NET_VGG16(vgg16_npy_path=model_path)
    Net.build(image, ROIMap, NUM_CLASSES, keep_prob)
    # Batch size 1 so each prediction maps 1:1 to a source file name.
    ValidReader = Data_Reader.Data_Reader(Image_Dir, ROIMap_Dir, BatchSize=1)
    sess = tf.Session()
    print('Setting up Saver...')
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())
    # A trained checkpoint is required; abort if none exists.
    ckpt = tf.train.get_checkpoint_state(logs_dir)
    if (ckpt and ckpt.model_checkpoint_path):
        saver.restore(sess, ckpt.model_checkpoint_path)
        print('Model restored...')
    else:
        # NOTE(review): ckpt may be None here, which would make this print raise.
        print((('ERROR NO TRAINED MODEL IN: ' + ckpt.model_checkpoint_path) + ' See Train.py for creating train network '))
        sys.exit()
    # Create the output directory tree (overlays and raw label maps).
    if (not os.path.exists(Pred_Dir)):
        os.makedirs(Pred_Dir)
    if (not os.path.exists((Pred_Dir + '/OverLay'))):
        os.makedirs((Pred_Dir + '/OverLay'))
    if (not os.path.exists((Pred_Dir + '/Label'))):
        os.makedirs((Pred_Dir + '/Label'))
    print('Running Predictions:')
    print(('Saving output to:' + Pred_Dir))
    fim = 0  # number of images processed, for the progress printout
    print((('Start Predicting ' + str(ValidReader.NumFiles)) + ' images'))
    while (ValidReader.itr < ValidReader.NumFiles):
        print((str(((fim * 100.0) / ValidReader.NumFiles)) + '%'))
        fim += 1
        # Grab the file name BEFORE reading the batch — presumably
        # ReadNextBatchClean advances ValidReader.itr (verify in Data_Reader).
        FileName = ValidReader.OrderedFiles[ValidReader.itr]
        (Images, ROIMaps) = ValidReader.ReadNextBatchClean()
        # Predicted label map with dropout disabled.
        LabelPred = sess.run(Net.Pred, feed_dict={image: Images, keep_prob: 1.0, ROIMap: ROIMaps})
        # Save overlay visualization and the raw uint8 label map.
        misc.imsave((((Pred_Dir + '/OverLay/') + FileName) + NameEnd), Overlay.OverLayLabelOnImage(Images[0], LabelPred[0], w))
        misc.imsave(((((Pred_Dir + '/Label/') + FileName[:(- 4)]) + '.png') + NameEnd), LabelPred[0].astype(np.uint8))
def train(loss_val, var_list):
    """Build an Adam training op for `loss_val` over `var_list`.

    Uses the module-level `learning_rate`. Gradients are computed and
    applied explicitly so they could be inspected or clipped later.
    """
    opt = tf.train.AdamOptimizer(learning_rate)
    grad_var_pairs = opt.compute_gradients(loss_val, var_list=var_list)
    return opt.apply_gradients(grad_var_pairs)
def main(argv=None):
    """Train the ROI-gated FCN and periodically log train/validation loss.

    Relies on module-level configuration: model_path, Train_Image_Dir,
    Valid_Image_Dir, ROIMap_Dir, Label_Dir, Batch_Size, logs_dir,
    NUM_CLASSES, MAX_ITERATION, UseValidationSet, TrainLossTxtFile,
    ValidLossTxtFile, learning_rate.

    Fixes vs. the previous version:
    - the validation loop incremented NBatches inside the loop while also
      iterating range(NBatches), so the final average divided by twice the
      batch count and reported half the true validation loss;
    - np.int (removed in NumPy >= 1.24) replaced with the builtin int;
    - redundant f.close() calls inside `with open(...)` blocks removed.
    """
    tf.reset_default_graph()
    keep_prob = tf.placeholder(tf.float32, name='keep_probabilty')
    image = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='input_image')
    ROIMap = tf.placeholder(tf.int32, shape=[None, None, None, 1], name='ROIMap')
    GTLabel = tf.placeholder(tf.int32, shape=[None, None, None, 1], name='GTLabel')
    # Build the network and the per-pixel cross-entropy loss.
    Net = BuildNetVgg16.BUILD_NET_VGG16(vgg16_npy_path=model_path)
    Net.build(image, ROIMap, NUM_CLASSES, keep_prob)
    Loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.squeeze(GTLabel, squeeze_dims=[3]), logits=Net.Prob, name='Loss'))
    trainable_var = tf.trainable_variables()
    train_op = train(Loss, trainable_var)
    TrainReader = Data_Reader.Data_Reader(Train_Image_Dir, ROIMap_Dir, GTLabelDir=Label_Dir, BatchSize=Batch_Size)
    if UseValidationSet:
        ValidReader = Data_Reader.Data_Reader(Valid_Image_Dir, ROIMap_Dir, GTLabelDir=Label_Dir, BatchSize=Batch_Size)
    sess = tf.Session()
    print('Setting up Saver...')
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())
    # Resume from the latest checkpoint if one exists.
    ckpt = tf.train.get_checkpoint_state(logs_dir)
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        print('Model restored...')
    # Start fresh loss-log files with a header line.
    with open(TrainLossTxtFile, 'w') as f:
        f.write('Iteration\tloss\t Learning Rate=' + str(learning_rate))
    if UseValidationSet:
        with open(ValidLossTxtFile, 'w') as f:
            f.write('Iteration\tloss\t Learning Rate=' + str(learning_rate))
    for itr in range(MAX_ITERATION):
        (Images, ROIMaps, GTLabels) = TrainReader.ReadAndAugmentNextBatch()
        feed_dict = {image: Images, GTLabel: GTLabels, ROIMap: ROIMaps, keep_prob: 0.5}
        sess.run(train_op, feed_dict=feed_dict)
        # Checkpoint every 500 iterations (skipping iteration 0).
        if (itr % 500) == 0 and itr > 0:
            print('Saving Model to file in' + logs_dir)
            saver.save(sess, (logs_dir + 'model.ckpt'), itr)
        # Log training loss every 10 iterations (dropout disabled).
        if (itr % 10) == 0:
            feed_dict = {image: Images, GTLabel: GTLabels, ROIMap: ROIMaps, keep_prob: 1}
            TLoss = sess.run(Loss, feed_dict=feed_dict)
            print((('Step ' + str(itr)) + ' Train Loss=') + str(TLoss))
            with open(TrainLossTxtFile, 'a') as f:
                f.write((('\n' + str(itr)) + '\t') + str(TLoss))
        # Full validation pass every 2000 iterations.
        if UseValidationSet and (itr % 2000) == 0:
            SumLoss = np.float64(0.0)
            NBatches = int(np.ceil(ValidReader.NumFiles / ValidReader.BatchSize))
            print(('Calculating Validation on ' + str(ValidReader.NumFiles)) + ' Images')
            for i in range(NBatches):
                (Images, ROIMaps, GTLabels) = ValidReader.ReadNextBatchClean()
                feed_dict = {image: Images, ROIMap: ROIMaps, GTLabel: GTLabels, keep_prob: 1.0}
                TLoss = sess.run(Loss, feed_dict=feed_dict)
                SumLoss += TLoss
            # Average over the true number of batches (previously NBatches was
            # also incremented inside the loop, halving the reported loss).
            SumLoss /= NBatches
            print('Validation Loss: ' + str(SumLoss))
            with open(ValidLossTxtFile, 'a') as f:
                f.write((('\n' + str(itr)) + '\t') + str(SumLoss))
def get_model_data(dir_path, model_url):
    """Return the pretrained model as a dict loaded from a MATLAB .mat file.

    Downloads the file into `dir_path` first if it is not already present;
    raises IOError if it still cannot be found afterwards.
    """
    maybe_download_and_extract(dir_path, model_url)
    mat_name = model_url.split('/')[-1]
    mat_path = os.path.join(dir_path, mat_name)
    if not os.path.exists(mat_path):
        raise IOError('VGG Model not found!')
    return scipy.io.loadmat(mat_path)
def maybe_download_and_extract(dir_path, url_name, is_tarfile=False, is_zipfile=False):
    """Download `url_name` into `dir_path` unless the file already exists.

    Shows a simple percentage progress bar while downloading, and optionally
    extracts the downloaded archive (gzipped tar or zip) into `dir_path`.
    Nothing happens if the target file is already present.
    """
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
    filename = url_name.split('/')[-1]
    filepath = os.path.join(dir_path, filename)
    if os.path.exists(filepath):
        # Already downloaded — nothing to do.
        return

    def _report(count, block_size, total_size):
        # urlretrieve progress hook: overwrite one status line in place.
        percent = (float((count * block_size)) / float(total_size)) * 100.0
        sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename, percent))
        sys.stdout.flush()

    filepath, _ = urllib.request.urlretrieve(url_name, filepath, reporthook=_report)
    print()
    statinfo = os.stat(filepath)
    print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')
    # Extract only right after a fresh download.
    if is_tarfile:
        tarfile.open(filepath, 'r:gz').extractall(dir_path)
    elif is_zipfile:
        with zipfile.ZipFile(filepath) as zf:
            zip_dir = zf.namelist()[0]
            zf.extractall(dir_path)
def save_image(image, save_dir, name, mean=None):
    """Save `image` as `<save_dir>/<name>.png`, un-normalizing first if a mean is given.

    :param image: image array to save
    :param save_dir: destination directory
    :param name: file name without extension
    :param mean: optional mean pixel to add back before saving
    """
    # Fixed: was `if mean:` — that raises ValueError when mean is a numpy
    # array (ambiguous truth value) and silently skips a legitimate mean of 0.
    if mean is not None:
        image = unprocess_image(image, mean)
    misc.imsave(os.path.join(save_dir, (name + '.png')), image)
def get_variable(weights, name):
    """Create a float32 tf variable named `name`, initialized from `weights`."""
    initializer = tf.constant_initializer(weights, dtype=tf.float32)
    return tf.get_variable(name=name, initializer=initializer, shape=weights.shape)
def weight_variable(shape, stddev=0.02, name=None):
    """Create a weight tensor with truncated-normal initialization.

    Returns an anonymous Variable when `name` is None, otherwise a
    scope-managed variable via tf.get_variable.
    """
    init = tf.truncated_normal(shape, stddev=stddev)
    if name is not None:
        return tf.get_variable(name, initializer=init)
    return tf.Variable(init)
def bias_variable(shape, name=None):
    """Create a zero-initialized bias tensor.

    Returns an anonymous Variable when `name` is None, otherwise a
    scope-managed variable via tf.get_variable.
    """
    init = tf.constant(0.0, shape=shape)
    if name is not None:
        return tf.get_variable(name, initializer=init)
    return tf.Variable(init)
def get_tensor_size(tensor):
    """Return the total number of elements in `tensor` (product of its static dims).

    Requires every dimension to be statically known (each `d.value` must not
    be None).
    """
    # Fixed: `reduce` is not a builtin in Python 3; only `mul` was imported
    # locally, so this raised NameError unless reduce happened to be imported
    # at module level.
    from functools import reduce
    from operator import mul
    return reduce(mul, (d.value for d in tensor.get_shape()), 1)
def conv2d_basic(x, W, bias):
    """Stride-1 SAME-padded 2D convolution followed by a bias add."""
    return tf.nn.bias_add(tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME'), bias)
def conv2d_strided(x, W, b):
    """Stride-2 SAME-padded 2D convolution (halves spatial size) plus bias."""
    return tf.nn.bias_add(tf.nn.conv2d(x, W, strides=[1, 2, 2, 1], padding='SAME'), b)
def conv2d_transpose_strided(x, W, b, output_shape=None, stride=2):
    """Strided transposed convolution (upsampling) followed by a bias add.

    When `output_shape` is None it is derived from the static shape of `x`,
    doubling the spatial dimensions and taking the output channel count from
    the kernel `W` (dimension 2 of a [h, w, out_ch, in_ch] deconv filter).
    """
    if output_shape is None:
        inferred = x.get_shape().as_list()
        inferred[1] *= 2
        inferred[2] *= 2
        inferred[3] = W.get_shape().as_list()[2]
        output_shape = inferred
    deconv = tf.nn.conv2d_transpose(x, W, output_shape, strides=[1, stride, stride, 1], padding='SAME')
    return tf.nn.bias_add(deconv, b)
def leaky_relu(x, alpha=0.0, name=''):
    """Leaky ReLU: max(alpha * x, x). With the default alpha=0 this is plain ReLU."""
    scaled = (alpha * x)
    return tf.maximum(scaled, x, name)
def max_pool_2x2(x):
    """2x2 stride-2 SAME max pooling (halves spatial resolution)."""
    window = [1, 2, 2, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='SAME')
def avg_pool_2x2(x):
    """2x2 stride-2 SAME average pooling (halves spatial resolution)."""
    window = [1, 2, 2, 1]
    return tf.nn.avg_pool(x, ksize=window, strides=window, padding='SAME')
def local_response_norm(x):
    """Local response normalization with AlexNet-style hyperparameters."""
    return tf.nn.lrn(x, depth_radius=5, bias=2, alpha=0.0001, beta=0.75)
def batch_norm(x, n_out, phase_train, scope='bn', decay=0.9, eps=1e-05):
    """Batch normalization over the batch and spatial axes of a 4D tensor.

    During training (phase_train is a True tf.bool tensor) the current batch
    statistics are used and folded into an exponential moving average; during
    inference the accumulated moving averages are used instead.

    Code taken from http://stackoverflow.com/a/34634291/2267819

    :param x: 4D input tensor [batch, height, width, n_out]
    :param n_out: number of channels (size of beta/gamma)
    :param phase_train: boolean tensor selecting train vs inference statistics
    :param scope: variable scope name
    :param decay: EMA decay rate for the running mean/variance
    :param eps: numerical-stability epsilon
    """
    with tf.variable_scope(scope):
        # Learnable shift (beta) and scale (gamma) per channel.
        beta = tf.get_variable(name='beta', shape=[n_out], initializer=tf.constant_initializer(0.0), trainable=True)
        gamma = tf.get_variable(name='gamma', shape=[n_out], initializer=tf.random_normal_initializer(1.0, 0.02), trainable=True)
        # Moments over batch + spatial dims -> one mean/var per channel.
        (batch_mean, batch_var) = tf.nn.moments(x, [0, 1, 2], name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=decay)

        def mean_var_with_update():
            # The control dependency forces the EMA update to run whenever
            # the batch statistics are consumed in training mode.
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return (tf.identity(batch_mean), tf.identity(batch_var))

        # Training: batch stats (with EMA update); inference: EMA averages.
        (mean, var) = tf.cond(phase_train, mean_var_with_update, (lambda : (ema.average(batch_mean), ema.average(batch_var))))
        normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, eps)
        return normed
def process_image(image, mean_pixel):
    """Normalize an image by subtracting the dataset mean pixel."""
    return image - mean_pixel
def unprocess_image(image, mean_pixel):
    """Undo mean-pixel normalization by adding the dataset mean back."""
    return image + mean_pixel
def bottleneck_unit(x, out_chan1, out_chan2, down_stride=False, up_stride=False, name=None):
    """Residual bottleneck unit (1x1 -> 3x3 -> 1x1 convs plus shortcut).

    Modified implementation from github ry?!

    Fixes vs. the previous version (which could not run at all):
    - the inner conv/conv_transpose helpers convolved the closed-over `x`
      instead of their own `tensor` argument, so branch2b/2c ignored b2;
    - conv_transpose declared `out_channel` while every call site passed
      `out_chans=` (TypeError), and mutated `shape` (an int kernel size)
      where the intent was the output-shape list;
    - bn() takes (tensor, name) but was called with an extra scale-name
      argument (TypeError); the scale name is dropped since bn is only LRN.

    :param x: 4D input tensor
    :param out_chan1: channel count of the two inner (bottleneck) convs
    :param out_chan2: channel count of the unit's output
    :param down_stride / up_stride: apply stride 2 in the first conv
        (down_stride) or a transposed conv (up_stride)
    :param name: suffix used in all variable-scope / variable names
    """
    def conv_transpose(tensor, out_chans, shape, strides, name=None):
        # Transposed conv; deconv kernels are [h, w, out_ch, in_ch].
        in_channel = tensor.get_shape().as_list()[-1]
        kernel = weight_variable([shape, shape, out_chans, in_channel], name=name)
        out_shape = tensor.get_shape().as_list()
        out_shape[-1] = out_chans
        return tf.nn.conv2d_transpose(tensor, kernel, output_shape=out_shape, strides=[1, strides, strides, 1], padding='SAME', name='conv_transpose')

    def conv(tensor, out_chans, shape, strides, name=None):
        # Plain conv; kernels are [h, w, in_ch, out_ch].
        in_channel = tensor.get_shape().as_list()[-1]
        kernel = weight_variable([shape, shape, in_channel, out_chans], name=name)
        return tf.nn.conv2d(tensor, kernel, strides=[1, strides, strides, 1], padding='SAME', name='conv')

    def bn(tensor, name=None):
        # Local response normalization standing in for batch norm.
        return tf.nn.lrn(tensor, depth_radius=5, bias=2, alpha=0.0001, beta=0.75, name=name)

    in_chans = x.get_shape().as_list()[3]
    # Stride 2 in the first conv when changing resolution, else stride 1.
    if down_stride or up_stride:
        first_stride = 2
    else:
        first_stride = 1
    with tf.variable_scope('res%s' % name):
        # Shortcut branch: identity when channels already match, else a
        # 1x1 projection (transposed when upsampling).
        if in_chans == out_chan2:
            b1 = x
        else:
            with tf.variable_scope('branch1'):
                if up_stride:
                    b1 = conv_transpose(x, out_chans=out_chan2, shape=1, strides=first_stride, name='res%s_branch1' % name)
                else:
                    b1 = conv(x, out_chans=out_chan2, shape=1, strides=first_stride, name='res%s_branch1' % name)
                b1 = bn(b1, 'bn%s_branch1' % name)
        # Main branch: 1x1 reduce -> 3x3 -> 1x1 expand.
        with tf.variable_scope('branch2a'):
            if up_stride:
                b2 = conv_transpose(x, out_chans=out_chan1, shape=1, strides=first_stride, name='res%s_branch2a' % name)
            else:
                b2 = conv(x, out_chans=out_chan1, shape=1, strides=first_stride, name='res%s_branch2a' % name)
            b2 = bn(b2, 'bn%s_branch2a' % name)
            b2 = tf.nn.relu(b2, name='relu')
        with tf.variable_scope('branch2b'):
            b2 = conv(b2, out_chans=out_chan1, shape=3, strides=1, name='res%s_branch2b' % name)
            b2 = bn(b2, 'bn%s_branch2b' % name)
            b2 = tf.nn.relu(b2, name='relu')
        with tf.variable_scope('branch2c'):
            b2 = conv(b2, out_chans=out_chan2, shape=1, strides=1, name='res%s_branch2c' % name)
            b2 = bn(b2, 'bn%s_branch2c' % name)
        # Residual add then final activation.
        x = (b1 + b2)
        return tf.nn.relu(x, name='relu')
def add_to_regularization_and_summary(var):
    """Record a histogram summary for `var` and add its L2 loss to 'reg_loss'."""
    if var is None:
        return
    tf.summary.histogram(var.op.name, var)
    tf.add_to_collection('reg_loss', tf.nn.l2_loss(var))
def add_activation_summary(var):
    """Record histogram and sparsity (zero-fraction) summaries for an activation."""
    if var is None:
        return
    tf.summary.histogram(var.op.name + '/activation', var)
    tf.summary.scalar(var.op.name + '/sparsity', tf.nn.zero_fraction(var))
def add_gradient_summary(grad, var):
    """Record a histogram summary of `grad`, named after the variable it belongs to."""
    if grad is None:
        return
    tf.summary.histogram(var.op.name + '/gradient', grad)
class BUILD_NET_VGG16():
    # FCN-8s-style semantic segmentation network built on a pretrained VGG16
    # backbone (plain variant, without the ROI/relevance-map gating branch).

    def __init__(self, vgg16_npy_path=None):
        # Pretrained VGG16 weights: a dict keyed by layer name, each entry
        # holding [filter_weights, biases].
        self.data_dict = np.load(vgg16_npy_path, encoding='latin1').item()
        print('npy file loaded')

    def build(self, rgb, NUM_CLASSES, keep_prob):
        """Build the VGG16 encoder + FCN-8s decoder graph.

        :param rgb: rgb image [batch, height, width, 3] values 0-255
        :param NUM_CLASSES: number of output segmentation classes
        :param keep_prob: dropout keep-probability placeholder
        """
        self.SumWeights = tf.constant(0.0, name='SumFiltersWeights')
        print('build model started')
        # VGG was trained on mean-subtracted BGR input, so reorder and shift.
        (red, green, blue) = tf.split(axis=3, num_or_size_splits=3, value=rgb)
        bgr = tf.concat(axis=3, values=[(blue - VGG_MEAN[0]), (green - VGG_MEAN[1]), (red - VGG_MEAN[2])])
        # Standard VGG16 convolutional stack with pretrained weights.
        self.conv1_1 = self.conv_layer(bgr, 'conv1_1')
        self.conv1_2 = self.conv_layer(self.conv1_1, 'conv1_2')
        self.pool1 = self.max_pool(self.conv1_2, 'pool1')
        self.conv2_1 = self.conv_layer(self.pool1, 'conv2_1')
        self.conv2_2 = self.conv_layer(self.conv2_1, 'conv2_2')
        self.pool2 = self.max_pool(self.conv2_2, 'pool2')
        self.conv3_1 = self.conv_layer(self.pool2, 'conv3_1')
        self.conv3_2 = self.conv_layer(self.conv3_1, 'conv3_2')
        self.conv3_3 = self.conv_layer(self.conv3_2, 'conv3_3')
        self.pool3 = self.max_pool(self.conv3_3, 'pool3')
        self.conv4_1 = self.conv_layer(self.pool3, 'conv4_1')
        self.conv4_2 = self.conv_layer(self.conv4_1, 'conv4_2')
        self.conv4_3 = self.conv_layer(self.conv4_2, 'conv4_3')
        self.pool4 = self.max_pool(self.conv4_3, 'pool4')
        self.conv5_1 = self.conv_layer(self.pool4, 'conv5_1')
        self.conv5_2 = self.conv_layer(self.conv5_1, 'conv5_2')
        self.conv5_3 = self.conv_layer(self.conv5_2, 'conv5_3')
        self.pool5 = self.max_pool(self.conv5_3, 'pool5')
        # VGG's fully connected layers recast as convolutions (FCN style),
        # with dropout between them; these weights are freshly initialized.
        W6 = utils.weight_variable([7, 7, 512, 4096], name='W6')
        b6 = utils.bias_variable([4096], name='b6')
        self.conv6 = utils.conv2d_basic(self.pool5, W6, b6)
        self.relu6 = tf.nn.relu(self.conv6, name='relu6')
        self.relu_dropout6 = tf.nn.dropout(self.relu6, keep_prob=keep_prob)
        W7 = utils.weight_variable([1, 1, 4096, 4096], name='W7')
        b7 = utils.bias_variable([4096], name='b7')
        self.conv7 = utils.conv2d_basic(self.relu_dropout6, W7, b7)
        self.relu7 = tf.nn.relu(self.conv7, name='relu7')
        self.relu_dropout7 = tf.nn.dropout(self.relu7, keep_prob=keep_prob)
        # 1x1 conv producing per-class score maps at 1/32 resolution.
        W8 = utils.weight_variable([1, 1, 4096, NUM_CLASSES], name='W8')
        b8 = utils.bias_variable([NUM_CLASSES], name='b8')
        self.conv8 = utils.conv2d_basic(self.relu_dropout7, W8, b8)
        # FCN-8s decoder: upsample by 2 and fuse with pool4 ...
        deconv_shape1 = self.pool4.get_shape()
        W_t1 = utils.weight_variable([4, 4, deconv_shape1[3].value, NUM_CLASSES], name='W_t1')
        b_t1 = utils.bias_variable([deconv_shape1[3].value], name='b_t1')
        self.conv_t1 = utils.conv2d_transpose_strided(self.conv8, W_t1, b_t1, output_shape=tf.shape(self.pool4))
        self.fuse_1 = tf.add(self.conv_t1, self.pool4, name='fuse_1')
        # ... upsample by 2 again and fuse with pool3 ...
        deconv_shape2 = self.pool3.get_shape()
        W_t2 = utils.weight_variable([4, 4, deconv_shape2[3].value, deconv_shape1[3].value], name='W_t2')
        b_t2 = utils.bias_variable([deconv_shape2[3].value], name='b_t2')
        self.conv_t2 = utils.conv2d_transpose_strided(self.fuse_1, W_t2, b_t2, output_shape=tf.shape(self.pool3))
        self.fuse_2 = tf.add(self.conv_t2, self.pool3, name='fuse_2')
        # ... then upsample by 8 back to the input resolution.
        shape = tf.shape(rgb)
        W_t3 = utils.weight_variable([16, 16, NUM_CLASSES, deconv_shape2[3].value], name='W_t3')
        b_t3 = utils.bias_variable([NUM_CLASSES], name='b_t3')
        # Prob holds the per-pixel class logits; Pred the argmax label map.
        self.Prob = utils.conv2d_transpose_strided(self.fuse_2, W_t3, b_t3, output_shape=[shape[0], shape[1], shape[2], NUM_CLASSES], stride=8)
        self.Pred = tf.argmax(self.Prob, dimension=3, name='Pred')
        print('FCN model built')

    def max_pool(self, bottom, name):
        # 2x2, stride-2 max pooling (halves spatial resolution).
        return tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)

    def conv_layer(self, bottom, name):
        # SAME conv using the pretrained VGG filter/bias for `name`, plus ReLU.
        with tf.variable_scope(name):
            filt = self.get_conv_filter(name)
            conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')
            conv_biases = self.get_bias(name)
            bias = tf.nn.bias_add(conv, conv_biases)
            relu = tf.nn.relu(bias)
            return relu

    def conv_layer_NoRelu(self, bottom, name):
        # Same as conv_layer but without the activation (not used by build
        # in this variant).
        with tf.variable_scope(name):
            filt = self.get_conv_filter(name)
            conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')
            conv_biases = self.get_bias(name)
            bias = tf.nn.bias_add(conv, conv_biases)
            return bias

    def fc_layer(self, bottom, name):
        # Fully connected layer from pretrained VGG weights (not used by build).
        with tf.variable_scope(name):
            shape = bottom.get_shape().as_list()
            dim = 1
            for d in shape[1:]:
                dim *= d
            x = tf.reshape(bottom, [(- 1), dim])
            weights = self.get_fc_weight(name)
            biases = self.get_bias(name)
            fc = tf.nn.bias_add(tf.matmul(x, weights), biases)
            return fc

    def get_conv_filter(self, name):
        # Pretrained conv filter as a Variable; its L2 norm is accumulated
        # into SumWeights (available for weight-decay style regularization).
        var = tf.Variable(self.data_dict[name][0], name=('filter_' + name))
        self.SumWeights += tf.nn.l2_loss(var)
        return var

    def get_bias(self, name):
        # Pretrained bias vector for layer `name`.
        return tf.Variable(self.data_dict[name][1], name=('biases_' + name))

    def get_fc_weight(self, name):
        # Pretrained fully connected weight matrix for layer `name`.
        return tf.Variable(self.data_dict[name][0], name=('weights_' + name))
def CheckVGG16(model_path):
    """Ensure the pretrained VGG16 .npy weight file is available.

    Tries to download it into the first path component of `model_path`;
    when the file is still missing afterwards, prints manual download
    instructions.
    """
    download_dir = model_path.split('/')[0]
    TensorflowUtils.maybe_download_and_extract(download_dir, 'ftp://mi.eng.cam.ac.uk/pub/mttt2/models/vgg16.npy')
    if not os.path.isfile(model_path):
        # Download failed or landed elsewhere — tell the user how to fix it.
        messages = ('Error: Cant find pretrained vgg16 model for network initiation. Please download model from:',
                    'ftp://mi.eng.cam.ac.uk/pub/mttt2/models/vgg16.npy',
                    'Or from:',
                    'https://drive.google.com/file/d/0B6njwynsu2hXZWcwX0FKTGJKRWs/view?usp=sharing',
                    'and place in the path pointed by model_path')
        for message in messages:
            print(message)
def main(argv=None):
    # Evaluate mean per-class IOU of the plain (no-ROI) FCN on the validation
    # set.  Relies on module-level configuration: model_path, Image_Dir,
    # Label_Dir, Batch_Size, logs_dir, NUM_CLASSES, Classes.
    tf.reset_default_graph()
    keep_prob = tf.placeholder(tf.float32, name='keep_probabilty')
    image = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='input_image')
    # Build the network graph from pretrained VGG16 weights.
    Net = BuildNetVgg16.BUILD_NET_VGG16(vgg16_npy_path=model_path)
    Net.build(image, NUM_CLASSES, keep_prob)
    ValidReader = Data_Reader.Data_Reader(Image_Dir, GTLabelDir=Label_Dir, BatchSize=Batch_Size)
    sess = tf.Session()
    print('Setting up Saver...')
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())
    # A trained checkpoint is required for evaluation; abort if none exists.
    ckpt = tf.train.get_checkpoint_state(logs_dir)
    if (ckpt and ckpt.model_checkpoint_path):
        saver.restore(sess, ckpt.model_checkpoint_path)
        print('Model restored...')
    else:
        # NOTE(review): ckpt may be None here, making this print raise — the
        # error path assumes get_checkpoint_state returned an object.
        print((('ERROR NO TRAINED MODEL IN: ' + ckpt.model_checkpoint_path) + 'See TRAIN.py for training'))
        sys.exit()
    # Accumulate per-class intersection and union across all batches.
    Union = np.float64(np.zeros(len(Classes)))
    Intersection = np.float64(np.zeros(len(Classes)))
    fim = 0  # number of batches processed, for the progress printout
    print((('Start Evaluating intersection over union for ' + str(ValidReader.NumFiles)) + ' images'))
    while (ValidReader.itr < ValidReader.NumFiles):
        print((str(((fim * 100.0) / ValidReader.NumFiles)) + '%'))
        fim += 1
        (Images, GTLabels) = ValidReader.ReadNextBatchClean()
        # Predicted label map with dropout disabled (keep_prob=1).
        PredictedLabels = sess.run(Net.Pred, feed_dict={image: Images, keep_prob: 1.0})
        # GetIOU returns per-class IOU and per-class union (used as weight);
        # CIOU * CU recovers the raw intersection for running accumulation.
        (CIOU, CU) = IOU.GetIOU(PredictedLabels, GTLabels.squeeze(), len(Classes), Classes)
        Intersection += (CIOU * CU)
        Union += CU
    print('---------------------------Mean Prediction----------------------------------------')
    print('---------------------IOU=Intersection Over Inion----------------------------------')
    # Report IOU only for classes that actually appear (union > 0).
    for i in range(len(Classes)):
        if (Union[i] > 0):
            print(((Classes[i] + '\t') + str((Intersection[i] / Union[i]))))
def GetIOU(Pred, GT, NumClasses, ClassNames=(), DisplyResults=False):
    """Compute per-class intersection-over-union between two label maps.

    :param Pred: predicted integer label array (any shape, same as GT)
    :param GT: ground-truth integer label array
    :param NumClasses: number of classes (labels are 0..NumClasses-1)
    :param ClassNames: optional class names used only when DisplyResults is True
    :param DisplyResults: if True, print per-class IOU and pixel accuracy
    :return: (ClassIOU, ClassWeight) — IOU per class and its union (pixel count)
             usable as a weight when averaging across batches
    """
    # NOTE: default changed from a mutable [] to an immutable () — the list
    # default was a latent shared-mutable-default hazard; behavior is
    # unchanged since ClassNames is only read.
    ClassIOU = np.zeros(NumClasses)
    ClassWeight = np.zeros(NumClasses)
    for i in range(NumClasses):
        # Intersection: pixels predicted correctly AND belonging to class i.
        Intersection = np.float32(np.sum(((Pred == GT) * (GT == i))))
        Union = ((np.sum((GT == i)) + np.sum((Pred == i))) - Intersection)
        if (Union > 0):
            ClassIOU[i] = (Intersection / Union)
            ClassWeight[i] = Union
    if DisplyResults:
        for i in range(len(ClassNames)):
            print(((ClassNames[i] + ') ') + str(ClassIOU[i])))
        print(('Mean Classes IOU) ' + str(np.mean(ClassIOU))))
        print(('Image Predicition Accuracy)' + str((np.float32(np.sum((Pred == GT))) / GT.size))))
    return (ClassIOU, ClassWeight)
def main(argv=None):
    # Run inference with the plain (no-ROI) FCN over a directory of images and
    # save predicted label maps plus overlay visualizations.  Relies on
    # module-level configuration: model_path, Image_Dir, logs_dir, Pred_Dir,
    # NUM_CLASSES, NameEnd, w.
    keep_prob = tf.placeholder(tf.float32, name='keep_probabilty')
    image = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='input_image')
    Net = BuildNetVgg16.BUILD_NET_VGG16(vgg16_npy_path=model_path)
    Net.build(image, NUM_CLASSES, keep_prob)
    # Batch size 1 so each prediction maps 1:1 to a source file name.
    ValidReader = Data_Reader.Data_Reader(Image_Dir, BatchSize=1)
    sess = tf.Session()
    print('Setting up Saver...')
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())
    # A trained checkpoint is required; abort if none exists.
    ckpt = tf.train.get_checkpoint_state(logs_dir)
    if (ckpt and ckpt.model_checkpoint_path):
        saver.restore(sess, ckpt.model_checkpoint_path)
        print('Model restored...')
    else:
        # NOTE(review): ckpt may be None here, which would make this print raise.
        print((('ERROR NO TRAINED MODEL IN: ' + ckpt.model_checkpoint_path) + ' See Train.py for creating train network '))
        sys.exit()
    # Create the output directory tree (overlays and raw label maps).
    if (not os.path.exists(Pred_Dir)):
        os.makedirs(Pred_Dir)
    if (not os.path.exists((Pred_Dir + '/OverLay'))):
        os.makedirs((Pred_Dir + '/OverLay'))
    if (not os.path.exists((Pred_Dir + '/Label'))):
        os.makedirs((Pred_Dir + '/Label'))
    print('Running Predictions:')
    print(('Saving output to:' + Pred_Dir))
    fim = 0  # number of images processed, for the progress printout
    print((('Start Predicting ' + str(ValidReader.NumFiles)) + ' images'))
    while (ValidReader.itr < ValidReader.NumFiles):
        print((str(((fim * 100.0) / ValidReader.NumFiles)) + '%'))
        fim += 1
        # Grab the file name BEFORE reading the batch — presumably
        # ReadNextBatchClean advances ValidReader.itr (verify in Data_Reader).
        FileName = ValidReader.OrderedFiles[ValidReader.itr]
        Images = ValidReader.ReadNextBatchClean()
        # Predicted label map with dropout disabled.
        LabelPred = sess.run(Net.Pred, feed_dict={image: Images, keep_prob: 1.0})
        # Save overlay visualization and the raw uint8 label map.
        misc.imsave((((Pred_Dir + '/OverLay/') + FileName) + NameEnd), Overlay.OverLayLabelOnImage(Images[0], LabelPred[0], w))
        misc.imsave(((((Pred_Dir + '/Label/') + FileName[:(- 4)]) + '.png') + NameEnd), LabelPred[0].astype(np.uint8))
def train(loss_val, var_list):
    """Return an Adam train op minimizing `loss_val` over `var_list`.

    Uses the module-level `learning_rate`. Gradients are computed and
    applied in two explicit steps rather than via minimize().
    """
    adam = tf.train.AdamOptimizer(learning_rate)
    pairs = adam.compute_gradients(loss_val, var_list=var_list)
    return adam.apply_gradients(pairs)