code
stringlengths
17
6.64M
def ResNeXt29_4x64d():
    """ResNeXt-29 with cardinality 4 and bottleneck width 64."""
    return ResNeXt(
        num_blocks=[3, 3, 3],
        cardinality=4,
        bottleneck_width=64,
    )
def ResNeXt29_8x64d():
    """ResNeXt-29 with cardinality 8 and bottleneck width 64."""
    return ResNeXt(
        num_blocks=[3, 3, 3],
        cardinality=8,
        bottleneck_width=64,
    )
def ResNeXt29_32x4d():
    """ResNeXt-29 with cardinality 32 and bottleneck width 4."""
    return ResNeXt(
        num_blocks=[3, 3, 3],
        cardinality=32,
        bottleneck_width=4,
    )
def test_resnext():
    # Smoke test: build a ResNeXt-29 (2x64d) and push one CIFAR-sized batch
    # through it, printing the output shape.
    # NOTE(review): ResNeXt29_2x64d is not defined in this chunk (only the
    # 4x64d / 8x64d / 32x4d constructors are visible) — confirm it exists
    # elsewhere in the file.
    net = ResNeXt29_2x64d()
    x = torch.randn(1, 3, 32, 32)
    y = net(x)
    print(y.size())
class BasicBlock(nn.Module):
    """Basic residual block with a squeeze-and-excitation (SE) gate.

    Two 3x3 conv+BN layers, plus a channel-attention branch (global average
    pool -> fc1 -> ReLU -> fc2 -> sigmoid) whose output rescales the feature
    map channel-wise before the residual addition.
    """

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # 1x1 projection shortcut when the spatial size or channel count changes.
        self.shortcut = nn.Sequential()
        if (stride != 1) or (in_planes != planes):
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes),
            )
        # SE layers: squeeze channels by 16x, then restore.
        self.fc1 = nn.Conv2d(planes, planes // 16, kernel_size=1)
        self.fc2 = nn.Conv2d(planes // 16, planes, kernel_size=1)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # Squeeze-and-excitation: global average pool to 1x1, then gate.
        w = F.avg_pool2d(out, out.size(2))
        w = F.relu(self.fc1(w))
        # FIX: torch.sigmoid instead of F.sigmoid — F.sigmoid was deprecated
        # and has been removed from recent PyTorch releases.
        w = torch.sigmoid(self.fc2(w))
        out = out * w
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class PreActBlock(nn.Module):
    """Pre-activation residual block with a squeeze-and-excitation gate.

    BN/ReLU precede each conv; the shortcut (created only when the shape
    changes) taps the pre-activated input. An SE branch rescales channels
    before the residual sum.
    """

    def __init__(self, in_planes, planes, stride=1):
        super(PreActBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        # Shortcut only exists when the spatial size or channel count changes.
        if (stride != 1) or (in_planes != planes):
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False)
            )
        # SE layers: squeeze channels by 16x, then restore.
        self.fc1 = nn.Conv2d(planes, planes // 16, kernel_size=1)
        self.fc2 = nn.Conv2d(planes // 16, planes, kernel_size=1)

    def forward(self, x):
        out = F.relu(self.bn1(x))
        shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
        out = self.conv1(out)
        out = self.conv2(F.relu(self.bn2(out)))
        # Squeeze-and-excitation gate.
        w = F.avg_pool2d(out, out.size(2))
        w = F.relu(self.fc1(w))
        # FIX: torch.sigmoid instead of F.sigmoid — F.sigmoid was deprecated
        # and has been removed from recent PyTorch releases.
        w = torch.sigmoid(self.fc2(w))
        out = out * w
        out += shortcut
        return out
class SENet(nn.Module):
    """ResNet-style backbone for 32x32 inputs (e.g. CIFAR-10).

    Four stages of `block` instances with widths 64/128/256/512; stages 2-4
    downsample by stride 2, and a 4x4 average pool precedes the classifier.
    """

    def __init__(self, block, num_blocks, num_classes=10):
        super(SENet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage applies the stage stride.
        blocks = []
        for s in [stride] + [1] * (num_blocks - 1):
            blocks.append(block(self.in_planes, planes, s))
            self.in_planes = planes
        return nn.Sequential(*blocks)

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            h = stage(h)
        h = F.avg_pool2d(h, 4)
        h = h.view(h.size(0), -1)
        return self.linear(h)
def SENet18():
    """SENet-18: the SENet backbone with two pre-activation SE blocks per stage."""
    return SENet(PreActBlock, [2, 2, 2, 2])
def test():
    """Smoke test: run one random CIFAR-sized batch through SENet18."""
    net = SENet18()
    out = net(torch.randn(1, 3, 32, 32))
    print(out.size())
class ShuffleBlock(nn.Module):
    """Channel shuffle for grouped convolutions.

    [N,C,H,W] -> [N,g,C/g,H,W] -> swap the group axes -> [N,C,H,W], so that
    channels from different groups get interleaved.
    """

    def __init__(self, groups):
        super(ShuffleBlock, self).__init__()
        self.groups = groups

    def forward(self, x):
        n, c, h, w = x.size()
        g = self.groups
        shuffled = x.view(n, g, c // g, h, w).permute(0, 2, 1, 3, 4)
        return shuffled.reshape(n, c, h, w)
class Bottleneck(nn.Module):
    """ShuffleNet bottleneck.

    Grouped 1x1 conv -> channel shuffle -> 3x3 depthwise conv -> grouped 1x1
    conv. Stride-2 blocks concatenate an average-pooled shortcut (growing the
    channel count); stride-1 blocks use a plain residual sum.
    """

    def __init__(self, in_planes, out_planes, stride, groups):
        super(Bottleneck, self).__init__()
        self.stride = stride
        # FIX: use integer division — `out_planes / 4` is a float in Python 3
        # and nn.Conv2d / nn.BatchNorm2d require integer channel counts.
        mid_planes = out_planes // 4
        # The 24-channel stem is too narrow to group; fall back to one group.
        g = 1 if in_planes == 24 else groups
        self.conv1 = nn.Conv2d(in_planes, mid_planes, kernel_size=1, groups=g, bias=False)
        self.bn1 = nn.BatchNorm2d(mid_planes)
        self.shuffle1 = ShuffleBlock(groups=g)
        # Depthwise 3x3 conv (groups == channels).
        self.conv2 = nn.Conv2d(mid_planes, mid_planes, kernel_size=3, stride=stride, padding=1, groups=mid_planes, bias=False)
        self.bn2 = nn.BatchNorm2d(mid_planes)
        self.conv3 = nn.Conv2d(mid_planes, out_planes, kernel_size=1, groups=groups, bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes)
        self.shortcut = nn.Sequential()
        if stride == 2:
            self.shortcut = nn.Sequential(nn.AvgPool2d(3, stride=2, padding=1))

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.shuffle1(out)
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        res = self.shortcut(x)
        # Downsampling blocks concatenate the shortcut; others add it.
        out = F.relu(torch.cat([out, res], 1)) if self.stride == 2 else F.relu(out + res)
        return out
class ShuffleNet(nn.Module):
    """ShuffleNet for 32x32 inputs, configured by a dict.

    cfg keys: 'out_planes' (per-stage widths), 'num_blocks' (blocks per
    stage), and 'groups' (group count for the grouped convolutions).
    """

    def __init__(self, cfg):
        super(ShuffleNet, self).__init__()
        out_planes = cfg['out_planes']
        num_blocks = cfg['num_blocks']
        groups = cfg['groups']
        self.conv1 = nn.Conv2d(3, 24, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(24)
        self.in_planes = 24
        self.layer1 = self._make_layer(out_planes[0], num_blocks[0], groups)
        self.layer2 = self._make_layer(out_planes[1], num_blocks[1], groups)
        self.layer3 = self._make_layer(out_planes[2], num_blocks[2], groups)
        self.linear = nn.Linear(out_planes[2], 10)

    def _make_layer(self, out_planes, num_blocks, groups):
        stage = []
        for idx in range(num_blocks):
            first = idx == 0
            # The first block of a stage downsamples and concatenates its
            # shortcut, so its conv branch only produces the remaining
            # out_planes - in_planes channels; later blocks add the shortcut
            # and keep the full width.
            stage.append(Bottleneck(
                self.in_planes,
                out_planes - (self.in_planes if first else 0),
                stride=2 if first else 1,
                groups=groups,
            ))
            self.in_planes = out_planes
        return nn.Sequential(*stage)

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        h = self.layer3(self.layer2(self.layer1(h)))
        h = F.avg_pool2d(h, 4)
        h = h.view(h.size(0), -1)
        return self.linear(h)
def ShuffleNetG2():
    """ShuffleNet with 2 convolution groups (CIFAR-10 configuration)."""
    return ShuffleNet({
        'out_planes': [200, 400, 800],
        'num_blocks': [4, 8, 4],
        'groups': 2,
    })
def ShuffleNetG3():
    """ShuffleNet with 3 convolution groups (CIFAR-10 configuration)."""
    return ShuffleNet({
        'out_planes': [240, 480, 960],
        'num_blocks': [4, 8, 4],
        'groups': 3,
    })
def test():
    """Smoke test: print the logits of one random batch through ShuffleNetG2."""
    model = ShuffleNetG2()
    batch = torch.randn(1, 3, 32, 32)
    print(model(batch))
class VGG(nn.Module):
    """VGG for CIFAR-10, built from the module-level `cfg` table.

    `vgg_name` selects a layer specification (a list of channel counts and
    'M' max-pool markers) from the global `cfg` dict.
    """

    def __init__(self, vgg_name):
        super(VGG, self).__init__()
        self.features = self._make_layers(cfg[vgg_name])
        self.classifier = nn.Linear(512, 10)

    def forward(self, x):
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)
        return self.classifier(flat)

    def _make_layers(self, layer_cfg):
        layers = []
        in_channels = 3
        for spec in layer_cfg:
            if spec == 'M':
                layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            else:
                # Conv -> BN -> ReLU with `spec` output channels.
                layers.extend([
                    nn.Conv2d(in_channels, spec, kernel_size=3, padding=1),
                    nn.BatchNorm2d(spec),
                    nn.ReLU(inplace=True),
                ])
                in_channels = spec
        layers.append(nn.AvgPool2d(kernel_size=1, stride=1))
        return nn.Sequential(*layers)
def test():
    """Smoke test: one random two-image batch through VGG11."""
    net = VGG('VGG11')
    out = net(torch.randn(2, 3, 32, 32))
    print(out.size())
def narcissus_gen(dataset_path=dataset_path, lab=lab):
    # Generate a "Narcissus"-style clean-label backdoor trigger for target
    # class `lab` in three phases: (1) train a surrogate model on the target
    # class plus an out-of-distribution dataset, (2) warm up a copy of it on
    # the target class only, (3) optimize an L-inf bounded noise pattern that
    # minimizes the frozen warm-up model's loss on the target class.
    # NOTE(review): the defaults bind the module-level `dataset_path` / `lab`
    # at definition time; relies on globals ResNet18_201, get_labels, Subset,
    # concoct_dataset, apply_noise_patch and `device` defined elsewhere.
    noise_size = 32  # trigger spatial size (matches CIFAR-10 images)
    l_inf_r = (16 / 255)  # L-inf radius; the clamps below allow twice this
    surrogate_model = ResNet18_201().cuda()
    generating_model = ResNet18_201().cuda()
    surrogate_epochs = 200
    generating_lr_warmup = 0.1
    warmup_round = 5
    generating_lr_tri = 0.01
    gen_round = 1000
    train_batch_size = 350
    patch_mode = 'add'  # trigger is added to (not pasted over) the image
    # Surrogate-training augmentation (Resize handles the outer dataset's images).
    transform_surrogate_train = transforms.Compose([transforms.Resize(32), transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    transform_train = transforms.Compose([transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    transform_test = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    ori_train = torchvision.datasets.CIFAR10(root=dataset_path, train=True, download=False, transform=transform_train)
    ori_test = torchvision.datasets.CIFAR10(root=dataset_path, train=False, download=False, transform=transform_test)
    # Public out-of-distribution data trained alongside the target class.
    outter_trainset = torchvision.datasets.ImageFolder(root=(dataset_path + 'tiny-imagenet-200/train/'), transform=transform_surrogate_train)
    train_label = [get_labels(ori_train)[x] for x in range(len(get_labels(ori_train)))]
    test_label = [get_labels(ori_test)[x] for x in range(len(get_labels(ori_test)))]
    # Indices of the target class within the CIFAR-10 training set.
    train_target_list = list(np.where((np.array(train_label) == lab))[0])
    train_target = Subset(ori_train, train_target_list)
    concoct_train_dataset = concoct_dataset(train_target, outter_trainset)
    surrogate_loader = torch.utils.data.DataLoader(concoct_train_dataset, batch_size=train_batch_size, shuffle=True, num_workers=16)
    poi_warm_up_loader = torch.utils.data.DataLoader(train_target, batch_size=train_batch_size, shuffle=True, num_workers=16)
    trigger_gen_loaders = torch.utils.data.DataLoader(train_target, batch_size=train_batch_size, shuffle=True, num_workers=16)
    condition = True  # NOTE(review): never read again below
    noise = torch.zeros((1, 3, noise_size, noise_size), device=device)
    surrogate_model = surrogate_model  # no-op self-assignment, kept as-is
    criterion = torch.nn.CrossEntropyLoss()
    surrogate_opt = torch.optim.SGD(params=surrogate_model.parameters(), lr=0.1, momentum=0.9, weight_decay=0.0005)
    surrogate_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(surrogate_opt, T_max=surrogate_epochs)
    # --- Phase 1: surrogate training -------------------------------------
    print('Training the surrogate model')
    for epoch in range(0, surrogate_epochs):
        surrogate_model.train()
        loss_list = []
        for (images, labels) in surrogate_loader:
            (images, labels) = (images.cuda(), labels.cuda())
            surrogate_opt.zero_grad()
            outputs = surrogate_model(images)
            loss = criterion(outputs, labels)
            loss.backward()
            loss_list.append(float(loss.data))
            surrogate_opt.step()
        # Cosine LR schedule stepped once per epoch.
        # NOTE(review): loop nesting reconstructed from a flattened source —
        # confirm scheduler.step() is per-epoch, matching T_max above.
        surrogate_scheduler.step()
        ave_loss = np.average(np.array(loss_list))
        print(('Epoch:%d, Loss: %.03f' % (epoch, ave_loss)))
    save_path = (('./checkpoint/surrogate_pretrain_' + str(surrogate_epochs)) + '.pth')
    torch.save(surrogate_model.state_dict(), save_path)
    # --- Phase 2: warm-up on the target class only -----------------------
    poi_warm_up_model = generating_model
    poi_warm_up_model.load_state_dict(surrogate_model.state_dict())
    poi_warm_up_opt = torch.optim.RAdam(params=poi_warm_up_model.parameters(), lr=generating_lr_warmup)
    poi_warm_up_model.train()
    for param in poi_warm_up_model.parameters():
        param.requires_grad = True
    for epoch in range(0, warmup_round):
        poi_warm_up_model.train()
        loss_list = []
        for (images, labels) in poi_warm_up_loader:
            (images, labels) = (images.cuda(), labels.cuda())
            poi_warm_up_model.zero_grad()
            poi_warm_up_opt.zero_grad()
            outputs = poi_warm_up_model(images)
            loss = criterion(outputs, labels)
            loss.backward(retain_graph=True)
            loss_list.append(float(loss.data))
            poi_warm_up_opt.step()
        ave_loss = np.average(np.array(loss_list))
        print(('Epoch:%d, Loss: %e' % (epoch, ave_loss)))
    # --- Phase 3: trigger optimization (model frozen) --------------------
    for param in poi_warm_up_model.parameters():
        param.requires_grad = False
    batch_pert = torch.autograd.Variable(noise.cuda(), requires_grad=True)
    batch_opt = torch.optim.RAdam(params=[batch_pert], lr=generating_lr_tri)
    for minmin in tqdm.notebook.tqdm(range(gen_round)):
        loss_list = []
        for (images, labels) in trigger_gen_loaders:
            (images, labels) = (images.cuda(), labels.cuda())
            new_images = torch.clone(images)
            # Clamp the perturbation to twice the pixel-space L-inf radius
            # (inputs are normalized to [-1, 1], doubling the scale).
            clamp_batch_pert = torch.clamp(batch_pert, ((- l_inf_r) * 2), (l_inf_r * 2))
            new_images = torch.clamp(apply_noise_patch(clamp_batch_pert, new_images.clone(), mode=patch_mode), (- 1), 1)
            per_logits = poi_warm_up_model.forward(new_images)
            loss = criterion(per_logits, labels)
            loss_regu = torch.mean(loss)
            batch_opt.zero_grad()
            loss_list.append(float(loss_regu.data))
            loss_regu.backward(retain_graph=True)
            batch_opt.step()
        ave_loss = np.average(np.array(loss_list))
        ave_grad = np.sum(abs(batch_pert.grad).detach().cpu().numpy())
        print('Gradient:', ave_grad, 'Loss:', ave_loss)
        # Stop when the trigger no longer receives any gradient signal.
        if (ave_grad == 0):
            break
    noise = torch.clamp(batch_pert, ((- l_inf_r) * 2), (l_inf_r * 2))
    best_noise = noise.clone().detach().cpu()
    plt.imshow(np.transpose(noise[0].detach().cpu(), (1, 2, 0)))
    plt.show()
    print('Noise max val:', noise.max())
    return best_noise
class AISModel(nn.Module):
    """Tempered intermediate distribution for annealed importance sampling.

    Interpolates log-densities between an initial distribution (beta = 0)
    and the target energy model (beta = 1):
        log p_beta(x) = beta * log p_model(x) + (1 - beta) * log p_init(x)
    """

    def __init__(self, model, init_dist):
        super().__init__()
        self.model = model
        self.init_dist = init_dist

    def forward(self, x, beta):
        target_logp = self.model(x).squeeze()
        base_logp = self.init_dist.log_prob(x).sum(-1)
        return beta * target_logp + (1.0 - beta) * base_logp
def evaluate(model, init_dist, sampler, train_loader, val_loader, test_loader, preprocess, device, n_iters, n_samples, steps_per_iter=1, viz_every=100):
    # Estimate log Z of `model` with annealed importance sampling (AIS),
    # then report train/val/test log-likelihoods using that estimate.
    # Returns (logZ, train_ll, val_ll, test_ll, list of sample snapshots).
    model = AISModel(model, init_dist)
    model.to(device)
    betas = np.linspace(0.0, 1.0, n_iters)  # annealing schedule, base -> target
    samples = init_dist.sample((n_samples,))
    log_w = torch.zeros((n_samples,)).to(device)  # AIS log importance weights
    gen_samples = []
    for (itr, beta_k) in tqdm(enumerate(betas)):
        if (itr == 0):
            continue
        beta_km1 = betas[(itr - 1)]
        # AIS weight update: log w += log p_{beta_k}(x) - log p_{beta_{k-1}}(x).
        with torch.no_grad():
            log_w = ((log_w + model(samples, beta_k)) - model(samples, beta_km1))
        # Transition kernel targeting the current intermediate distribution.
        model_k = (lambda x: model(x, beta=beta_k))
        for d in range(steps_per_iter):
            samples = sampler.step(samples.detach(), model_k).detach()
        if (((itr + 1) % viz_every) == 0):
            gen_samples.append(samples.cpu().detach())
    # log Z estimate = logsumexp(log w) - log(n_samples).
    logZ_final = (log_w.logsumexp(0) - np.log(n_samples))
    print('Final log(Z) = {:.4f}'.format(logZ_final))
    model = model.model  # unwrap the AIS wrapper back to the raw energy model
    # Average unnormalized log-density on each split, normalized by logZ.
    logps = []
    for (x, _) in train_loader:
        x = preprocess(x.to(device))
        logp_x = model(x).squeeze().detach()
        logps.append(logp_x)
    logps = torch.cat(logps)
    train_ll = (logps.mean() - logZ_final)
    logps = []
    for (x, _) in val_loader:
        x = preprocess(x.to(device))
        logp_x = model(x).squeeze().detach()
        logps.append(logp_x)
    logps = torch.cat(logps)
    val_ll = (logps.mean() - logZ_final)
    logps = []
    for (x, _) in test_loader:
        x = preprocess(x.to(device))
        logp_x = model(x).squeeze().detach()
        logps.append(logp_x)
    logps = torch.cat(logps)
    test_ll = (logps.mean() - logZ_final)
    return (logZ_final, train_ll, val_ll, test_ll, gen_samples)
def makedirs(dirname):
    """Create `dirname` (and parents) if it does not already exist.

    Uses exist_ok=True instead of a check-then-create pair, which avoids the
    race where another process creates the directory between the check and
    the call.
    """
    os.makedirs(dirname, exist_ok=True)
def main(args):
    """Estimate log Z of a pretrained Potts model with AIS and plot its trace.

    Builds the model named by args.model, loads a checkpoint if given, then
    runs annealed importance sampling with the sampler named by args.sampler,
    logging and plotting intermediate log-Z estimates to args.save_dir.
    """
    makedirs(args.save_dir)
    logger = open('{}/log.txt'.format(args.save_dir), 'w')

    def my_print(s):
        # Echo to stdout and append to the run log.
        print(s)
        logger.write(str(s) + '\n')

    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    # BUG FIX: the 'dense_potts' test was a separate `if` with the `else:
    # raise` attached to it, so selecting 'lattice_potts' built the model and
    # then immediately raised ValueError. An elif chain fixes the dispatch.
    if args.model == 'lattice_potts':
        model = rbm.LatticePottsModel(int(args.dim), int(args.n_out), 0.0, 0.0, learn_sigma=True)
    elif args.model == 'dense_potts':
        model = rbm.DensePottsModel(args.dim, args.n_out, learn_J=True, learn_bias=True)
    else:
        raise ValueError
    model.to(device)
    if args.sampler == 'gibbs':
        sampler = samplers.PerDimMetropolisSampler(args.dim, int(args.n_out), rand=False)
    elif args.sampler == 'rand_gibbs':
        sampler = samplers.PerDimMetropolisSampler(args.dim, int(args.n_out), rand=True)
    elif args.sampler == 'gwg':
        sampler = samplers.DiffSamplerMultiDim(args.dim, 1, approx=True, temp=2.0)
    else:
        raise ValueError
    my_print(device)
    my_print(model)
    my_print(sampler)
    my_print('Loading...')
    if args.ckpt_path is not None:
        d = torch.load(args.ckpt_path)
        model.load_state_dict(d['model'])
    my_print('Loaded!')
    # Annealing schedule from the base distribution (beta=0) to the model (beta=1).
    betas = np.linspace(0.0, 1.0, args.n_iters)
    samples = model.init_sample(args.n_samples)
    log_w = torch.zeros((args.n_samples,)).to(device)
    # Contribution of the factorized base distribution's normalizer.
    log_w += model.bias.logsumexp(-1).sum()
    logZs = []
    for (itr, beta_k) in enumerate(betas):
        if itr == 0:
            continue
        beta_km1 = betas[itr - 1]
        # AIS weight update: log w += log p_{beta_k}(x) - log p_{beta_{k-1}}(x).
        with torch.no_grad():
            log_w = (log_w + model(samples, beta=beta_k)) - model(samples, beta_km1)
        model_k = (lambda x: model(x, beta=beta_k))
        for d in range(args.steps_per_iter):
            samples = sampler.step(samples.detach(), model_k).detach()
        if (itr % args.print_every) == 0:
            logZ = log_w.logsumexp(0) - np.log(args.n_samples)
            logZs.append(logZ.item())
            my_print('({}) beta = {}, log Z = {:.4f}'.format(itr, beta_k, logZ.item()))
            logger.flush()
        if (itr % args.viz_every) == 0:
            plt.clf()
            plt.plot(logZs, label='log(Z)')
            plt.legend()
            plt.savefig('{}/logZ.png'.format(args.save_dir))
    logZ_final = log_w.logsumexp(0) - np.log(args.n_samples)
    my_print('Final log(Z) = {:.4f}'.format(logZ_final))
def makedirs(dirname):
    """Create `dirname` (and parents) if it does not already exist.

    Uses exist_ok=True instead of a check-then-create pair, which avoids the
    race where another process creates the directory between the check and
    the call.
    """
    os.makedirs(dirname, exist_ok=True)
def main(args):
    # Evaluate a trained discrete EBM: estimate log Z via ais.evaluate and
    # report train/val/test log-likelihoods, saving AIS sample grids as PNGs.
    makedirs(args.save_dir)
    logger = open('{}/log.txt'.format(args.save_dir), 'w')

    def my_print(s):
        # Echo to stdout and append to the run log.
        print(s)
        logger.write((str(s) + '\n'))
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    my_print('Loading data')
    (train_loader, val_loader, test_loader, args) = vamp_utils.load_dataset(args)
    # Save a batch of images as an approximately square grid.
    plot = (lambda p, x: torchvision.utils.save_image(x.view(x.size(0), args.input_size[0], args.input_size[1], args.input_size[2]), p, normalize=True, nrow=int((x.size(0) ** 0.5))))

    def preprocess(data):
        # Optionally binarize pixels on the fly (dynamic binarization).
        if args.dynamic_binarization:
            return torch.bernoulli(data)
        else:
            return data
    my_print('Making Model')
    # Model spec strings look like 'mlp-256' / 'resnet-64' / 'cnn-32';
    # the suffix is the hidden width / channel count.
    if args.model.startswith('mlp-'):
        nint = int(args.model.split('-')[1])
        net = mlp.mlp_ebm(np.prod(args.input_size), nint)
    elif args.model.startswith('resnet-'):
        nint = int(args.model.split('-')[1])
        net = mlp.ResNetEBM(nint)
    elif args.model.startswith('cnn-'):
        nint = int(args.model.split('-')[1])
        net = mlp.MNISTConvNet(nint)
    else:
        raise ValueError('invalid model definition')
    my_print('Getting init batch')
    # Per-pixel training mean, smoothed away from 0/1, for the base Bernoulli.
    init_batch = []
    for (x, _) in train_loader:
        init_batch.append(preprocess(x))
    init_batch = torch.cat(init_batch, 0)
    eps = 0.01
    init_mean = ((init_batch.mean(0) * (1.0 - (2 * eps))) + eps)
    if args.base_dist:
        model = EBM(net, init_mean)
    else:
        model = EBM(net)
    d = torch.load(args.ckpt_path)
    # Prefer the EMA weights when requested.
    if args.ema:
        model.load_state_dict(d['ema_model'])
    else:
        model.load_state_dict(d['model'])
    buffer = d['buffer']  # NOTE(review): loaded but unused below
    init_dist = torch.distributions.Bernoulli(probs=init_mean.to(device))
    sampler = get_sampler(args)
    my_print(device)
    my_print(model)
    my_print(sampler)
    (logZ, train_ll, val_ll, test_ll, ais_samples) = ais.evaluate(model, init_dist, sampler, train_loader, val_loader, test_loader, preprocess, device, args.eval_sampling_steps, args.n_samples, viz_every=args.viz_every)
    my_print('EMA Train log-likelihood: {}'.format(train_ll.item()))
    my_print('EMA Valid log-likelihood: {}'.format(val_ll.item()))
    my_print('EMA Test log-likelihood: {}'.format(test_ll.item()))
    # Dump the sample snapshots collected during AIS.
    for (_i, _x) in enumerate(ais_samples):
        plot('{}/EMA_sample_{}.png'.format(args.save_dir, _i), _x)
class Swish(nn.Module):
    """Swish activation: x * sigmoid(x)."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        return torch.sigmoid(x) * x
def mlp_ebm(nin, nint=256, nout=1):
    """MLP energy model: three Swish-activated hidden layers of width `nint`."""
    layers = [
        nn.Linear(nin, nint), Swish(),
        nn.Linear(nint, nint), Swish(),
        nn.Linear(nint, nint), Swish(),
        nn.Linear(nint, nout),
    ]
    return nn.Sequential(*layers)
class MLPEBM_cat(nn.Module):
    """MLP energy model over categorical (one-hot) inputs.

    Each of the `nin` positions carries an `n_cat`-way one-hot vector; a
    shared linear layer projects it to `n_proj` dims, and the flattened
    projections feed an mlp_ebm energy network.
    """

    def __init__(self, nin, n_proj, n_cat=256, nint=256, nout=1):
        super().__init__()
        self.proj = nn.Linear(n_cat, n_proj)
        self.n_proj = n_proj
        self.net = mlp_ebm(nin * n_proj, nint, nout=nout)

    def forward(self, x):
        batch, positions = x.size(0), x.size(1)
        # Apply the shared projection to every position at once.
        flat = x.view(batch * positions, x.size(2))
        projected = self.proj(flat).view(batch, positions, self.n_proj)
        # Concatenate the per-position projections and score them.
        return self.net(projected.view(batch, positions * self.n_proj))
def conv_transpose_3x3(in_planes, out_planes, stride=1):
    """Build a 3x3 ConvTranspose2d with padding=1, output_padding=1 and bias."""
    return nn.ConvTranspose2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        output_padding=1,
        bias=True,
    )
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding=1.

    A negative stride is a convention for upsampling: it returns a 3x3
    transposed convolution with stride -stride instead.
    """
    if stride >= 0:
        return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                         padding=1, bias=True)
    return conv_transpose_3x3(in_planes, out_planes, stride=-stride)
class BasicBlock(nn.Module):
    """Residual block built on conv3x3 with Swish nonlinearities.

    A negative stride produces an upsampling (transposed-conv) block. The
    output nonlinearity can be disabled via `out_nonlin`.
    """

    expansion = 1

    def __init__(self, in_planes, planes, stride=1, out_nonlin=True):
        super(BasicBlock, self).__init__()
        self.nonlin1 = Swish()
        self.nonlin2 = Swish()
        self.conv1 = conv3x3(in_planes, planes, stride)
        self.conv2 = conv3x3(planes, planes)
        self.out_nonlin = out_nonlin
        self.shortcut_conv = None
        # Project the shortcut only when the shape changes.
        if (stride != 1) or (in_planes != self.expansion * planes):
            if stride < 0:
                # Upsampling path: mirror the transposed main branch.
                self.shortcut_conv = nn.ConvTranspose2d(
                    in_planes, self.expansion * planes, kernel_size=1,
                    stride=-stride, output_padding=1, bias=True)
            else:
                self.shortcut_conv = nn.Conv2d(
                    in_planes, self.expansion * planes, kernel_size=1,
                    stride=stride, bias=True)

    def forward(self, x):
        h = self.conv2(self.nonlin1(self.conv1(x)))
        h = h + (x if self.shortcut_conv is None else self.shortcut_conv(x))
        return self.nonlin2(h) if self.out_nonlin else h
class ResNetEBM(nn.Module):
    """ResNet energy model for 28x28 single-channel inputs.

    Two downsampling blocks followed by six stride-1 blocks; features are
    globally average-pooled and mapped to a scalar energy per example.
    """

    def __init__(self, n_channels=64):
        super().__init__()
        self.proj = nn.Conv2d(1, n_channels, 3, 1, 1)
        blocks = [BasicBlock(n_channels, n_channels, 2),
                  BasicBlock(n_channels, n_channels, 2)]
        blocks += [BasicBlock(n_channels, n_channels, 1) for _ in range(6)]
        self.net = nn.Sequential(*blocks)
        self.energy_linear = nn.Linear(n_channels, 1)

    def forward(self, input):
        h = self.proj(input.view(input.size(0), 1, 28, 28))
        h = self.net(h)
        # Global average pool over all spatial positions.
        h = h.view(h.size(0), h.size(1), -1).mean(-1)
        return self.energy_linear(h).squeeze()
class MNISTConvNet(nn.Module):
    """Convolutional energy model for 28x28 single-channel inputs.

    Alternates 3x3 stride-1 convs with 4x4 stride-2 downsampling convs,
    doubling the channel count at each downsample, then maps the final
    features to a scalar energy per example.
    """

    def __init__(self, nc=16):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(1, nc, 3, 1, 1), Swish(),
            nn.Conv2d(nc, nc * 2, 4, 2, 1), Swish(),
            nn.Conv2d(nc * 2, nc * 2, 3, 1, 1), Swish(),
            nn.Conv2d(nc * 2, nc * 4, 4, 2, 1), Swish(),
            nn.Conv2d(nc * 4, nc * 4, 3, 1, 1), Swish(),
            nn.Conv2d(nc * 4, nc * 8, 4, 2, 1), Swish(),
            nn.Conv2d(nc * 8, nc * 8, 3, 1, 0), Swish(),
        )
        self.out = nn.Linear(nc * 8, 1)

    def forward(self, input):
        images = input.view(input.size(0), 1, 28, 28)
        features = self.net(images).squeeze()
        return self.out(features).squeeze()
class ResNetEBM_cat(nn.Module):
    """ResNet energy model over categorical (one-hot) image inputs.

    The n_cat-way one-hot channels are projected down with a 1x1 conv, lifted
    to n_channels with a 3x3 conv, passed through a ResNet trunk, globally
    average-pooled, and mapped to a scalar energy per example.
    """

    def __init__(self, shape, n_proj, n_cat=256, n_channels=64):
        super().__init__()
        self.shape = shape
        self.n_cat = n_cat
        self.proj = nn.Conv2d(n_cat, n_proj, 1, 1, 0)
        self.proj2 = nn.Conv2d(n_proj, n_channels, 3, 1, 1)
        blocks = [BasicBlock(n_channels, n_channels, 2),
                  BasicBlock(n_channels, n_channels, 2)]
        blocks += [BasicBlock(n_channels, n_channels, 1) for _ in range(6)]
        self.net = nn.Sequential(*blocks)
        self.energy_linear = nn.Linear(n_channels, 1)

    def forward(self, input):
        # [N, H, W, n_cat] -> [N, n_cat, H, W] for the convolutions.
        h = input.view(input.size(0), self.shape[1], self.shape[2], self.n_cat)
        h = h.permute(0, 3, 1, 2)
        h = self.proj2(self.proj(h))
        h = self.net(h)
        # Global average pool over all spatial positions.
        h = h.view(h.size(0), h.size(1), -1).mean(-1)
        return self.energy_linear(h).squeeze()
def makedirs(dirname):
    """Create `dirname` (and parents) if it does not already exist.

    Uses exist_ok=True instead of a check-then-create pair, which avoids the
    race where another process creates the directory between the check and
    the call.
    """
    os.makedirs(dirname, exist_ok=True)
def l1(module):
    """Return the L1 norm (sum of absolute values) of all parameters of `module`."""
    return sum((p.abs().sum() for p in module.parameters()), 0.0)
def main(args):
    # Structure-learning experiment: fit an Ising/Potts/RBM energy model with
    # persistent contrastive divergence (PCD) plus an L1 penalty on the
    # couplings, tracking squared error / RMSE of the recovered coupling
    # matrix J against the ground truth. Can also synthesize data and exit.
    makedirs(args.save_dir)
    logger = open('{}/log.txt'.format(args.save_dir), 'w')

    def my_print(s):
        # Echo to stdout and append to the run log.
        print(s)
        logger.write((str(s) + '\n'))
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    if ((args.data == 'mnist') or (args.data_file is not None)):
        (train_loader, test_loader, plot, viz) = utils.get_data(args)
    else:
        # Data-generation mode: sample from a ground-truth model, persist the
        # data (and J for er_ising), then exit; a later run consumes it.
        (data, data_model) = utils.generate_data(args)
        my_print('we have created your data, but what have you done for me lately?????')
        with open('{}/data.pkl'.format(args.save_dir), 'wb') as f:
            pickle.dump(data, f)
        if (args.data_model == 'er_ising'):
            ground_truth_J = data_model.J.detach().cpu()
            with open('{}/J.pkl'.format(args.save_dir), 'wb') as f:
                pickle.dump(ground_truth_J, f)
        quit()
    # Model construction; for the lattice/ER variants the ground-truth J is
    # captured before re-initializing the learnable couplings from noise.
    if (args.model == 'lattice_potts'):
        model = rbm.LatticePottsModel(int(args.dim), int(args.n_state), 0.0, 0.0, learn_sigma=True)
        buffer = model.init_sample(args.buffer_size)
    elif (args.model == 'lattice_ising'):
        model = rbm.LatticeIsingModel(int(args.dim), 0.0, 0.0, learn_sigma=True)
        buffer = model.init_sample(args.buffer_size)
    elif (args.model == 'lattice_ising_3d'):
        model = rbm.LatticeIsingModel(int(args.dim), 0.2, learn_G=True, lattice_dim=3)
        ground_truth_J = model.J.clone().to(device)
        model.G.data = (torch.randn_like(model.G.data) * 0.01)  # restart from noise
        model.sigma.data = torch.ones_like(model.sigma.data)
        buffer = model.init_sample(args.buffer_size)
        plt.clf()
        plt.matshow(ground_truth_J.detach().cpu().numpy())
        plt.savefig('{}/ground_truth.png'.format(args.save_dir))
    elif (args.model == 'lattice_ising_2d'):
        model = rbm.LatticeIsingModel(int(args.dim), args.sigma, learn_G=True, lattice_dim=2)
        ground_truth_J = model.J.clone().to(device)
        model.G.data = (torch.randn_like(model.G.data) * 0.01)
        model.sigma.data = torch.ones_like(model.sigma.data)
        buffer = model.init_sample(args.buffer_size)
        plt.clf()
        plt.matshow(ground_truth_J.detach().cpu().numpy())
        plt.savefig('{}/ground_truth.png'.format(args.save_dir))
    elif (args.model == 'er_ising'):
        model = rbm.ERIsingModel(int(args.dim), 2, learn_G=True)
        model.G.data = (torch.randn_like(model.G.data) * 0.01)
        buffer = model.init_sample(args.buffer_size)
        # Ground-truth coupling matrix comes from a pickled graph file here.
        with open(args.graph_file, 'rb') as f:
            ground_truth_J = pickle.load(f)
        plt.clf()
        plt.matshow(ground_truth_J.detach().cpu().numpy())
        plt.savefig('{}/ground_truth.png'.format(args.save_dir))
        ground_truth_J = ground_truth_J.to(device)
    elif (args.model == 'rbm'):
        model = rbm.BernoulliRBM(args.dim, args.n_hidden)
        buffer = model.init_dist.sample((args.buffer_size,))
    elif (args.model == 'dense_potts'):
        raise ValueError
    elif (args.model == 'dense_ising'):
        raise ValueError
    elif (args.model == 'mlp'):
        raise ValueError
    model.to(device)
    buffer = buffer.to(device)

    def get_J():
        # Symmetrized view of the learned coupling matrix.
        j = model.J
        return ((j + j.t()) / 2)
    # Sampler selection; Potts models need the multi-state variants.
    if (args.sampler == 'gibbs'):
        if ('potts' in args.model):
            sampler = samplers.PerDimMetropolisSampler(model.data_dim, int(args.n_state), rand=False)
        else:
            sampler = samplers.PerDimGibbsSampler(model.data_dim, rand=False)
    elif (args.sampler == 'rand_gibbs'):
        if ('potts' in args.model):
            sampler = samplers.PerDimMetropolisSampler(model.data_dim, int(args.n_state), rand=True)
        else:
            sampler = samplers.PerDimGibbsSampler(model.data_dim, rand=True)
    elif (args.sampler == 'gwg'):
        if ('potts' in args.model):
            sampler = samplers.DiffSamplerMultiDim(model.data_dim, 1, approx=True, temp=2.0)
        else:
            sampler = samplers.DiffSampler(model.data_dim, 1, approx=True, fixed_proposal=False, temp=2.0)
    else:
        # 'gwg-<k>': multi-hop Gibbs-with-gradients.
        assert ('gwg-' in args.sampler)
        n_hop = int(args.sampler.split('-')[1])
        if ('potts' in args.model):
            raise ValueError
        else:
            sampler = samplers.MultiDiffSampler(model.data_dim, 1, approx=True, temp=2.0, n_samples=n_hop)
    my_print(device)
    my_print(model)
    my_print(buffer.size())
    my_print(sampler)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    itr = 0
    sigmas = []
    sq_errs = []
    rmses = []
    while (itr < args.n_iters):
        for x in train_loader:
            x = x[0].to(device)
            # PCD negative phase: advance the persistent chains.
            for k in range(args.sampling_steps):
                buffer = sampler.step(buffer.detach(), model).detach()
            logp_real = model(x).squeeze().mean()
            logp_fake = model(buffer).squeeze().mean()
            obj = (logp_real - logp_fake)
            loss = (- obj)
            loss += (args.l1 * get_J().abs().sum())  # L1 sparsity on couplings
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # Zero the diagonal of G (no self-couplings).
            # NOTE(review): assumes the chosen model exposes G — confirm this
            # branch is never reached with args.model == 'rbm'.
            model.G.data *= (1.0 - torch.eye(model.G.data.size(0))).to(model.G)
            if ((itr % args.print_every) == 0):
                my_print('({}) log p(real) = {:.4f}, log p(fake) = {:.4f}, diff = {:.4f}, hops = {:.4f}'.format(itr, logp_real.item(), logp_fake.item(), obj.item(), sampler._hops))
                if (args.model in ('lattice_potts', 'lattice_ising')):
                    my_print('\tsigma true = {:.4f}, current sigma = {:.4f}'.format(args.sigma, model.sigma.data.item()))
                else:
                    sq_err = ((ground_truth_J - get_J()) ** 2).sum()
                    rmse = ((ground_truth_J - get_J()) ** 2).mean().sqrt()
                    my_print('\t err^2 = {:.4f}, rmse = {:.4f}'.format(sq_err, rmse))
                    print(ground_truth_J)
                    print(get_J())
            if ((itr % args.viz_every) == 0):
                # Periodic diagnostics: error curves plus heatmaps/samples.
                if (args.model in ('lattice_potts', 'lattice_ising')):
                    sigmas.append(model.sigma.data.item())
                    plt.clf()
                    plt.plot(sigmas, label='model')
                    plt.plot([args.sigma for s in sigmas], label='gt')
                    plt.legend()
                    plt.savefig('{}/sigma.png'.format(args.save_dir))
                else:
                    sq_err = ((ground_truth_J - get_J()) ** 2).sum()
                    sq_errs.append(sq_err.item())
                    plt.clf()
                    plt.plot(sq_errs, label='sq_err')
                    plt.legend()
                    plt.savefig('{}/sq_err.png'.format(args.save_dir))
                    rmse = ((ground_truth_J - get_J()) ** 2).mean().sqrt()
                    rmses.append(rmse.item())
                    plt.clf()
                    plt.plot(rmses, label='rmse')
                    plt.legend()
                    plt.savefig('{}/rmse.png'.format(args.save_dir))
                    plt.clf()
                    plt.matshow(get_J().detach().cpu().numpy())
                    plt.savefig('{}/model_{}.png'.format(args.save_dir, itr))
                plot('{}/data_{}.png'.format(args.save_dir, itr), x.detach().cpu())
                plot('{}/buffer_{}.png'.format(args.save_dir, itr), buffer[:args.batch_size].detach().cpu())
            itr += 1
            if (itr > args.n_iters):
                # Final metrics written to text files before exiting.
                if (args.model in ('lattice_potts', 'lattice_ising')):
                    final_sigma = model.sigma.data.item()
                    with open('{}/sigma.txt'.format(args.save_dir), 'w') as f:
                        f.write(str(final_sigma))
                else:
                    sq_err = ((ground_truth_J - get_J()) ** 2).sum().item()
                    rmse = ((ground_truth_J - get_J()) ** 2).mean().sqrt().item()
                    with open('{}/sq_err.txt'.format(args.save_dir), 'w') as f:
                        f.write(str(sq_err))
                    with open('{}/rmse.txt'.format(args.save_dir), 'w') as f:
                        f.write(str(rmse))
                quit()
def makedirs(dirname):
    """Create `dirname` (and parents) if it does not already exist.

    Uses exist_ok=True instead of a check-then-create pair, which avoids the
    race where another process creates the directory between the check and
    the call.
    """
    os.makedirs(dirname, exist_ok=True)
def main(args):
    # Compare samplers on a Bernoulli RBM: a plain Gibbs baseline versus
    # relaxed SVGD at several temperatures, tracking log-MMD between drawn
    # samples and long-run "ground truth" Gibbs samples.
    makedirs(args.save_dir)
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    model = rbm.BernoulliRBM(args.n_visible, args.n_hidden)
    model.to(device)
    print(device)
    if (args.data == 'mnist'):
        assert (args.n_visible == 784)
        (train_loader, test_loader, plot, viz) = utils.get_data(args)
        # Initialize visible biases from the (clamped) data mean.
        init_data = []
        for (x, _) in train_loader:
            init_data.append(x)
        init_data = torch.cat(init_data, 0)
        init_mean = init_data.mean(0).clamp(0.01, 0.99)
        model = rbm.BernoulliRBM(args.n_visible, args.n_hidden, data_mean=init_mean)
        model.to(device)
        optimizer = torch.optim.Adam(model.parameters(), lr=args.rbm_lr)
        # Contrastive-divergence training, one pass over the training set.
        itr = 0  # NOTE(review): never incremented — the print below fires
        # on every batch; confirm whether that is intended.
        for (x, _) in train_loader:
            x = x.to(device)
            xhat = model.gibbs_sample(v=x, n_steps=args.cd)
            d = model.logp_v_unnorm(x)
            m = model.logp_v_unnorm(xhat)
            obj = (d - m)
            loss = (- obj.mean())
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if ((itr % args.print_every) == 0):
                print('{} | log p(data) = {:.4f}, log p(model) = {:.4f}, diff = {:.4f}'.format(itr, d.mean(), m.mean(), (d - m).mean()))
    else:
        # Synthetic RBM with random weights; no plotting utilities available.
        model.W.data = (torch.randn_like(model.W.data) * (0.05 ** 0.5))
        model.b_v.data = (torch.randn_like(model.b_v.data) * 1.0)
        model.b_h.data = (torch.randn_like(model.b_h.data) * 1.0)
        viz = plot = None
    # Long-run Gibbs chains provide reference samples for the MMD metric;
    # the pool is split into a comparison set and a held-out sanity set.
    gt_samples = model.gibbs_sample(n_steps=args.mcmc_steps, n_samples=(args.n_samples + args.n_test_samples), plot=True)
    kmmd = mmd.MMD(mmd.exp_avg_hamming, False)
    (gt_samples, gt_samples2) = (gt_samples[:args.n_samples], gt_samples[args.n_samples:])
    if (plot is not None):
        plot('{}/ground_truth.png'.format(args.save_dir), gt_samples2)
    # Baseline MMD between two ground-truth sample sets (best achievable).
    opt_stat = kmmd.compute_mmd(gt_samples2, gt_samples)
    print('gt <--> gt log-mmd', opt_stat, opt_stat.log10())
    new_samples = model.gibbs_sample(n_steps=0, n_samples=args.n_test_samples)
    log_mmds = {}
    log_mmds['gibbs'] = []
    # Baseline: plain Gibbs sampling, evaluated every 10 steps.
    for i in range(args.n_steps):
        if ((i % 10) == 0):
            stat = kmmd.compute_mmd(new_samples, gt_samples)
            log_stat = stat.log10().item()
            log_mmds['gibbs'].append(log_stat)
            print('gibbs', i, stat, stat.log10())
        new_samples = model.gibbs_sample(new_samples, 1)
    # Continuous relaxation of the RBM for SVGD.
    r_model = samplers.BinaryRelaxedModel(args.n_visible, model)
    r_model.to(device)
    if (args.n_visible == 2):
        # 2-D case only: density / scatter visualizations of the relaxation.
        import visualize_flow

        def viz(p, t):
            plt.clf()
            visualize_flow.plt_flow_density((lambda x: r_model.logp_surrogate(x, t)), plt.gca(), npts=200)
            plt.savefig(p)

        def plot(p, x):
            plt.clf()
            visualize_flow.plt_samples(x.detach().cpu().numpy(), plt.gca(), 200)
            plt.savefig(p)
    temps = [0.5, 1.0, 2.0]
    for temp in temps:
        log_mmds[temp] = []
        target = (lambda x: r_model.logp_surrogate(x, temp))
        # SVGD particles, optimized with Adam.
        x = nn.Parameter(r_model.base_dist.sample((args.n_test_samples, args.n_visible)).to(device))
        optim = torch.optim.Adam(params=[x], lr=args.lr)
        svgd = samplers.SVGD(optim)
        if (viz is not None):
            viz('{}/target_{}.png'.format(args.save_dir, temp), temp)
        for i in range(args.n_steps):
            svgd.discrete_step(x, r_model.logp_target, target)
            if (((i % 100) == 0) and (plot is not None)):
                if (args.data == 'mnist'):
                    hx = samplers.threshold(x)
                else:
                    hx = x
                # NOTE(review): leading '/' makes this an absolute path —
                # confirm it is intended.
                plot('/{}/samples_temp_{}_{}.png'.format(args.save_dir, temp, i), hx)
            if ((i % 10) == 0):
                # MMD is computed on thresholded (hard binary) particles.
                hard_samples = samplers.threshold(x)
                stat = kmmd.compute_mmd(hard_samples, gt_samples)
                log_stat = stat.log10().item()
                log_mmds[temp].append(log_stat)
                print('temp = {}, itr = {}, log-mmd = {:.4f}, ess = {:.4f}'.format(temp, i, log_stat, svgd._ess))
    plt.clf()
    for temp in (temps + ['gibbs']):
        plt.plot(log_mmds[temp], label='{}'.format(temp))
    plt.legend()
    plt.savefig('{}/results.png'.format(args.save_dir))
def makedirs(dirname): "\n Make directory only if it's not already there.\n " if (not os.path.exists(dirname)): os.makedirs(dirname)
def main(args):
    """Benchmark discrete samplers (relaxed SVGD, Gibbs-with-gradients, and
    relaxed HMC/MALA) on a Bernoulli RBM, tracking log-MMD against samples
    from a long "ground truth" Gibbs chain.

    Plots and a pickled metrics dict are written to ``args.save_dir``.
    """
    makedirs(args.save_dir)
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    model = rbm.BernoulliRBM(args.n_visible, args.n_hidden)
    model.to(device)
    print(device)
    if (args.data == 'mnist'):
        assert (args.n_visible == 784)
        (train_loader, test_loader, plot, viz) = utils.get_data(args)
        # Visible-bias init from the data mean, clamped away from 0/1 so the
        # log-odds stay finite.
        init_data = []
        for (x, _) in train_loader:
            init_data.append(x)
        init_data = torch.cat(init_data, 0)
        init_mean = init_data.mean(0).clamp(0.01, 0.99)
        model = rbm.BernoulliRBM(args.n_visible, args.n_hidden, data_mean=init_mean)
        model.to(device)
        # One pass of CD-k training over the training set.
        optimizer = torch.optim.Adam(model.parameters(), lr=args.rbm_lr)
        itr = 0
        for (x, _) in train_loader:
            x = x.to(device)
            xhat = model.gibbs_sample(v=x, n_steps=args.cd)
            d = model.logp_v_unnorm(x)     # positive phase
            m = model.logp_v_unnorm(xhat)  # negative phase
            obj = (d - m)
            loss = (- obj.mean())
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # NOTE(review): itr is never incremented, so this prints every
            # batch regardless of args.print_every -- confirm intended.
            if ((itr % args.print_every) == 0):
                print('{} | log p(data) = {:.4f}, log p(model) = {:.4f}, diff = {:.4f}'.format(itr, d.mean(), m.mean(), (d - m).mean()))
    else:
        # Synthetic setting: random RBM weights, no plotting hooks.
        model.W.data = (torch.randn_like(model.W.data) * (0.05 ** 0.5))
        model.b_v.data = (torch.randn_like(model.b_v.data) * 1.0)
        model.b_h.data = (torch.randn_like(model.b_h.data) * 1.0)
        viz = plot = None
    # Long-run Gibbs chain as ground truth; a held-out half calibrates the
    # best achievable MMD.
    gt_samples = model.gibbs_sample(n_steps=args.mcmc_steps, n_samples=(args.n_samples + args.n_test_samples), plot=True)
    kmmd = mmd.MMD(mmd.exp_avg_hamming, False)
    (gt_samples, gt_samples2) = (gt_samples[:args.n_samples], gt_samples[args.n_samples:])
    if (plot is not None):
        plot('{}/ground_truth.png'.format(args.save_dir), gt_samples2)
    opt_stat = kmmd.compute_mmd(gt_samples2, gt_samples)
    print('gt <--> gt log-mmd', opt_stat, opt_stat.log10())
    # --- baseline: plain Gibbs chain from scratch ---
    new_samples = model.gibbs_sample(n_steps=0, n_samples=args.n_test_samples)
    log_mmds = {}
    log_mmds['gibbs'] = []
    for i in range(args.n_steps):
        if ((i % 10) == 0):
            stat = kmmd.compute_mmd(new_samples, gt_samples)
            log_stat = stat.log10().item()
            log_mmds['gibbs'].append(log_stat)
            print('gibbs', i, stat, stat.log10())
        new_samples = model.gibbs_sample(new_samples, 1)
    # Continuous relaxation of the RBM for the gradient-based samplers.
    r_model = samplers_old.BinaryRelaxedModel(args.n_visible, model)
    r_model.to(device)
    # --- SVGD on the relaxed model ---
    temps = [2.0]
    for temp in temps:
        log_mmds['svgd'] = []
        target = (lambda x: r_model.logp_surrogate(x, temp))
        x = model.init_dist.sample((args.n_test_samples,)).to(device)
        x = nn.Parameter(r_model.init_from_data(x))
        optim = torch.optim.Adam(params=[x], lr=args.lr)
        svgd = samplers_old.SVGD(optim)
        for i in range(args.n_steps):
            svgd.discrete_step(x, r_model.logp_target, target)
            if (((i % 100) == 0) and (plot is not None)):
                if (args.data == 'mnist'):
                    hx = samplers_old.threshold(x)
                else:
                    hx = x
                plot('{}/samples_temp_{}_{}.png'.format(args.save_dir, temp, i), hx)
            if ((i % 10) == 0):
                hard_samples = samplers_old.threshold(x)
                stat = kmmd.compute_mmd(hard_samples, gt_samples)
                log_stat = stat.log10().item()
                log_mmds['svgd'].append(log_stat)
                print('temp = {}, itr = {}, log-mmd = {:.4f}, ess = {:.4f}'.format(temp, i, log_stat, svgd._ess))
    # --- Gibbs-with-gradients ---
    sampler = samplers.DiffSampler(args.n_visible, 1, fixed_proposal=False, approx=True, multi_hop=False, temp=2.0)
    x = model.init_dist.sample((args.n_test_samples,)).to(device)
    log_mmds['gwg'] = []
    for i in range(args.n_steps):
        xhat = sampler.step(x.detach(), model).detach()
        # Mean Hamming distance moved this step.
        cur_hops = (x != xhat).float().sum((- 1)).mean().item()
        x = xhat
        if (((i % 100) == 0) and (plot is not None)):
            plot('{}/samples_gwg_{}.png'.format(args.save_dir, i), x)
        if ((i % 10) == 0):
            hard_samples = x
            stat = kmmd.compute_mmd(hard_samples, gt_samples)
            log_stat = stat.log10().item()
            log_mmds['gwg'].append(log_stat)
            print('gwg, itr = {}, log-mmd = {:.4f}, hop-dist = {:.4f}'.format(i, log_stat, cur_hops))
    # --- relaxed HMC / MALA over (sampler, temperature, step-size) grid ---
    temps = [0.1]
    for sampler in ['hmc', 'mala']:
        for temp in temps:
            for ss in [0.001]:
                name = '{}-{}-{}'.format(sampler, temp, ss)
                log_mmds[name] = []
                log_temp = nn.Parameter(torch.tensor([temp]).log().to(device))
                x = model.init_dist.sample((args.n_test_samples,)).to(device)
                # Map discrete samples into the relaxed (continuous) space.
                mala_samples = r_model.init_from_data(x)
                print('Burn in')
                for i in range(args.n_steps):
                    if (sampler == 'hmc'):
                        (mala_samples, ar, _) = r_model.hmc_step(mala_samples, ss, 1, log_temp.exp().detach())
                        ar = ar.mean().item()  # acceptance rate
                    else:
                        (mala_samples, ar) = r_model.step(mala_samples, ss, log_temp.exp(), accept_dist='target', tt=args.tt)
                    if ((i % 10) == 0):
                        # Threshold back to {0,1} before computing MMD.
                        hard_samples = samplers_old.threshold(mala_samples)
                        stat = kmmd.compute_mmd(hard_samples, gt_samples)
                        print(sampler, temp, i, log_temp.mean().exp().item(), ss, ar, stat, stat.log10())
                        log_mmds[name].append(stat.log10().item())
                    if (((i % 100) == 0) and (plot is not None)):
                        hx = samplers_old.threshold(mala_samples)
                        plot('{}/samples_{}_{}.png'.format(args.save_dir, name, i), hx)
    # Summary plot and pickled metrics.
    plt.clf()
    for temp in log_mmds.keys():
        plt.plot(log_mmds[temp], label='{}'.format(temp))
    plt.legend()
    plt.savefig('{}/results.png'.format(args.save_dir))
    with open('{}/results.pkl'.format(args.save_dir), 'wb') as f:
        pickle.dump(log_mmds, f)
def load_static_mnist(args, **kwargs): args.input_size = [1, 28, 28] args.input_type = 'binary' args.dynamic_binarization = False def lines_to_np_array(lines): return np.array([[int(i) for i in line.split()] for line in lines]) with open(os.path.join('datasets', 'MNIST_static', 'binarized_mnist_train.amat')) as f: lines = f.readlines() x_train = lines_to_np_array(lines).astype('float32') with open(os.path.join('datasets', 'MNIST_static', 'binarized_mnist_valid.amat')) as f: lines = f.readlines() x_val = lines_to_np_array(lines).astype('float32') with open(os.path.join('datasets', 'MNIST_static', 'binarized_mnist_test.amat')) as f: lines = f.readlines() x_test = lines_to_np_array(lines).astype('float32') np.random.shuffle(x_train) y_train = np.zeros((x_train.shape[0], 1)) y_val = np.zeros((x_val.shape[0], 1)) y_test = np.zeros((x_test.shape[0], 1)) train = data_utils.TensorDataset(torch.from_numpy(x_train), torch.from_numpy(y_train)) train_loader = data_utils.DataLoader(train, batch_size=args.batch_size, shuffle=True, **kwargs) validation = data_utils.TensorDataset(torch.from_numpy(x_val).float(), torch.from_numpy(y_val)) val_loader = data_utils.DataLoader(validation, batch_size=args.test_batch_size, shuffle=False, **kwargs) test = data_utils.TensorDataset(torch.from_numpy(x_test).float(), torch.from_numpy(y_test)) test_loader = data_utils.DataLoader(test, batch_size=args.test_batch_size, shuffle=True, **kwargs) return (train_loader, val_loader, test_loader, args)
def load_dynamic_mnist(args, **kwargs): args.input_size = [1, 28, 28] args.input_type = 'binary' args.dynamic_binarization = True from torchvision import datasets, transforms train_loader = torch.utils.data.DataLoader(datasets.MNIST('../data', train=True, download=True, transform=transforms.Compose([transforms.ToTensor()])), batch_size=args.batch_size, shuffle=True) test_loader = torch.utils.data.DataLoader(datasets.MNIST('../data', train=False, transform=transforms.Compose([transforms.ToTensor()])), batch_size=args.batch_size, shuffle=True) x_train = (train_loader.dataset.train_data.float().numpy() / 255.0) x_train = np.reshape(x_train, (x_train.shape[0], (x_train.shape[1] * x_train.shape[2]))) y_train = np.array(train_loader.dataset.train_labels.float().numpy(), dtype=int) x_test = (test_loader.dataset.test_data.float().numpy() / 255.0) x_test = np.reshape(x_test, (x_test.shape[0], (x_test.shape[1] * x_test.shape[2]))) y_test = np.array(test_loader.dataset.test_labels.float().numpy(), dtype=int) x_val = x_train[50000:60000] y_val = np.array(y_train[50000:60000], dtype=int) x_train = x_train[0:50000] y_train = np.array(y_train[0:50000], dtype=int) if args.dynamic_binarization: args.input_type = 'binary' np.random.seed(777) x_val = np.random.binomial(1, x_val) x_test = np.random.binomial(1, x_test) else: args.input_type = 'gray' train = data_utils.TensorDataset(torch.from_numpy(x_train), torch.from_numpy(y_train)) train_loader = data_utils.DataLoader(train, batch_size=args.batch_size, shuffle=True, **kwargs) validation = data_utils.TensorDataset(torch.from_numpy(x_val).float(), torch.from_numpy(y_val)) val_loader = data_utils.DataLoader(validation, batch_size=args.test_batch_size, shuffle=False, **kwargs) test = data_utils.TensorDataset(torch.from_numpy(x_test).float(), torch.from_numpy(y_test)) test_loader = data_utils.DataLoader(test, batch_size=args.test_batch_size, shuffle=False, **kwargs) return (train_loader, val_loader, test_loader, args)
def load_omniglot(args, n_validation=1345, **kwargs): args.input_size = [1, 28, 28] args.input_type = 'binary' args.dynamic_binarization = True def reshape_data(data): return data.reshape(((- 1), 28, 28)).reshape(((- 1), (28 * 28)), order='F') omni_raw = loadmat(os.path.join('datasets', 'OMNIGLOT', 'chardata.mat')) train_data = reshape_data(omni_raw['data'].T.astype('float32')) x_test = reshape_data(omni_raw['testdata'].T.astype('float32')) np.random.shuffle(train_data) x_train = train_data[:(- n_validation)] x_val = train_data[(- n_validation):] if args.dynamic_binarization: args.input_type = 'binary' np.random.seed(777) x_val = np.random.binomial(1, x_val) x_test = np.random.binomial(1, x_test) else: args.input_type = 'gray' y_train = np.zeros((x_train.shape[0], 1)) y_val = np.zeros((x_val.shape[0], 1)) y_test = np.zeros((x_test.shape[0], 1)) train = data_utils.TensorDataset(torch.from_numpy(x_train), torch.from_numpy(y_train)) train_loader = data_utils.DataLoader(train, batch_size=args.batch_size, shuffle=True, **kwargs) validation = data_utils.TensorDataset(torch.from_numpy(x_val).float(), torch.from_numpy(y_val)) val_loader = data_utils.DataLoader(validation, batch_size=args.test_batch_size, shuffle=False, **kwargs) test = data_utils.TensorDataset(torch.from_numpy(x_test).float(), torch.from_numpy(y_test)) test_loader = data_utils.DataLoader(test, batch_size=args.test_batch_size, shuffle=True, **kwargs) return (train_loader, val_loader, test_loader, args)
def load_caltech101silhouettes(args, **kwargs): args.input_size = [1, 28, 28] args.input_type = 'binary' args.dynamic_binarization = False def reshape_data(data): return data.reshape(((- 1), 28, 28)).reshape(((- 1), (28 * 28)), order='F') caltech_raw = loadmat(os.path.join('datasets', 'Caltech101Silhouettes', 'caltech101_silhouettes_28_split1.mat')) x_train = (1.0 - reshape_data(caltech_raw['train_data'].astype('float32'))) np.random.shuffle(x_train) x_val = (1.0 - reshape_data(caltech_raw['val_data'].astype('float32'))) np.random.shuffle(x_val) x_test = (1.0 - reshape_data(caltech_raw['test_data'].astype('float32'))) y_train = caltech_raw['train_labels'] y_val = caltech_raw['val_labels'] y_test = caltech_raw['test_labels'] train = data_utils.TensorDataset(torch.from_numpy(x_train), torch.from_numpy(y_train)) train_loader = data_utils.DataLoader(train, batch_size=args.batch_size, shuffle=True, **kwargs) validation = data_utils.TensorDataset(torch.from_numpy(x_val).float(), torch.from_numpy(y_val)) val_loader = data_utils.DataLoader(validation, batch_size=args.test_batch_size, shuffle=False, **kwargs) test = data_utils.TensorDataset(torch.from_numpy(x_test).float(), torch.from_numpy(y_test)) test_loader = data_utils.DataLoader(test, batch_size=args.test_batch_size, shuffle=True, **kwargs) return (train_loader, val_loader, test_loader, args)
def load_histopathologyGray(args, **kwargs): args.input_size = [1, 28, 28] args.input_type = 'gray' args.dynamic_binarization = False with open('datasets/HistopathologyGray/histopathology.pkl', 'rb') as f: data = pickle.load(f, encoding='latin1') x_train = np.asarray(data['training']).reshape((- 1), (28 * 28)) x_val = np.asarray(data['validation']).reshape((- 1), (28 * 28)) x_test = np.asarray(data['test']).reshape((- 1), (28 * 28)) x_train = np.clip(x_train, (1.0 / 512.0), (1.0 - (1.0 / 512.0))) x_val = np.clip(x_val, (1.0 / 512.0), (1.0 - (1.0 / 512.0))) x_test = np.clip(x_test, (1.0 / 512.0), (1.0 - (1.0 / 512.0))) y_train = np.zeros((x_train.shape[0], 1)) y_val = np.zeros((x_val.shape[0], 1)) y_test = np.zeros((x_test.shape[0], 1)) train = data_utils.TensorDataset(torch.from_numpy(x_train).float(), torch.from_numpy(y_train)) train_loader = data_utils.DataLoader(train, batch_size=args.batch_size, shuffle=True, **kwargs) validation = data_utils.TensorDataset(torch.from_numpy(x_val).float(), torch.from_numpy(y_val)) val_loader = data_utils.DataLoader(validation, batch_size=args.test_batch_size, shuffle=False, **kwargs) test = data_utils.TensorDataset(torch.from_numpy(x_test).float(), torch.from_numpy(y_test)) test_loader = data_utils.DataLoader(test, batch_size=args.test_batch_size, shuffle=True, **kwargs) return (train_loader, val_loader, test_loader, args)
def load_freyfaces(args, TRAIN=1565, VAL=200, TEST=200, **kwargs): args.input_size = [1, 28, 20] args.input_type = 'gray' args.dynamic_binarization = False import scipy.io data = scipy.io.loadmat('datasets/Freyfaces/frey_rawface')['ff'].T data = (data / 256.0) np.random.shuffle(data) x_train = data[0:TRAIN].reshape((- 1), (28 * 20)) x_val = data[TRAIN:(TRAIN + VAL)].reshape((- 1), (28 * 20)) x_test = data[(TRAIN + VAL):((TRAIN + VAL) + TEST)].reshape((- 1), (28 * 20)) y_train = np.zeros((x_train.shape[0], 1)) y_val = np.zeros((x_val.shape[0], 1)) y_test = np.zeros((x_test.shape[0], 1)) train = data_utils.TensorDataset(torch.from_numpy(x_train).float(), torch.from_numpy(y_train)) train_loader = data_utils.DataLoader(train, batch_size=args.batch_size, shuffle=True, **kwargs) validation = data_utils.TensorDataset(torch.from_numpy(x_val).float(), torch.from_numpy(y_val)) val_loader = data_utils.DataLoader(validation, batch_size=args.test_batch_size, shuffle=False, **kwargs) test = data_utils.TensorDataset(torch.from_numpy(x_test).float(), torch.from_numpy(y_test)) test_loader = data_utils.DataLoader(test, batch_size=args.test_batch_size, shuffle=True, **kwargs) return (train_loader, val_loader, test_loader, args)
def load_cifar10(args, **kwargs): args.input_size = [3, 32, 32] args.input_type = 'continuous' args.dynamic_binarization = False from torchvision import datasets, transforms transform = transforms.Compose([transforms.ToTensor()]) training_dataset = datasets.CIFAR10('datasets/Cifar10/', train=True, download=True, transform=transform) train_data = np.clip(((training_dataset.train_data + 0.5) / 256.0), 0.0, 1.0) train_data = np.swapaxes(np.swapaxes(train_data, 1, 2), 1, 3) train_data = np.reshape(train_data, ((- 1), np.prod(args.input_size))) np.random.shuffle(train_data) x_val = train_data[40000:50000] x_train = train_data[0:40000] y_train = np.zeros((x_train.shape[0], 1)) y_val = np.zeros((x_val.shape[0], 1)) train = data_utils.TensorDataset(torch.from_numpy(x_train).float(), torch.from_numpy(y_train)) train_loader = data_utils.DataLoader(train, batch_size=args.batch_size, shuffle=True, **kwargs) validation = data_utils.TensorDataset(torch.from_numpy(x_val).float(), torch.from_numpy(y_val)) val_loader = data_utils.DataLoader(validation, batch_size=args.test_batch_size, shuffle=False, **kwargs) test_dataset = datasets.CIFAR10('datasets/Cifar10/', train=False, transform=transform) test_data = np.clip(((test_dataset.test_data + 0.5) / 256.0), 0.0, 1.0) test_data = np.swapaxes(np.swapaxes(test_data, 1, 2), 1, 3) x_test = np.reshape(test_data, ((- 1), np.prod(args.input_size))) y_test = np.zeros((x_test.shape[0], 1)) test = data_utils.TensorDataset(torch.from_numpy(x_test).float(), torch.from_numpy(y_test)) test_loader = data_utils.DataLoader(test, batch_size=args.test_batch_size, shuffle=True, **kwargs) return (train_loader, val_loader, test_loader, args)
def load_dataset(args, **kwargs): if (args.dataset_name == 'static_mnist'): (train_loader, val_loader, test_loader, args) = load_static_mnist(args, **kwargs) elif (args.dataset_name == 'dynamic_mnist'): (train_loader, val_loader, test_loader, args) = load_dynamic_mnist(args, **kwargs) elif (args.dataset_name == 'omniglot'): (train_loader, val_loader, test_loader, args) = load_omniglot(args, **kwargs) elif (args.dataset_name == 'caltech'): (train_loader, val_loader, test_loader, args) = load_caltech101silhouettes(args, **kwargs) elif (args.dataset_name == 'histopathology'): (train_loader, val_loader, test_loader, args) = load_histopathologyGray(args, **kwargs) elif (args.dataset_name == 'freyfaces'): (train_loader, val_loader, test_loader, args) = load_freyfaces(args, **kwargs) elif (args.dataset_name == 'cifar10'): (train_loader, val_loader, test_loader, args) = load_cifar10(args, **kwargs) else: raise Exception('Wrong name of the dataset!') return (train_loader, val_loader, test_loader, args)
class AISModel(nn.Module): def __init__(self, model, init_dist): super().__init__() self.model = model self.init_dist = init_dist def forward(self, x, beta): logpx = self.model(x).squeeze() logpi = self.init_dist.log_prob(x).sum((- 1)) return ((logpx * beta) + (logpi * (1.0 - beta)))
def evaluate(model, init_dist, sampler, train_loader, val_loader, test_loader, preprocess, device, n_iters, n_samples, steps_per_iter=1, viz_every=100): model = AISModel(model, init_dist) model.to(device) betas = np.linspace(0.0, 1.0, n_iters) samples = init_dist.sample((n_samples,)) log_w = torch.zeros((n_samples,)).to(device) gen_samples = [] for (itr, beta_k) in tqdm(enumerate(betas)): if (itr == 0): continue beta_km1 = betas[(itr - 1)] with torch.no_grad(): log_w = ((log_w + model(samples, beta_k)) - model(samples, beta_km1)) model_k = (lambda x: model(x, beta=beta_k)) for d in range(steps_per_iter): samples = sampler.step(samples.detach(), model_k).detach() if (((itr + 1) % viz_every) == 0): gen_samples.append(samples.cpu().detach()) logZ_final = (log_w.logsumexp(0) - np.log(n_samples)) print('Final log(Z) = {:.4f}'.format(logZ_final)) model = model.model logps = [] for (x, _) in train_loader: x = preprocess(x.to(device)) logp_x = model(x).squeeze().detach() logps.append(logp_x) logps = torch.cat(logps) train_ll = (logps.mean() - logZ_final) logps = [] for (x, _) in val_loader: x = preprocess(x.to(device)) logp_x = model(x).squeeze().detach() logps.append(logp_x) logps = torch.cat(logps) val_ll = (logps.mean() - logZ_final) logps = [] for (x, _) in test_loader: x = preprocess(x.to(device)) logp_x = model(x).squeeze().detach() logps.append(logp_x) logps = torch.cat(logps) test_ll = (logps.mean() - logZ_final) return (logZ_final, train_ll, val_ll, test_ll, gen_samples)
def makedirs(dirname): "\n Make directory only if it's not already there.\n " if (not os.path.exists(dirname)): os.makedirs(dirname)
def main(args):
    """Evaluate a trained EBM checkpoint with AIS: estimates log Z, reports
    train/val/test log-likelihoods, and saves intermediate AIS samples.

    Reads the checkpoint from ``args.save_dir`` and logs to
    ``args.save_dir/log.txt``.
    """
    makedirs(args.save_dir)
    logger = open('{}/log.txt'.format(args.save_dir), 'w')

    # Print to stdout and mirror into the log file.
    def my_print(s):
        print(s)
        logger.write((str(s) + '\n'))
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    my_print('Loading data')
    (train_loader, val_loader, test_loader, args) = vamp_utils.load_dataset(args)
    # Save a grid image of flattened samples at their native image shape.
    plot = (lambda p, x: torchvision.utils.save_image(x.view(x.size(0), args.input_size[0], args.input_size[1], args.input_size[2]), p, normalize=True, nrow=int((x.size(0) ** 0.5))))

    # Per-batch binarization for dynamically binarized datasets.
    def preprocess(data):
        if args.dynamic_binarization:
            return torch.bernoulli(data)
        else:
            return data
    my_print('Making Model')
    # Model spec strings look like 'mlp-256' / 'resnet-64' / 'cnn-16', where
    # the suffix is the width parameter.
    if args.model.startswith('mlp-'):
        nint = int(args.model.split('-')[1])
        net = mlp.mlp_ebm(np.prod(args.input_size), nint)
    elif args.model.startswith('resnet-'):
        nint = int(args.model.split('-')[1])
        net = mlp.ResNetEBM(nint)
    elif args.model.startswith('cnn-'):
        nint = int(args.model.split('-')[1])
        net = mlp.MNISTConvNet(nint)
    else:
        raise ValueError('invalid model definition')
    my_print('Getting init batch')
    # Data mean (squashed away from 0/1) initializes the base distribution.
    init_batch = []
    for (x, _) in train_loader:
        init_batch.append(preprocess(x))
    init_batch = torch.cat(init_batch, 0)
    eps = 0.01
    init_mean = ((init_batch.mean(0) * (1.0 - (2 * eps))) + eps)
    if args.base_dist:
        model = EBM(net, init_mean)
    else:
        model = EBM(net)
    # Load the best checkpoint (optionally the EMA weights).
    d = torch.load('{}/best_ckpt_{}_{}_{}.pt'.format(args.save_dir, args.dataset_name, args.eval_sampler, args.eval_step_size))
    if args.ema:
        model.load_state_dict(d['ema_model'])
    else:
        model.load_state_dict(d['model'])
    buffer = d['buffer']
    init_dist = torch.distributions.Bernoulli(probs=init_mean.to(device))
    sampler = get_sampler(args)
    my_print(device)
    my_print(model)
    my_print(sampler)
    # AIS: returns the log-partition estimate, per-split log-likelihoods, and
    # intermediate sample snapshots.
    (logZ, train_ll, val_ll, test_ll, ais_samples) = ais.evaluate(model, init_dist, sampler, train_loader, val_loader, test_loader, preprocess, device, args.eval_sampling_steps, args.n_samples, viz_every=args.viz_every)
    my_print('EMA Train log-likelihood: {}'.format(train_ll.item()))
    my_print('EMA Valid log-likelihood: {}'.format(val_ll.item()))
    my_print('EMA Test log-likelihood: {}'.format(test_ll.item()))
    for (_i, _x) in enumerate(ais_samples):
        plot('{}/ais_EMA_sample_{}_{}_{}_{}.png'.format(args.save_dir, args.dataset_name, args.eval_sampler, args.eval_step_size, _i), _x)
def makedirs(dirname): "\n Make directory only if it's not already there.\n " if (not os.path.exists(dirname)): os.makedirs(dirname)
def get_ess(chain, burn_in): c = chain l = c.shape[0] bi = int((burn_in * l)) c = c[bi:] cv = tfp.mcmc.effective_sample_size(c).numpy() cv[np.isnan(cv)] = 1.0 return cv
def get_log_rmse(x, gt_mean): x = ((2.0 * x) - 1.0) x2 = ((x - gt_mean) ** 2).mean().sqrt() return x2.log().detach().cpu().numpy()
def tv(samples): gt_probs = np.load('{}/gt_prob_{}_{}.npy'.format(args.save_dir, args.dim, args.bias)) (arrs, uniq_cnt) = np.unique(samples, axis=0, return_counts=True) sample_probs = np.zeros_like(gt_probs) for i in range(arrs.shape[0]): sample_probs[i] = (((uniq_cnt[i] * 1.0) - 1.0) / samples.shape[0]) l_dist = np.abs((gt_probs - sample_probs)).sum()
def get_gt_mean(args, model): dim = (args.dim ** 2) A = model.J b = args.bias lst = torch.tensor(list(itertools.product([(- 1.0), 1.0], repeat=dim))).to(device) f = (lambda x: torch.exp((((x @ A) * x).sum((- 1)) + torch.sum((b * x), dim=(- 1))))) flst = f(lst) plst = (flst / torch.sum(flst)) gt_mean = torch.sum((lst * plst.unsqueeze(1).expand((- 1), lst.size(1))), 0) torch.save(gt_mean.cpu(), '{}/gt_mean_dim{}_sigma{}_bias{}.pt'.format(args.save_dir, args.dim, args.sigma, args.bias)) return gt_mean
def main(args):
    """Benchmark discrete samplers (Hamming-ball, block-Gibbs, GWG, DMALA,
    DULA) on a lattice Ising model: tracks wall-clock time, hop distance,
    log-RMSE to the exact mean, and effective sample size.

    Results are saved as .npy files and plots in ``args.save_dir``.
    """
    makedirs(args.save_dir)
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    model = rbm.LatticeIsingModel(args.dim, args.sigma, args.bias)
    model.to(device)
    # Exact per-site mean by enumeration (small models only).
    gt_mean = get_gt_mean(args, model)
    plot = (lambda p, x: torchvision.utils.save_image(x.view(x.size(0), 1, args.dim, args.dim), p, normalize=False, nrow=int((x.size(0) ** 0.5))))
    # Reference samples used for the Hamming-distance ESS statistic.
    ess_samples = model.init_sample(args.n_samples).to(device)
    hops = {}
    ess = {}
    times = {}
    chains = {}
    means = {}
    rmses = {}
    # All samplers start from the same initial state for a fair comparison.
    x0 = model.init_dist.sample((args.n_test_samples,)).to(device)
    temps = ['hb-10-1', 'bg-1', 'gwg', 'dmala', 'dula']
    for temp in temps:
        # Sampler specs are encoded in the name, e.g. 'hb-10-1' is a
        # Hamming-ball sampler with block size 10 and hamming distance 1.
        if (temp == 'dim-gibbs'):
            sampler = samplers.PerDimGibbsSampler(model.data_dim)
        elif (temp == 'rand-gibbs'):
            sampler = samplers.PerDimGibbsSampler(model.data_dim, rand=True)
        elif (temp == 'lb'):
            sampler = samplers.PerDimLB(model.data_dim)
        elif ('bg-' in temp):
            block_size = int(temp.split('-')[1])
            sampler = block_samplers.BlockGibbsSampler(model.data_dim, block_size)
        elif ('hb-' in temp):
            (block_size, hamming_dist) = [int(v) for v in temp.split('-')[1:]]
            sampler = block_samplers.HammingBallSampler(model.data_dim, block_size, hamming_dist)
        elif (temp == 'gwg'):
            sampler = samplers.DiffSampler(model.data_dim, 1, fixed_proposal=False, approx=True, multi_hop=False, temp=2.0)
        elif ('gwg-' in temp):
            n_hops = int(temp.split('-')[1])
            sampler = samplers.MultiDiffSampler(model.data_dim, 1, approx=True, temp=2.0, n_samples=n_hops)
        elif (temp == 'dmala'):
            sampler = samplers.LangevinSampler(model.data_dim, 1, fixed_proposal=False, approx=True, multi_hop=False, temp=2.0, step_size=0.4, mh=True)
        elif (temp == 'dula'):
            sampler = samplers.LangevinSampler(model.data_dim, 1, fixed_proposal=False, approx=True, multi_hop=False, temp=2.0, step_size=0.2, mh=False)
        else:
            raise ValueError('Invalid sampler...')
        x = x0.clone().detach()
        times[temp] = []
        hops[temp] = []
        chain = []
        cur_time = 0.0
        mean = torch.zeros_like(x)  # running sum of states for the mean estimate
        time_list = []
        rmses[temp] = []
        for i in range(args.n_steps):
            # Time only the sampler step itself.
            st = time.time()
            xhat = sampler.step(x.detach(), model).detach()
            cur_time += (time.time() - st)
            # Mean Hamming distance moved this step.
            cur_hops = (x != xhat).float().sum((- 1)).mean().item()
            x = xhat
            mean = (mean + x)
            if ((i % args.subsample) == 0):
                # Record either raw dimensions of one chain or the Hamming
                # distance to a reference sample, for the ESS computation.
                if (args.ess_statistic == 'dims'):
                    chain.append(x.cpu().numpy()[0][None])
                else:
                    xc = x
                    h = (xc != ess_samples[0][None]).float().sum((- 1))
                    chain.append(h.detach().cpu().numpy()[None])
            if (((i % args.viz_every) == 0) and (plot is not None)):
                time_list.append(cur_time)
                # Log-RMSE of the running mean estimate vs the exact mean.
                rmse = get_log_rmse((mean / (i + 1)), gt_mean)
                rmses[temp].append(rmse)
            if ((i % args.print_every) == 0):
                times[temp].append(cur_time)
                hops[temp].append(cur_hops)
        means[temp] = (mean / args.n_steps)
        chain = np.concatenate(chain, 0)
        chains[temp] = chain
        if (not args.no_ess):
            ess[temp] = get_ess(chain, args.burn_in)
            print('ess = {} +/- {}'.format(ess[temp].mean(), ess[temp].std()))
        np.save('{}/ising_sample_times_{}.npy'.format(args.save_dir, temp), time_list)
        # NOTE(review): this saves the whole rmses dict under a per-sampler
        # filename (rather than rmses[temp]) -- confirm intended.
        np.save('{}/ising_sample_logrmses_{}.npy'.format(args.save_dir, temp), rmses)
    # Summary plots: log-RMSE curves and ESS box plots.
    plt.clf()
    for temp in temps:
        plt.plot(rmses[temp], label='{}'.format(temp))
    plt.legend()
    plt.savefig('{}/log_rmse.png'.format(args.save_dir))
    if (not args.no_ess):
        ess_temps = temps
        plt.clf()
        ess_list = [ess[temp] for temp in ess_temps]
        plt.boxplot(ess_list, labels=ess_temps, showfliers=False)
        plt.savefig('{}/ess.png'.format(args.save_dir))
        plt.clf()
        # ESS per second of post-burn-in sampling time.
        plt.boxplot([((ess[temp] / times[temp][(- 1)]) / (1.0 - args.burn_in)) for temp in ess_temps], labels=ess_temps, showfliers=False)
        plt.savefig('{}/ess_per_sec.png'.format(args.save_dir))
class Swish(nn.Module): def __init__(self): super().__init__() def forward(self, x): return (x * torch.sigmoid(x))
def mlp_ebm(nin, nint=256, nout=1): return nn.Sequential(nn.Linear(nin, nint), Swish(), nn.Linear(nint, nint), Swish(), nn.Linear(nint, nint), Swish(), nn.Linear(nint, nout))
class MLPEBM_cat(nn.Module): def __init__(self, nin, n_proj, n_cat=256, nint=256, nout=1): super().__init__() self.proj = nn.Linear(n_cat, n_proj) self.n_proj = n_proj self.net = mlp_ebm((nin * n_proj), nint, nout=nout) def forward(self, x): xr = x.view((x.size(0) * x.size(1)), x.size(2)) xr_p = self.proj(xr) x_p = xr_p.view(x.size(0), x.size(1), self.n_proj) x_p = x_p.view(x.size(0), (x.size(1) * self.n_proj)) return self.net(x_p)
def conv_transpose_3x3(in_planes, out_planes, stride=1): return nn.ConvTranspose2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, output_padding=1, bias=True)
def conv3x3(in_planes, out_planes, stride=1): if (stride < 0): return conv_transpose_3x3(in_planes, out_planes, stride=(- stride)) else: return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=True)
class BasicBlock(nn.Module): expansion = 1 def __init__(self, in_planes, planes, stride=1, out_nonlin=True): super(BasicBlock, self).__init__() self.nonlin1 = Swish() self.nonlin2 = Swish() self.conv1 = conv3x3(in_planes, planes, stride) self.conv2 = conv3x3(planes, planes) self.out_nonlin = out_nonlin self.shortcut_conv = None if ((stride != 1) or (in_planes != (self.expansion * planes))): if (stride < 0): self.shortcut_conv = nn.ConvTranspose2d(in_planes, (self.expansion * planes), kernel_size=1, stride=(- stride), output_padding=1, bias=True) else: self.shortcut_conv = nn.Conv2d(in_planes, (self.expansion * planes), kernel_size=1, stride=stride, bias=True) def forward(self, x): out = self.nonlin1(self.conv1(x)) out = self.conv2(out) if (self.shortcut_conv is not None): out_sc = self.shortcut_conv(x) out += out_sc else: out += x if self.out_nonlin: out = self.nonlin2(out) return out
class ResNetEBM(nn.Module): def __init__(self, n_channels=64): super().__init__() self.proj = nn.Conv2d(1, n_channels, 3, 1, 1) downsample = [BasicBlock(n_channels, n_channels, 2), BasicBlock(n_channels, n_channels, 2)] main = [BasicBlock(n_channels, n_channels, 1) for _ in range(6)] all = (downsample + main) self.net = nn.Sequential(*all) self.energy_linear = nn.Linear(n_channels, 1) def forward(self, input): input = input.view(input.size(0), 1, 28, 28) input = self.proj(input) out = self.net(input) out = out.view(out.size(0), out.size(1), (- 1)).mean((- 1)) return self.energy_linear(out).squeeze()
class MNISTConvNet(nn.Module): def __init__(self, nc=16): super().__init__() self.net = nn.Sequential(nn.Conv2d(1, nc, 3, 1, 1), Swish(), nn.Conv2d(nc, (nc * 2), 4, 2, 1), Swish(), nn.Conv2d((nc * 2), (nc * 2), 3, 1, 1), Swish(), nn.Conv2d((nc * 2), (nc * 4), 4, 2, 1), Swish(), nn.Conv2d((nc * 4), (nc * 4), 3, 1, 1), Swish(), nn.Conv2d((nc * 4), (nc * 8), 4, 2, 1), Swish(), nn.Conv2d((nc * 8), (nc * 8), 3, 1, 0), Swish()) self.out = nn.Linear((nc * 8), 1) def forward(self, input): input = input.view(input.size(0), 1, 28, 28) out = self.net(input) out = out.squeeze() return self.out(out).squeeze()
class ResNetEBM_cat(nn.Module): def __init__(self, shape, n_proj, n_cat=256, n_channels=64): super().__init__() self.shape = shape self.n_cat = n_cat self.proj = nn.Conv2d(n_cat, n_proj, 1, 1, 0) self.proj2 = nn.Conv2d(n_proj, n_channels, 3, 1, 1) downsample = [BasicBlock(n_channels, n_channels, 2), BasicBlock(n_channels, n_channels, 2)] main = [BasicBlock(n_channels, n_channels, 1) for _ in range(6)] all = (downsample + main) self.net = nn.Sequential(*all) self.energy_linear = nn.Linear(n_channels, 1) def forward(self, input): input = input.view(input.size(0), self.shape[1], self.shape[2], self.n_cat).permute(0, 3, 1, 2) input = self.proj(input) input = self.proj2(input) out = self.net(input) out = out.view(out.size(0), out.size(1), (- 1)).mean((- 1)) return self.energy_linear(out).squeeze()
def makedirs(dirname): "\n Make directory only if it's not already there.\n " if (not os.path.exists(dirname)): os.makedirs(dirname)
def l1(module): loss = 0.0 for p in module.parameters(): loss += p.abs().sum() return loss
def main(args):
    """Fit an Ising/Potts/RBM-style energy model with persistent contrastive
    divergence, advancing persistent chains with the MCMC sampler selected by
    ``args.sampler``, and periodically log/plot how well the learned couplings
    match the ground truth.

    NOTE(review): relies on module-level names (`device`, `utils`, `rbm`,
    `samplers`, `plt`, `np`, `torch`, `pickle`, `time`) imported elsewhere in
    this file.
    """
    makedirs(args.save_dir)
    logger = open('{}/log.txt'.format(args.save_dir), 'w')

    def my_print(s):
        # Mirror every message to stdout and to the run log file.
        print(s)
        logger.write((str(s) + '\n'))
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    if ((args.data == 'mnist') or (args.data_file is not None)):
        (train_loader, test_loader, plot, viz) = utils.get_data(args)
    else:
        # Data-generation mode: synthesize a dataset, save it, and exit.
        (data, data_model) = utils.generate_data(args)
        my_print('we have created your data, but what have you done for me lately?????')
        with open('{}/data.pkl'.format(args.save_dir), 'wb') as f:
            pickle.dump(data, f)
        if (args.data_model == 'er_ising'):
            # Also persist the generating model's coupling matrix for later eval.
            ground_truth_J = data_model.J.detach().cpu()
            with open('{}/J.pkl'.format(args.save_dir), 'wb') as f:
                pickle.dump(ground_truth_J, f)
        quit()
    # ---- build the model and its persistent sample buffer ----
    if (args.model == 'lattice_potts'):
        model = rbm.LatticePottsModel(int(args.dim), int(args.n_state), 0.0, 0.0, learn_sigma=True)
        buffer = model.init_sample(args.buffer_size)
    elif (args.model == 'lattice_ising'):
        model = rbm.LatticeIsingModel(int(args.dim), 0.0, 0.0, learn_sigma=True)
        buffer = model.init_sample(args.buffer_size)
    elif (args.model == 'lattice_ising_3d'):
        # 3D lattice with fixed true sigma = 0.2; the coupling graph G is learned.
        model = rbm.LatticeIsingModel(int(args.dim), 0.2, learn_G=True, lattice_dim=3)
        ground_truth_J = model.J.clone().to(device)
        model.G.data = (torch.randn_like(model.G.data) * 0.01)
        model.sigma.data = torch.ones_like(model.sigma.data)
        buffer = model.init_sample(args.buffer_size)
        plt.clf()
        plt.matshow(ground_truth_J.detach().cpu().numpy())
        plt.savefig('{}/ground_truth.png'.format(args.save_dir))
    elif (args.model == 'lattice_ising_2d'):
        model = rbm.LatticeIsingModel(int(args.dim), args.sigma, learn_G=True, lattice_dim=2)
        ground_truth_J = model.J.clone().to(device)
        model.G.data = (torch.randn_like(model.G.data) * 0.01)
        model.sigma.data = torch.ones_like(model.sigma.data)
        buffer = model.init_sample(args.buffer_size)
        plt.clf()
        plt.matshow(ground_truth_J.detach().cpu().numpy())
        plt.savefig('{}/ground_truth.png'.format(args.save_dir))
    elif (args.model == 'er_ising'):
        # Erdos-Renyi Ising model; ground-truth graph is loaded from a file.
        model = rbm.ERIsingModel(int(args.dim), 2, learn_G=True)
        model.G.data = (torch.randn_like(model.G.data) * 0.01)
        buffer = model.init_sample(args.buffer_size)
        with open(args.graph_file, 'rb') as f:
            ground_truth_J = pickle.load(f)
        plt.clf()
        plt.matshow(ground_truth_J.detach().cpu().numpy())
        plt.savefig('{}/ground_truth.png'.format(args.save_dir))
        ground_truth_J = ground_truth_J.to(device)
    elif (args.model == 'rbm'):
        model = rbm.BernoulliRBM(args.dim, args.n_hidden)
        buffer = model.init_dist.sample((args.buffer_size,))
    elif (args.model == 'dense_potts'):
        raise ValueError
    elif (args.model == 'dense_ising'):
        raise ValueError
    elif (args.model == 'mlp'):
        raise ValueError
    model.to(device)
    buffer = buffer.to(device)

    def get_J():
        # Symmetrized view of the learned coupling matrix.
        j = model.J
        return ((j + j.t()) / 2)
    # ---- choose the MCMC transition kernel ----
    if (args.sampler == 'gibbs'):
        if ('potts' in args.model):
            sampler = samplers.PerDimMetropolisSampler(model.data_dim, int(args.n_state), rand=False)
        else:
            sampler = samplers.PerDimGibbsSampler(model.data_dim, rand=False)
    elif (args.sampler == 'rand_gibbs'):
        if ('potts' in args.model):
            sampler = samplers.PerDimMetropolisSampler(model.data_dim, int(args.n_state), rand=True)
        else:
            sampler = samplers.PerDimGibbsSampler(model.data_dim, rand=True)
    elif (args.sampler == 'gwg'):
        if ('potts' in args.model):
            sampler = samplers.DiffSamplerMultiDim(model.data_dim, 1, approx=True, temp=2.0)
        else:
            sampler = samplers.DiffSampler(model.data_dim, 1, approx=True, fixed_proposal=False, temp=2.0)
    elif (args.sampler == 'dmala'):
        # Discrete Langevin with Metropolis-Hastings correction.
        sampler = samplers.LangevinSampler(model.data_dim, 1, approx=True, fixed_proposal=False, temp=2.0, step_size=0.2, mh=True)
    elif (args.sampler == 'dula'):
        # Discrete unadjusted Langevin (no MH correction, smaller step size).
        sampler = samplers.LangevinSampler(model.data_dim, 1, approx=True, fixed_proposal=False, temp=2.0, step_size=0.1, mh=False)
    else:
        # 'gwg-<k>': multi-hop GWG proposing k flips per step.
        assert ('gwg-' in args.sampler)
        n_hop = int(args.sampler.split('-')[1])
        if ('potts' in args.model):
            raise ValueError
        else:
            sampler = samplers.MultiDiffSampler(model.data_dim, 1, approx=True, temp=2.0, n_samples=n_hop)
    my_print(device)
    my_print(model)
    my_print(buffer.size())
    my_print(sampler)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    itr = 0
    sigmas = []
    sq_errs = []
    rmses = []
    start_time = time.time()
    time_list = []
    # ---- PCD training loop ----
    while (itr < args.n_iters):
        for x in train_loader:
            x = x[0].to(device)
            # Advance the persistent chains with the chosen sampler.
            for k in range(args.sampling_steps):
                buffer = sampler.step(buffer.detach(), model).detach()
            logp_real = model(x).squeeze().mean()
            logp_fake = model(buffer).squeeze().mean()
            # Maximize log p(data) - log p(model samples), plus L1 sparsity on J.
            obj = (logp_real - logp_fake)
            loss = (- obj)
            loss += (args.l1 * get_J().abs().sum())
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # Zero the diagonal of G: no self-couplings.
            model.G.data *= (1.0 - torch.eye(model.G.data.size(0))).to(model.G)
            if ((itr % args.print_every) == 0):
                my_print('({}) log p(real) = {:.4f}, log p(fake) = {:.4f}, diff = {:.4f}, hops = {:.4f}'.format(itr, logp_real.item(), logp_fake.item(), obj.item(), sampler._hops))
                if (args.model in ('lattice_potts', 'lattice_ising')):
                    my_print('\tsigma true = {:.4f}, current sigma = {:.4f}'.format(args.sigma, model.sigma.data.item()))
                else:
                    sq_err = ((ground_truth_J - get_J()) ** 2).sum()
                    rmse = ((ground_truth_J - get_J()) ** 2).mean().sqrt()
                    my_print('\t err^2 = {:.4f}, log rmse = {:.4f}'.format(sq_err, torch.log(rmse)))
            if ((itr % args.viz_every) == 0):
                running_time = (time.time() - start_time)
                time_list.append(running_time)
                if (args.model in ('lattice_potts', 'lattice_ising')):
                    # Track the learned sigma against the true value.
                    sigmas.append(model.sigma.data.item())
                    plt.clf()
                    plt.plot(sigmas, label='model')
                    plt.plot([args.sigma for s in sigmas], label='gt')
                    plt.legend()
                    plt.savefig('{}/sigma.png'.format(args.save_dir))
                else:
                    # Track squared error and RMSE of the recovered couplings.
                    sq_err = ((ground_truth_J - get_J()) ** 2).sum()
                    sq_errs.append(sq_err.item())
                    plt.clf()
                    plt.plot(sq_errs, label='sq_err')
                    plt.legend()
                    plt.savefig('{}/sq_err.png'.format(args.save_dir))
                    rmse = ((ground_truth_J - get_J()) ** 2).mean().sqrt()
                    rmses.append(rmse.item())
                    plt.clf()
                    plt.plot(rmses, label='rmse')
                    plt.legend()
                    plt.savefig('{}/rmse.png'.format(args.save_dir))
                    plt.clf()
                    plt.matshow(get_J().detach().cpu().numpy())
                    plt.savefig('{}/model_{}.png'.format(args.save_dir, itr))
                plot('{}/data_{}.png'.format(args.save_dir, itr), x.detach().cpu())
                plot('{}/buffer_{}.png'.format(args.save_dir, itr), buffer[:args.batch_size].detach().cpu())
            itr += 1
            if (itr > args.n_iters):
                # Final metric dump, then hard-exit the process.
                if (args.model in ('lattice_potts', 'lattice_ising')):
                    final_sigma = model.sigma.data.item()
                    with open('{}/sigma.txt'.format(args.save_dir), 'w') as f:
                        f.write(str(final_sigma))
                else:
                    np.save('{}/sqerr_{}_{}_{}.npy'.format(args.save_dir, args.sampler, args.sigma, args.sampling_steps), sq_errs)
                    np.save('{}/rmse_{}_{}_{}.npy'.format(args.save_dir, args.sampler, args.sigma, args.sampling_steps), rmses)
                    np.save('{}/times_{}_{}_{}.npy'.format(args.save_dir, args.sampler, args.sigma, args.sampling_steps), time_list)
                quit()
def makedirs(dirname):
    """Create *dirname* (and any missing parents) if it does not already exist.

    Uses ``exist_ok=True`` instead of check-then-create, which was racy.
    NOTE(review): this duplicates an earlier ``makedirs`` definition in the
    same file; consider consolidating.
    """
    os.makedirs(dirname, exist_ok=True)
def get_ess(chain, burn_in):
    """Effective sample size of an MCMC chain after discarding a burn-in fraction.

    NaN entries from the ESS computation are replaced with 1.0.
    """
    n_total = chain.shape[0]
    n_burn = int(burn_in * n_total)
    kept = chain[n_burn:]
    ess_vals = tfp.mcmc.effective_sample_size(kept).numpy()
    ess_vals[np.isnan(ess_vals)] = 1.0
    return ess_vals
def main(args):
    """Benchmark several discrete MCMC samplers on a Bernoulli RBM.

    Trains the RBM with contrastive divergence on MNIST (or randomly
    initializes it when no data is given), draws long-run Gibbs 'ground truth'
    samples, then runs each sampler from a common start state while recording
    log-MMD to the ground truth, hop distances, wall-clock time, and effective
    sample size.

    NOTE(review): relies on module-level globals (`device`, `rbm`, `utils`,
    `samplers`, `block_samplers`, `mmd`, `plt`, `np`, `torch`, `time`).
    """
    makedirs(args.save_dir)
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    model = rbm.BernoulliRBM(args.n_visible, args.n_hidden)
    model.to(device)
    if (args.data == 'mnist'):
        assert (args.n_visible == 784)
        (train_loader, test_loader, plot, viz) = utils.get_data(args)
        # Re-create the RBM with visible biases initialized from the data mean,
        # then train it with CD-k.
        init_data = []
        for (x, _) in train_loader:
            init_data.append(x)
        init_data = torch.cat(init_data, 0)
        init_mean = init_data.mean(0).clamp(0.01, 0.99)
        model = rbm.BernoulliRBM(args.n_visible, args.n_hidden, data_mean=init_mean)
        model.to(device)
        optimizer = torch.optim.Adam(model.parameters(), lr=args.rbm_lr)
        itr = 0
        # NOTE(review): `itr` is never incremented in this loop, so the
        # print-gate below fires on every batch — confirm that is intended.
        for (x, _) in train_loader:
            x = x.to(device)
            xhat = model.gibbs_sample(v=x, n_steps=args.cd)
            d = model.logp_v_unnorm(x)
            m = model.logp_v_unnorm(xhat)
            # CD objective: raise log-prob of data, lower it for model samples.
            obj = (d - m)
            loss = (- obj.mean())
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if ((itr % args.print_every) == 0):
                print('{} | log p(data) = {:.4f}, log p(model) = {:.4f}, diff = {:.4f}'.format(itr, d.mean(), m.mean(), (d - m).mean()))
    else:
        # No data: random RBM weights; nothing to visualize.
        model.W.data = (torch.randn_like(model.W.data) * (0.05 ** 0.5))
        model.b_v.data = (torch.randn_like(model.b_v.data) * 1.0)
        model.b_h.data = (torch.randn_like(model.b_h.data) * 1.0)
        viz = plot = None
    # Long-run Gibbs chains serve as ground-truth samples.
    gt_samples = model.gibbs_sample(n_steps=args.gt_steps, n_samples=(args.n_samples + args.n_test_samples), plot=True)
    kmmd = mmd.MMD(mmd.exp_avg_hamming, False)
    (gt_samples, gt_samples2) = (gt_samples[:args.n_samples], gt_samples[args.n_samples:])
    if (plot is not None):
        plot('{}/ground_truth.png'.format(args.save_dir), gt_samples2)
    # Baseline: MMD between two disjoint ground-truth sample sets.
    opt_stat = kmmd.compute_mmd(gt_samples2, gt_samples)
    print('gt <--> gt log-mmd', opt_stat, opt_stat.log10())
    new_samples = model.gibbs_sample(n_steps=0, n_samples=args.n_test_samples)
    log_mmds = {}
    log_mmds['gibbs'] = []
    ars = {}
    hops = {}
    ess = {}
    times = {}
    chains = {}
    chain = []
    # Common start state for every sampler.
    x0 = model.init_dist.sample((args.n_test_samples,)).to(device)
    temps = ['bg-1', 'hb-10-1', 'gwg', 'dmala', 'dula']
    for temp in temps:
        # Build the sampler named by `temp`.
        if (temp == 'dim-gibbs'):
            sampler = samplers.PerDimGibbsSampler(args.n_visible)
        elif (temp == 'rand-gibbs'):
            sampler = samplers.PerDimGibbsSampler(args.n_visible, rand=True)
        elif ('bg-' in temp):
            # Block Gibbs: 'bg-<block_size>'.
            block_size = int(temp.split('-')[1])
            sampler = block_samplers.BlockGibbsSampler(args.n_visible, block_size)
        elif ('hb-' in temp):
            # Hamming ball: 'hb-<block_size>-<hamming_dist>'.
            (block_size, hamming_dist) = [int(v) for v in temp.split('-')[1:]]
            sampler = block_samplers.HammingBallSampler(args.n_visible, block_size, hamming_dist)
        elif (temp == 'gwg'):
            sampler = samplers.DiffSampler(args.n_visible, 1, fixed_proposal=False, approx=True, multi_hop=False, temp=2.0)
        elif ('gwg-' in temp):
            # Multi-hop GWG: 'gwg-<n_hops>'.
            n_hops = int(temp.split('-')[1])
            sampler = samplers.MultiDiffSampler(args.n_visible, 1, approx=True, temp=2.0, n_samples=n_hops)
        elif (temp == 'dmala'):
            sampler = samplers.LangevinSampler(args.n_visible, 1, fixed_proposal=False, approx=True, multi_hop=False, temp=2.0, step_size=0.2, mh=True)
        elif (temp == 'dula'):
            sampler = samplers.LangevinSampler(args.n_visible, 1, fixed_proposal=False, approx=True, multi_hop=False, temp=2.0, step_size=0.1, mh=False)
        else:
            raise ValueError('Invalid sampler...')
        x = x0.clone().detach()
        log_mmds[temp] = []
        ars[temp] = []
        hops[temp] = []
        times[temp] = []
        chain = []
        cur_time = 0.0
        for i in range(args.n_steps):
            # Time only the sampler transition, not the bookkeeping around it.
            st = time.time()
            xhat = sampler.step(x.detach(), model).detach()
            cur_time += (time.time() - st)
            # Mean Hamming distance moved this step.
            cur_hops = (x != xhat).float().sum((- 1)).mean().item()
            x = xhat
            if ((i % args.subsample) == 0):
                if (args.ess_statistic == 'dims'):
                    chain.append(x.cpu().numpy()[0][None])
                else:
                    # Scalar chain statistic: Hamming distances from the first
                    # chain state to the ground-truth samples.
                    xc = x[0][None]
                    h = (xc != gt_samples).float().sum((- 1))
                    chain.append(h.detach().cpu().numpy()[None])
            if (((i % args.viz_every) == 0) and (plot is not None)):
                plot('{}/temp_{}_samples_{}.png'.format(args.save_dir, temp, i), x)
            if ((i % args.print_every) == 0):
                hard_samples = x
                stat = kmmd.compute_mmd(hard_samples, gt_samples)
                log_stat = stat.log().item()
                log_mmds[temp].append(log_stat)
                times[temp].append(cur_time)
                hops[temp].append(cur_hops)
                print('temp {}, itr = {}, log-mmd = {:.4f}, hop-dist = {:.4f}'.format(temp, i, log_stat, cur_hops))
        chain = np.concatenate(chain, 0)
        ess[temp] = get_ess(chain, args.burn_in)
        chains[temp] = chain
        print('ess = {} +/- {}'.format(ess[temp].mean(), ess[temp].std()))
    # Summary plot of log-MMD trajectories for all samplers.
    plt.clf()
    for temp in temps:
        plt.plot(log_mmds[temp], label='{}'.format(temp))
    plt.legend()
    plt.savefig('{}/logmmd.png'.format(args.save_dir))
def load_static_mnist(args, **kwargs):
    """Load statically binarized MNIST from .amat text files under
    datasets/MNIST_static and return (train, val, test) DataLoaders plus args.

    Labels are all-zero placeholders; they appear unused downstream.
    """
    args.input_size = [1, 28, 28]
    args.input_type = 'binary'
    args.dynamic_binarization = False

    def lines_to_np_array(lines):
        # Each line is a space-separated row of binary pixel values.
        return np.array([[int(i) for i in line.split()] for line in lines])
    with open(os.path.join('datasets', 'MNIST_static', 'binarized_mnist_train.amat')) as f:
        lines = f.readlines()
    x_train = lines_to_np_array(lines).astype('float32')
    with open(os.path.join('datasets', 'MNIST_static', 'binarized_mnist_valid.amat')) as f:
        lines = f.readlines()
    x_val = lines_to_np_array(lines).astype('float32')
    with open(os.path.join('datasets', 'MNIST_static', 'binarized_mnist_test.amat')) as f:
        lines = f.readlines()
    x_test = lines_to_np_array(lines).astype('float32')
    # In-place shuffle of the training rows (consumes the global numpy RNG).
    np.random.shuffle(x_train)
    # Dummy labels.
    y_train = np.zeros((x_train.shape[0], 1))
    y_val = np.zeros((x_val.shape[0], 1))
    y_test = np.zeros((x_test.shape[0], 1))
    train = data_utils.TensorDataset(torch.from_numpy(x_train), torch.from_numpy(y_train))
    train_loader = data_utils.DataLoader(train, batch_size=args.batch_size, shuffle=True, **kwargs)
    validation = data_utils.TensorDataset(torch.from_numpy(x_val).float(), torch.from_numpy(y_val))
    val_loader = data_utils.DataLoader(validation, batch_size=args.test_batch_size, shuffle=False, **kwargs)
    test = data_utils.TensorDataset(torch.from_numpy(x_test).float(), torch.from_numpy(y_test))
    test_loader = data_utils.DataLoader(test, batch_size=args.test_batch_size, shuffle=True, **kwargs)
    return (train_loader, val_loader, test_loader, args)
def load_dynamic_mnist(args, **kwargs):
    """Load MNIST via torchvision for dynamic binarization: train images stay
    real-valued in [0, 1] (binarization happens per-use elsewhere), while
    val/test are binarized once with a fixed seed for reproducibility.

    NOTE(review): uses the deprecated torchvision dataset attributes
    `train_data` / `train_labels` / `test_data` / `test_labels`; newer
    torchvision exposes `data` / `targets` instead — confirm the pinned
    version.
    """
    args.input_size = [1, 28, 28]
    args.input_type = 'binary'
    args.dynamic_binarization = True
    from torchvision import datasets, transforms
    train_loader = torch.utils.data.DataLoader(datasets.MNIST('../data', train=True, download=True, transform=transforms.Compose([transforms.ToTensor()])), batch_size=args.batch_size, shuffle=True)
    test_loader = torch.utils.data.DataLoader(datasets.MNIST('../data', train=False, transform=transforms.Compose([transforms.ToTensor()])), batch_size=args.batch_size, shuffle=True)
    # Pull the raw arrays back out of the torchvision datasets and flatten them.
    x_train = (train_loader.dataset.train_data.float().numpy() / 255.0)
    x_train = np.reshape(x_train, (x_train.shape[0], (x_train.shape[1] * x_train.shape[2])))
    y_train = np.array(train_loader.dataset.train_labels.float().numpy(), dtype=int)
    x_test = (test_loader.dataset.test_data.float().numpy() / 255.0)
    x_test = np.reshape(x_test, (x_test.shape[0], (x_test.shape[1] * x_test.shape[2])))
    y_test = np.array(test_loader.dataset.test_labels.float().numpy(), dtype=int)
    # Carve a 10k validation split off the end of the training set.
    x_val = x_train[50000:60000]
    y_val = np.array(y_train[50000:60000], dtype=int)
    x_train = x_train[0:50000]
    y_train = np.array(y_train[0:50000], dtype=int)
    if args.dynamic_binarization:
        args.input_type = 'binary'
        # Fixed seed: val/test binarization is identical across runs.
        np.random.seed(777)
        x_val = np.random.binomial(1, x_val)
        x_test = np.random.binomial(1, x_test)
    else:
        args.input_type = 'gray'
    train = data_utils.TensorDataset(torch.from_numpy(x_train), torch.from_numpy(y_train))
    train_loader = data_utils.DataLoader(train, batch_size=args.batch_size, shuffle=True, **kwargs)
    validation = data_utils.TensorDataset(torch.from_numpy(x_val).float(), torch.from_numpy(y_val))
    val_loader = data_utils.DataLoader(validation, batch_size=args.test_batch_size, shuffle=False, **kwargs)
    test = data_utils.TensorDataset(torch.from_numpy(x_test).float(), torch.from_numpy(y_test))
    test_loader = data_utils.DataLoader(test, batch_size=args.test_batch_size, shuffle=False, **kwargs)
    return (train_loader, val_loader, test_loader, args)
def load_omniglot(args, n_validation=1345, **kwargs):
    """Load OMNIGLOT from chardata.mat, split off the last *n_validation*
    shuffled training rows as validation, and binarize val/test once with a
    fixed seed.
    """
    args.input_size = [1, 28, 28]
    args.input_type = 'binary'
    args.dynamic_binarization = True

    def reshape_data(data):
        # The .mat file stores images column-major; undo that with order='F'.
        return data.reshape(((- 1), 28, 28)).reshape(((- 1), (28 * 28)), order='F')
    omni_raw = loadmat(os.path.join('datasets', 'OMNIGLOT', 'chardata.mat'))
    train_data = reshape_data(omni_raw['data'].T.astype('float32'))
    x_test = reshape_data(omni_raw['testdata'].T.astype('float32'))
    # In-place shuffle before splitting train/val (consumes the global RNG).
    np.random.shuffle(train_data)
    x_train = train_data[:(- n_validation)]
    x_val = train_data[(- n_validation):]
    if args.dynamic_binarization:
        args.input_type = 'binary'
        # Fixed seed: val/test binarization is identical across runs.
        np.random.seed(777)
        x_val = np.random.binomial(1, x_val)
        x_test = np.random.binomial(1, x_test)
    else:
        args.input_type = 'gray'
    # Dummy labels; only images are used downstream.
    y_train = np.zeros((x_train.shape[0], 1))
    y_val = np.zeros((x_val.shape[0], 1))
    y_test = np.zeros((x_test.shape[0], 1))
    train = data_utils.TensorDataset(torch.from_numpy(x_train), torch.from_numpy(y_train))
    train_loader = data_utils.DataLoader(train, batch_size=args.batch_size, shuffle=True, **kwargs)
    validation = data_utils.TensorDataset(torch.from_numpy(x_val).float(), torch.from_numpy(y_val))
    val_loader = data_utils.DataLoader(validation, batch_size=args.test_batch_size, shuffle=False, **kwargs)
    test = data_utils.TensorDataset(torch.from_numpy(x_test).float(), torch.from_numpy(y_test))
    test_loader = data_utils.DataLoader(test, batch_size=args.test_batch_size, shuffle=True, **kwargs)
    return (train_loader, val_loader, test_loader, args)
def load_caltech101silhouettes(args, **kwargs):
    """Load Caltech-101 Silhouettes (binary 28x28) from the split-1 .mat file.

    NOTE(review): x_train / x_val are shuffled in place while y_train / y_val
    are taken from the raw file unshuffled, so images and labels are no longer
    aligned — the labels appear to be unused downstream; confirm before
    relying on them.
    """
    args.input_size = [1, 28, 28]
    args.input_type = 'binary'
    args.dynamic_binarization = False

    def reshape_data(data):
        # Stored column-major in the .mat file; undo with order='F'.
        return data.reshape(((- 1), 28, 28)).reshape(((- 1), (28 * 28)), order='F')
    caltech_raw = loadmat(os.path.join('datasets', 'Caltech101Silhouettes', 'caltech101_silhouettes_28_split1.mat'))
    # Invert pixel values (1 - x).
    x_train = (1.0 - reshape_data(caltech_raw['train_data'].astype('float32')))
    np.random.shuffle(x_train)
    x_val = (1.0 - reshape_data(caltech_raw['val_data'].astype('float32')))
    np.random.shuffle(x_val)
    x_test = (1.0 - reshape_data(caltech_raw['test_data'].astype('float32')))
    y_train = caltech_raw['train_labels']
    y_val = caltech_raw['val_labels']
    y_test = caltech_raw['test_labels']
    train = data_utils.TensorDataset(torch.from_numpy(x_train), torch.from_numpy(y_train))
    train_loader = data_utils.DataLoader(train, batch_size=args.batch_size, shuffle=True, **kwargs)
    validation = data_utils.TensorDataset(torch.from_numpy(x_val).float(), torch.from_numpy(y_val))
    val_loader = data_utils.DataLoader(validation, batch_size=args.test_batch_size, shuffle=False, **kwargs)
    test = data_utils.TensorDataset(torch.from_numpy(x_test).float(), torch.from_numpy(y_test))
    test_loader = data_utils.DataLoader(test, batch_size=args.test_batch_size, shuffle=True, **kwargs)
    return (train_loader, val_loader, test_loader, args)
def load_histopathologyGray(args, **kwargs):
    """Load the gray Histopathology patches dataset from its pickle file and
    return (train, val, test) DataLoaders of flattened 28x28 images clipped
    away from {0, 1}. Labels are all-zero placeholders.
    """
    args.input_size = [1, 28, 28]
    args.input_type = 'gray'
    args.dynamic_binarization = False
    with open('datasets/HistopathologyGray/histopathology.pkl', 'rb') as f:
        data = pickle.load(f, encoding='latin1')
    # Clip pixel intensities into the open interval (0, 1).
    eps = 1.0 / 512.0
    splits = {}
    for split_name in ('training', 'validation', 'test'):
        flat = np.asarray(data[split_name]).reshape((- 1), (28 * 28))
        splits[split_name] = np.clip(flat, eps, 1.0 - eps)
    x_train = splits['training']
    x_val = splits['validation']
    x_test = splits['test']
    # Dummy labels.
    y_train = np.zeros((x_train.shape[0], 1))
    y_val = np.zeros((x_val.shape[0], 1))
    y_test = np.zeros((x_test.shape[0], 1))

    def make_loader(x, y, batch, shuffle):
        # Wrap float tensors of images plus dummy labels into a DataLoader.
        ds = data_utils.TensorDataset(torch.from_numpy(x).float(), torch.from_numpy(y))
        return data_utils.DataLoader(ds, batch_size=batch, shuffle=shuffle, **kwargs)

    train_loader = make_loader(x_train, y_train, args.batch_size, True)
    val_loader = make_loader(x_val, y_val, args.test_batch_size, False)
    test_loader = make_loader(x_test, y_test, args.test_batch_size, True)
    return (train_loader, val_loader, test_loader, args)
def load_freyfaces(args, TRAIN=1565, VAL=200, TEST=200, **kwargs):
    """Load the Frey Faces dataset (28x20 gray images) from the raw .mat file,
    shuffle it, and split into train/val/test of the given sizes. Labels are
    all-zero placeholders.
    """
    args.input_size = [1, 28, 20]
    args.input_type = 'gray'
    args.dynamic_binarization = False
    import scipy.io
    data = scipy.io.loadmat('datasets/Freyfaces/frey_rawface')['ff'].T
    # Scale raw 8-bit pixels into [0, 1).
    data = (data / 256.0)
    # In-place shuffle before splitting (consumes the global numpy RNG).
    np.random.shuffle(data)
    x_train = data[0:TRAIN].reshape((- 1), (28 * 20))
    x_val = data[TRAIN:(TRAIN + VAL)].reshape((- 1), (28 * 20))
    x_test = data[(TRAIN + VAL):((TRAIN + VAL) + TEST)].reshape((- 1), (28 * 20))
    # Dummy labels.
    y_train = np.zeros((x_train.shape[0], 1))
    y_val = np.zeros((x_val.shape[0], 1))
    y_test = np.zeros((x_test.shape[0], 1))
    train = data_utils.TensorDataset(torch.from_numpy(x_train).float(), torch.from_numpy(y_train))
    train_loader = data_utils.DataLoader(train, batch_size=args.batch_size, shuffle=True, **kwargs)
    validation = data_utils.TensorDataset(torch.from_numpy(x_val).float(), torch.from_numpy(y_val))
    val_loader = data_utils.DataLoader(validation, batch_size=args.test_batch_size, shuffle=False, **kwargs)
    test = data_utils.TensorDataset(torch.from_numpy(x_test).float(), torch.from_numpy(y_test))
    test_loader = data_utils.DataLoader(test, batch_size=args.test_batch_size, shuffle=True, **kwargs)
    return (train_loader, val_loader, test_loader, args)
def load_cifar10(args, **kwargs):
    """Load CIFAR-10 via torchvision, flatten to CHW vectors in [0, 1], and
    carve a 10k validation split off the shuffled training set. Labels are
    all-zero placeholders.

    NOTE(review): uses the deprecated torchvision attributes `train_data` /
    `test_data` (newer versions expose `.data`), and adds 0.5 to the raw
    uint8 pixels before dividing by 256 — presumably dequantization; confirm.
    """
    args.input_size = [3, 32, 32]
    args.input_type = 'continuous'
    args.dynamic_binarization = False
    from torchvision import datasets, transforms
    transform = transforms.Compose([transforms.ToTensor()])
    training_dataset = datasets.CIFAR10('datasets/Cifar10/', train=True, download=True, transform=transform)
    train_data = np.clip(((training_dataset.train_data + 0.5) / 256.0), 0.0, 1.0)
    # NHWC -> NCHW, then flatten each image.
    train_data = np.swapaxes(np.swapaxes(train_data, 1, 2), 1, 3)
    train_data = np.reshape(train_data, ((- 1), np.prod(args.input_size)))
    # In-place shuffle before splitting (consumes the global numpy RNG).
    np.random.shuffle(train_data)
    x_val = train_data[40000:50000]
    x_train = train_data[0:40000]
    # Dummy labels.
    y_train = np.zeros((x_train.shape[0], 1))
    y_val = np.zeros((x_val.shape[0], 1))
    train = data_utils.TensorDataset(torch.from_numpy(x_train).float(), torch.from_numpy(y_train))
    train_loader = data_utils.DataLoader(train, batch_size=args.batch_size, shuffle=True, **kwargs)
    validation = data_utils.TensorDataset(torch.from_numpy(x_val).float(), torch.from_numpy(y_val))
    val_loader = data_utils.DataLoader(validation, batch_size=args.test_batch_size, shuffle=False, **kwargs)
    test_dataset = datasets.CIFAR10('datasets/Cifar10/', train=False, transform=transform)
    test_data = np.clip(((test_dataset.test_data + 0.5) / 256.0), 0.0, 1.0)
    test_data = np.swapaxes(np.swapaxes(test_data, 1, 2), 1, 3)
    x_test = np.reshape(test_data, ((- 1), np.prod(args.input_size)))
    y_test = np.zeros((x_test.shape[0], 1))
    test = data_utils.TensorDataset(torch.from_numpy(x_test).float(), torch.from_numpy(y_test))
    test_loader = data_utils.DataLoader(test, batch_size=args.test_batch_size, shuffle=True, **kwargs)
    return (train_loader, val_loader, test_loader, args)
def load_dataset(args, **kwargs):
    """Dispatch to the loader for ``args.dataset_name``.

    Returns the loader's (train_loader, val_loader, test_loader, args) tuple;
    raises Exception for an unrecognized dataset name.
    """
    loaders = {
        'static_mnist': load_static_mnist,
        'dynamic_mnist': load_dynamic_mnist,
        'omniglot': load_omniglot,
        'caltech': load_caltech101silhouettes,
        'histopathology': load_histopathologyGray,
        'freyfaces': load_freyfaces,
        'cifar10': load_cifar10,
    }
    if args.dataset_name not in loaders:
        raise Exception('Wrong name of the dataset!')
    return loaders[args.dataset_name](args, **kwargs)
def tf_to_pth(tensorflow_model):
    """Convert a TensorFlow GCN checkpoint into a PyTorch state-dict-style dict.

    Reads every variable from the checkpoint, drops optimizer (Adam)
    bookkeeping, renames 'gcn_dense_mse/graphconvolution_<k>_vars/...' entries
    to 'layers.<k-1>....' (TF layers are 1-indexed, torch modules 0-indexed),
    and converts the arrays to float torch tensors.
    """
    reader = pywrap_tensorflow.NewCheckpointReader(tensorflow_model)
    var_to_shape_map = reader.get_variable_to_shape_map()
    var_dict = {k: reader.get_tensor(k) for k in var_to_shape_map.keys()}
    # Drop Adam optimizer state.
    if ('beta1_power' in var_dict):
        del var_dict['beta1_power']
    if ('beta2_power' in var_dict):
        del var_dict['beta2_power']
    for k in list(var_dict.keys()):
        if ('Adam' in k):
            del var_dict[k]
    # Rename layer variables to the PyTorch module naming scheme.
    for k in list(var_dict.keys()):
        if ('weights' in k):
            m = re.search('gcn_dense_mse/graphconvolution_(\\d+)_vars/weights_(\\d+)', k)
            var_dict[('layers.%d.weights_%d' % ((int(m.group(1)) - 1), int(m.group(2))))] = var_dict[k]
            del var_dict[k]
        if ('bias' in k):
            m = re.search('gcn_dense_mse/graphconvolution_(\\d+)_vars/bias', k)
            var_dict[('layers.%d.bias' % (int(m.group(1)) - 1))] = var_dict[k]
            del var_dict[k]
    # Convert numpy arrays to float torch tensors.
    for k in list(var_dict.keys()):
        var_dict[k] = torch.from_numpy(var_dict[k]).float()
    return var_dict
def sparse_dropout(x, keep_prob, noise_shape):
    """Dropout for sparse tensors: keep each entry with probability *keep_prob*
    and rescale the survivors by 1/keep_prob."""
    # floor(keep_prob + U[0,1)) is 1 with probability keep_prob, else 0.
    keep_mask = tf.cast(tf.floor(keep_prob + tf.random_uniform(noise_shape)), dtype=tf.bool)
    retained = tf.sparse_retain(x, keep_mask)
    return retained * (1.0 / keep_prob)
class Dense(Module):
    """Dense (fully connected) layer with optional dropout and bias.

    The forward signature mirrors GraphConvolution (it accepts ``adjs`` and
    ``num_features_nonzero``) so the two layer types are interchangeable, but
    the adjacency inputs are ignored here.
    """

    def __init__(self, input_dim, output_dim, support_num=0, dropout=0.0, bias=False):
        super(Dense, self).__init__()
        self.dropout = dropout
        self.weights = Parameter(torch.Tensor(input_dim, output_dim))
        if bias:
            # Fix: register the bias as a Parameter. The original assigned a
            # plain tensor, which was excluded from named_parameters() and so
            # never initialized in reset_parameters() nor trained.
            self.bias = Parameter(torch.zeros([output_dim]))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Xavier-initialize the weight matrix and zero the bias (if any)."""
        for (name, p) in self.named_parameters():
            if ('weight' in name):
                torch.nn.init.xavier_uniform_(p)
            elif ('bias' in name):
                # Fix: constant_ requires an explicit value; the original call
                # torch.nn.init.constant_(p) raised TypeError when reached.
                torch.nn.init.constant_(p, 0.0)

    def forward(self, inputs, adjs, num_features_nonzero=None):
        """Apply dropout, the linear map, and the optional bias.

        ``adjs`` and ``num_features_nonzero`` are accepted for interface
        compatibility and ignored.
        """
        x = F.dropout(inputs, self.dropout, self.training)
        output = torch.matmul(x, self.weights)
        if (self.bias is not None):
            output = (output + self.bias)
        return output
class SparseMM(torch.autograd.Function):
    """Sparse x dense matrix multiplication with autograd support.

    Implementation by Soumith Chintala:
    https://discuss.pytorch.org/t/
    does-pytorch-support-autograd-on-sparse-matrix/6156/7

    NOTE(review): this uses the legacy (pre-0.4) autograd.Function API with
    per-instance state, invoked as ``SparseMM(sparse)(dense)``. Modern PyTorch
    requires static forward/backward methods, so this class will not work on
    recent versions — confirm the pinned torch version before changing call
    sites.
    """

    def __init__(self, sparse):
        super(SparseMM, self).__init__()
        # The sparse left operand is stored on the instance and treated as a
        # constant (no gradient is computed for it).
        self.sparse = sparse

    def forward(self, dense):
        return torch.mm(self.sparse, dense)

    def backward(self, grad_output):
        # Gradient flows only to the dense operand: d(S @ D)/dD = S^T @ grad.
        grad_input = None
        if self.needs_input_grad[0]:
            grad_input = torch.mm(self.sparse.t(), grad_output)
        return grad_input
class GraphConvolution(Module):
    """Graph convolution layer supporting multiple adjacency 'supports'.

    One weight matrix ``weights_<i>`` is created per support; forward() sums
    the per-support propagated features.
    """

    def __init__(self, input_dim, output_dim, support_num, dropout=0.0, bias=False):
        super(GraphConvolution, self).__init__()
        self.input_dim = input_dim
        # Fix: this attribute was misspelled `outptu_dim`, which made __repr__
        # crash with AttributeError (it reads self.output_dim).
        self.output_dim = output_dim
        self.dropout = dropout
        # One weight matrix per adjacency support.
        for i in range(support_num):
            setattr(self, ('weights_' + str(i)), Parameter(torch.Tensor(input_dim, output_dim)))
        if bias:
            # Fix: register the bias as a Parameter (the original assigned a
            # plain tensor, excluding it from named_parameters() and training).
            self.bias = Parameter(torch.zeros([output_dim]))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Xavier-initialize the weight matrices and zero the bias (if any)."""
        for (name, p) in self.named_parameters():
            if ('weight' in name):
                torch.nn.init.xavier_uniform_(p)
            elif ('bias' in name):
                # Fix: constant_ requires an explicit value; the original call
                # torch.nn.init.constant_(p) raised TypeError when reached.
                torch.nn.init.constant_(p, 0.0)

    def forward(self, inputs, adjs, num_features_nonzero=None):
        """Propagate dense features through each adjacency support and sum.

        ``num_features_nonzero`` is accepted for interface compatibility and
        ignored.
        """
        x = inputs
        if x.is_sparse:
            assert False, 'Features can not be sparse'
        else:
            x = F.dropout(x, self.dropout, self.training)
        supports = []
        for i in range(len(adjs)):
            pre_sup = torch.mm(x, getattr(self, ('weights_' + str(i))))
            # Sparse-dense product via the legacy SparseMM autograd function.
            support = SparseMM(adjs[i])(pre_sup)
            supports.append(support)
        output = sum(supports)
        if (self.bias is not None):
            output = (output + self.bias)
        return output

    def __repr__(self):
        return (((((self.__class__.__name__ + ' (') + str(self.input_dim)) + ' -> ') + str(self.output_dim)) + ')')
def masked_softmax_cross_entropy(preds, labels, mask):
    """Softmax cross-entropy loss averaged over the entries selected by *mask*.

    The mask is normalized by its mean so the weighted average stays on the
    same scale as an unmasked mean.
    """
    per_example = tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=labels)
    weights = tf.cast(mask, dtype=tf.float32)
    weights /= tf.reduce_mean(weights)
    return tf.reduce_mean(per_example * weights)
def masked_accuracy(preds, labels, mask):
    """Classification accuracy averaged over the entries selected by *mask*.

    The mask is normalized by its mean so the weighted average stays on the
    same scale as an unmasked mean.
    """
    hits = tf.cast(tf.equal(tf.argmax(preds, 1), tf.argmax(labels, 1)), tf.float32)
    weights = tf.cast(mask, dtype=tf.float32)
    weights /= tf.reduce_mean(weights)
    return tf.reduce_mean(hits * weights)
def masked_sigmoid_cross_entropy(preds, labels, mask):
    """Sigmoid cross-entropy loss averaged over the entries selected by *mask*.

    The mask is normalized by its mean so the weighted average stays on the
    same scale as an unmasked mean.
    """
    per_example = tf.nn.sigmoid_cross_entropy_with_logits(logits=preds, labels=labels)
    weights = tf.cast(mask, dtype=tf.float32)
    weights /= tf.reduce_mean(weights)
    return tf.reduce_mean(per_example * weights)
def mask_mse_loss(preds, labels, mask):
    """Masked mean-squared-error loss: half the summed squared error between
    mask-weighted predictions and labels.

    The original docstring said 'Sigmoid cross-entropy' — this is MSE.
    The mask is normalized by its mean so its scale does not change the loss.
    """
    # Normalize out-of-place: the original's in-place `mask /= mask.mean()`
    # mutated the caller's tensor when it was already float.
    mask = mask.float()
    mask = mask / mask.mean()
    labels = (labels * mask)
    preds = (preds * mask)
    # reduction='sum' replaces the deprecated size_average=False flag.
    loss = (F.mse_loss(labels, preds, reduction='sum') / 2)
    return loss
def parse_hiddens(dim_in, dim_out):
    """Parse the FLAGS.hiddens spec string into per-layer tuples.

    The spec is a comma-separated list of layer widths; a trailing 'd' on a
    width marks that layer for dropout (the flag is consumed by
    Model_dense_mse). The output dimension *dim_out* is spliced in or appended
    so the final layer always has width dim_out.

    Returns:
        list of (input_dim, output_dim, use_dropout) tuples, chained so each
        layer's input width is the previous layer's output width.

    NOTE(review): reads the module-level FLAGS object defined elsewhere.
    """
    hidden_layers = FLAGS.hiddens
    if (len(hidden_layers) == 0):
        # Empty spec: one layer straight to dim_out.
        hidden_layers = str(dim_out)
    elif (len(hidden_layers) == 1):
        # Single-character spec: one dropout-marked layer to dim_out.
        hidden_layers = (str(dim_out) + 'd')
    elif ((hidden_layers[(- 1)] == ',') or (hidden_layers[(- 2)] == ',')):
        # Spec ends with ',' or ',d': splice dim_out in as the last width,
        # keeping any trailing 'd' dropout marker.
        hidden_layers = ((hidden_layers[:(hidden_layers.rfind(',') + 1)] + str(dim_out)) + hidden_layers[(hidden_layers.rfind(',') + 1):])
    else:
        # Otherwise append dim_out as an extra final layer.
        hidden_layers = ((hidden_layers + ',') + str(dim_out))
    hiddens = hidden_layers.split(',')
    for i in range(len(hiddens)):
        if (hiddens[i][(- 1)] == 'd'):
            # 'd' suffix: strip it and flag the layer for dropout.
            hiddens[i] = (dim_in, int(hiddens[i][:(- 1)]), True)
        else:
            hiddens[i] = (dim_in, int(hiddens[i]), False)
        # Next layer's input is this layer's output width.
        dim_in = hiddens[i][1]
    return hiddens
class Model_dense_mse(nn.Module):
    """Stack of injected layers (Dense or GraphConvolution) trained with a
    masked MSE loss against L2-normalized labels.

    Layer widths come from parse_hiddens(); hidden layers use leaky ReLU and
    the final layer's outputs are L2-normalized. When labels are supplied,
    forward() returns the loss; otherwise it returns the final activations.

    NOTE(review): reads the module-level FLAGS object at construction and
    forward time.
    """

    def __init__(self, layer_func, input_dim, output_dim, support_num, dropout, logging, features=None):
        super(Model_dense_mse, self).__init__()
        if FLAGS.trainable_embedding:
            # The input features themselves become a learnable parameter.
            self.register_parameter('features', nn.Parameter(torch.from_numpy(features).float()))
        self.layer_func = layer_func
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.logging = logging
        self.layers = nn.ModuleList()
        hiddens = parse_hiddens(self.input_dim, self.output_dim)
        for i in range(len(hiddens)):
            # hiddens[i] = (in_dim, out_dim, use_dropout); dropout is applied
            # only to layers flagged in the spec.
            self.layers.append(self.layer_func(input_dim=hiddens[i][0], output_dim=hiddens[i][1], support_num=support_num, dropout=(dropout if hiddens[i][2] else 0)))

    def forward(self, features, adjs, labels=None, labels_mask=None):
        if FLAGS.trainable_embedding:
            # Ignore the passed-in features in favor of the learned embedding.
            features = self.features
        if FLAGS.normalize_embedding:
            features = F.normalize(features)
        inputs = features
        # NOTE(review): takes the shape of row 1 of the features — presumably
        # non-zero-count bookkeeping carried over from the TF implementation;
        # the torch layers accept and ignore it. Confirm before removing.
        num_features_nonzero = features[1].shape
        self.activations = []
        self.activations.append(inputs)
        for (i, layer) in enumerate(self.layers):
            # Last layer: L2-normalize outputs; hidden layers: leaky ReLU.
            if (i == (len(self.layers) - 1)):
                act = (lambda x: F.normalize(x, dim=1))
            else:
                act = (lambda x: F.leaky_relu(x, FLAGS.lrelu_slope))
            hidden = act(layer(self.activations[(- 1)], adjs, num_features_nonzero))
            self.activations.append(hidden)
        outputs = self.activations[(- 1)]
        if (labels is not None):
            # Supervised mode: masked MSE against L2-normalized labels.
            loss = mask_mse_loss(outputs, F.normalize(labels, dim=1), labels_mask)
            return loss
        else:
            return outputs
class GCN_dense_mse(Model_dense_mse):
    """Model_dense_mse variant whose layers are GraphConvolution modules."""

    def __init__(self, *args, **kwargs):
        super().__init__(GraphConvolution, *args, **kwargs)
class Pure_dense_mse(Model_dense_mse):
    """Model_dense_mse variant whose layers are plain Dense modules (no graph
    structure is used)."""

    def __init__(self, *args, **kwargs):
        super().__init__(Dense, *args, **kwargs)
def to_sparse(x):
    """Convert an (indices, values, shape) triple in numpy form to a torch
    sparse float tensor.

    x[0] is an (nnz, ndim) coordinate array, x[1] the corresponding values,
    and x[2] the dense shape.
    """
    coords = torch.from_numpy(x[0]).long().t()  # transpose to (ndim, nnz)
    vals = torch.from_numpy(x[1]).float()
    return torch.sparse.FloatTensor(coords, vals, x[2])
def test_imagenet_zero(fc_file_pred, has_train=1):
    """Evaluate zero-shot ImageNet classification with predicted classifiers.

    Loads the class-id table and word embeddings, builds a matrix of predicted
    fc weights for every candidate class that has a word embedding, then scores
    each validation feature against all candidates and accumulates top-k hits.

    Args:
        fc_file_pred: pickle file of predicted fc weights, one row per entry
            in the classids table.
        has_train: when truthy, keep both seen and unseen candidate classes
            (skipping only invalid ids); otherwise keep unseen classes only.

    Returns:
        np.ndarray of shape (len(topKs), len(top_retrv)) of hit rates.

    NOTE(review): depends on module-level globals (classids_file_retrain,
    word2vec_file, vallist_folder, feat_folder, fc_dim) and CUDA.
    """
    with open(classids_file_retrain) as fp:
        classids = json.load(fp)
    with open(word2vec_file, 'rb') as fp:
        word2vec_feat = pkl.load(fp)
    # Collect validation feature files and their zero-shot (>= 1000) labels.
    testlist = []
    testlabels = []
    with open(vallist_folder) as fp:
        for line in fp:
            fname, lbl = line.split()
            assert int(lbl) >= 1000
            feat_name = os.path.join(feat_folder, fname.replace('.JPEG', '.npy'))
            if not os.path.exists(feat_name):
                print('not feature', feat_name)
                continue
            testlist.append(feat_name)
            testlabels.append(int(lbl))
    with open(fc_file_pred, 'rb') as fp:
        fc_layers_pred = pkl.load(fp)
    fc_layers_pred = np.array(fc_layers_pred)
    print('fc output', fc_layers_pred.shape)
    # Mark test classes (flag == 1) that have a usable word embedding.
    valid_clss = np.zeros(22000)
    cnt_zero_wv = 0
    for j in range(len(classids)):
        if classids[j][1] == 1:
            twv = word2vec_feat[j]
            twv = twv / (np.linalg.norm(twv) + 1e-06)
            if np.linalg.norm(twv) == 0:
                cnt_zero_wv = cnt_zero_wv + 1
                continue
            valid_clss[classids[j][0]] = 1
    cnt_zero_wv = 0
    # Build the candidate classifier matrix, one row per candidate class.
    labels_train, word2vec_train = [], []
    fc_now = []
    for j in range(len(classids)):
        tfc = fc_layers_pred[j]
        if has_train:
            if classids[j][0] < 0:
                continue
        elif classids[j][1] == 0:
            # Without has_train, drop seen (train) classes from the candidates.
            continue
        if classids[j][0] >= 0:
            twv = word2vec_feat[j]
            if np.linalg.norm(twv) == 0:
                cnt_zero_wv = cnt_zero_wv + 1
                continue
            labels_train.append(classids[j][0])
            word2vec_train.append(twv)
            feat_len = len(tfc)
            tfc = tfc[(feat_len - fc_dim):feat_len]  # keep only the last fc_dim dims
            fc_now.append(tfc)
    fc_now = torch.from_numpy(np.array(fc_now)).cuda()
    print('skip candidate class due to no word embedding: %d / %d:'
          % (cnt_zero_wv, len(labels_train) + cnt_zero_wv))
    print('candidate class shape: ', fc_now.shape)
    fc_now = fc_now.t()
    labels_train = np.array(labels_train)
    print('train + test class: ', len(labels_train))
    topKs = [1]
    top_retrv = [1, 2, 5, 10, 20]
    hit_count = np.zeros((len(topKs), len(top_retrv)))
    cnt_valid = 0
    t = time.time()
    for j in range(len(testlist)):
        featname = testlist[j]
        if valid_clss[testlabels[j]] == 0:
            continue
        cnt_valid = cnt_valid + 1
        matfeat = np.load(featname)
        matfeat = torch.from_numpy(matfeat).cuda()
        scores = torch.matmul(matfeat, fc_now).squeeze()
        _, ids = torch.sort(-scores)  # descending score order
        ids = ids.data.cpu().numpy()
        for k in range(len(topKs)):
            for k2 in range(len(top_retrv)):
                current_len = top_retrv[k2]
                for sort_id in range(current_len):
                    lbl = labels_train[ids[sort_id]]
                    if lbl == testlabels[j]:
                        hit_count[k][k2] = hit_count[k][k2] + 1
                        break
        if (j % 10000) == 0:
            inter = time.time() - t
            # BUGFIX: was inter / (j - 1), which divides by -1 on the first
            # report (j == 0) and produced a bogus negative ETA.
            print('processing %d / %d ' % (j, len(testlist)),
                  ', Estimated time: ', (inter / (j + 1)) * (len(testlist) - j))
    hit_count = hit_count / cnt_valid
    # BUGFIX: context manager so the result file is closed even on error, and
    # the 'total' print now %-formats cnt_valid instead of passing it as a
    # second positional print argument.
    with open(fc_file_pred + '_result_pred_zero.txt', 'w') as fout:
        for j in range(len(topKs)):
            outstr = ''
            for k in range(len(top_retrv)):
                outstr = outstr + ' ' + str(hit_count[j][k])
            print(outstr)
            print('total: %d' % cnt_valid)
            fout.write(outstr + '\n')
    return hit_count
class Dummy(torch.utils.data.Dataset):
    """Dataset over precomputed .npy feature files, filtered to valid classes.

    Keeps only (file, label) pairs whose raw label is marked non-zero in
    ``valid_clss``, and remaps each raw label to its index within
    ``labels_train``.  Items that fail to load yield ``None`` so a filtering
    collate_fn can drop them.
    """

    def __init__(self, testlist, testlabel, valid_clss, labels_train):
        # Raw class id -> position in the candidate-class list.
        self.inv_labels_train = {lbl: idx for idx, lbl in enumerate(labels_train)}
        kept = [(path, lbl) for path, lbl in zip(testlist, testlabel) if valid_clss[lbl] != 0]
        self.testlist, self.testlabel = zip(*kept)

    def __getitem__(self, index):
        try:
            feat = np.load(self.testlist[index])
            return feat, self.inv_labels_train[self.testlabel[index]]
        except:
            # Sentinel for the filtering collate_fn.
            return None

    def __len__(self):
        return len(self.testlist)
def accuracy(output, target, topk=(1,)):
    """Compute the precision@k for the specified values of k.

    Args:
        output: (batch, num_classes) score tensor.
        target: (batch,) tensor of ground-truth class indices.
        topk: iterable of k values to evaluate.

    Returns:
        List of 1-element tensors, one per k: the percentage of samples
        whose target appears among the top-k predictions.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()  # (maxk, batch)
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        res = []
        for k in topk:
            # BUGFIX: .view(-1) raises on non-contiguous slices in newer
            # PyTorch versions; .reshape(-1) handles both layouts.
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res
def test_imagenet_zero(fc_file_pred, has_train=1):
    """Batched zero-shot ImageNet evaluation via ResNet-50 softmax mixing.

    Maps each image's temperature-scaled ResNet-50 softmax over the 1000 seen
    classes onto word-embedding space (probability-weighted combination of the
    top-k seen-class embeddings), then scores that embedding against the
    predicted per-class classifiers and accumulates top-k hit rates.

    NOTE(review): this is the second definition of ``test_imagenet_zero`` in
    this file and shadows the earlier one.  It relies on module-level globals
    (classids_file_retrain, word2vec_file, vallist_folder, feat_folder,
    fc_dim, args) plus CUDA, and loads a hard-coded ResNet-50 checkpoint path.

    Returns hit rates as fractions (the percentage-scaled accumulator is
    divided by 100 before returning).
    """
    with open(classids_file_retrain) as fp: classids = json.load(fp)
    with open(word2vec_file, 'rb') as fp: word2vec_feat = pkl.load(fp)
    # Gather validation feature paths and labels (all labels are zero-shot, >= 1000).
    testlist = []
    testlabels = []
    with open(vallist_folder) as fp:
        for line in fp:
            (fname, lbl) = line.split()
            assert (int(lbl) >= 1000)
            feat_name = os.path.join(feat_folder, fname.replace('.JPEG', '.npy'))
            testlist.append(feat_name)
            testlabels.append(int(lbl))
    with open(fc_file_pred, 'rb') as fp: fc_layers_pred = pkl.load(fp)
    fc_layers_pred = np.array(fc_layers_pred)
    print('fc output', fc_layers_pred.shape)
    # Mark test classes (flag classids[j][1] == 1) as valid candidates.
    valid_clss = np.zeros(22000)
    cnt_zero_wv = 0
    for j in range(len(classids)):
        if (classids[j][1] == 1):
            twv = word2vec_feat[j]
            # NOTE(review): the normalized twv is not used afterwards here.
            twv = (twv / (np.linalg.norm(twv) + 1e-06))
            valid_clss[classids[j][0]] = 1
    cnt_zero_wv = 0
    (labels_train, word2vec_train) = ([], [])
    fc_now = []
    # Word embeddings of the 1000 seen (train) classes, indexed by class id.
    w2v_1k = [None for _ in range(1000)]
    for j in range(len(classids)):
        tfc = fc_layers_pred[j]
        if (classids[j][1] == 0):
            assert (classids[j][0] < 1000)
            w2v_1k[classids[j][0]] = word2vec_feat[j]
        if has_train:
            if (classids[j][0] < 0):
                continue
        elif (classids[j][1] == 0):
            # Without has_train, drop seen (train) classes from the candidates.
            continue
        if (classids[j][0] >= 0):
            twv = word2vec_feat[j]
            labels_train.append(classids[j][0])
            word2vec_train.append(twv)
            feat_len = len(tfc)
            # Keep only the last fc_dim entries of the predicted weights.
            tfc = tfc[(feat_len - fc_dim):feat_len]
            fc_now.append(tfc)
    fc_now = torch.from_numpy(np.array(fc_now)).float().cuda()
    if args.wv_normalize:
        fc_now = F.normalize(fc_now)
    w2v_1k = torch.from_numpy(np.array(w2v_1k)).float().cuda()
    print(('skip candidate class due to no word embedding: %d / %d:' % (cnt_zero_wv, (len(labels_train) + cnt_zero_wv))))
    print('candidate class shape: ', fc_now.shape)
    # Normalize candidate classifiers and transpose for matmul scoring.
    fc_now = F.normalize(fc_now).t()
    labels_train = np.array(labels_train)
    print('train + test class: ', len(labels_train))
    topKs = [1]
    top_retrv = [1, 2, 5, 10, 20]
    hit_count = np.zeros((len(topKs), len(top_retrv)))
    cnt_valid = 0
    t = time.time()
    # NOTE(review): hard-coded checkpoint path -- should be configurable.
    res50_weights = torch.load('/home-nfs/rluo/rluo/model/pytorch-resnet/resnet50-caffe.pth')
    res50_fc_weight = res50_weights['fc.weight'].cuda()
    res50_fc_bias = res50_weights['fc.bias'].cuda()
    dataset = Dummy(testlist, testlabels, valid_clss, labels_train)
    # collate_fn drops items that failed to load (Dummy yields None for those).
    loader = torch.utils.data.DataLoader(dataset, 1000, shuffle=False, num_workers=4, collate_fn=(lambda x: torch.utils.data.dataloader.default_collate([_ for _ in x if (_ is not None)])))
    for (i, (matfeat, label)) in enumerate(loader):
        (matfeat, label) = (matfeat.cuda(), label.cuda())
        cnt_valid += matfeat.size(0)
        # Temperature-scaled softmax over the 1000 seen classes.
        prob = F.softmax(((torch.matmul(matfeat, res50_fc_weight.t()) + res50_fc_bias) / args.temperature), dim=1)
        (topk_prob, topk_idx) = torch.topk(prob, args.top_k)
        # Gather the top-k seen-class word vectors per image and take their
        # probability-weighted sum as the image's embedding.
        gath = torch.gather(w2v_1k.unsqueeze(0).expand(matfeat.size(0), (- 1), (- 1)), 1, topk_idx.unsqueeze(2).expand((- 1), (- 1), w2v_1k.size(1)))
        matfeat = torch.bmm(topk_prob.unsqueeze(1), gath).squeeze(1)
        scores = torch.matmul(matfeat, fc_now).squeeze()
        tmp = accuracy(scores, label, top_retrv)
        for k in range(len(topKs)):
            for k2 in range(len(top_retrv)):
                # NOTE(review): tmp is indexed by k2 only; valid because len(topKs) == 1.
                hit_count[k][k2] = (hit_count[k][k2] + (tmp[k2] * matfeat.size(0)))
        if ((cnt_valid % 1) == 0):
            # Progress report every batch (cnt_valid % 1 is always 0).
            inter = (time.time() - t)
            print(('processing %d / %d ' % (cnt_valid, len(dataset))), ', Estimated time: ', ((inter / (i + 1)) * ((len(loader) - i) - 1)))
            print((hit_count / cnt_valid))
    hit_count = (hit_count / cnt_valid)
    fout = open((fc_file_pred + '_result_pred_zero.txt'), 'w')
    for j in range(len(topKs)):
        outstr = ''
        for k in range(len(top_retrv)):
            outstr = ((outstr + ' ') + str(hit_count[j][k]))
        print(outstr)
        print(('total: %d' % cnt_valid))
        fout.write((outstr + '\n'))
    fout.close()
    return (hit_count / 100)
class Dummy(torch.utils.data.Dataset):
    """Dataset over precomputed .npy feature files, filtered to valid classes.

    NOTE(review): this redefines (shadows) the earlier ``Dummy`` class in this
    file; the two variants behave identically.

    Keeps only (file, label) pairs whose raw label is marked non-zero in
    ``valid_clss`` and remaps raw labels to their index in ``labels_train``.
    Items that fail to load yield ``None`` so a filtering collate_fn can
    drop them.
    """

    def __init__(self, testlist, testlabel, valid_clss, labels_train):
        # Raw class id -> position in the candidate-class list.
        self.inv_labels_train = {v: k for (k, v) in enumerate(labels_train)}
        # Keep only samples whose label is marked valid.
        (self.testlist, self.testlabel) = zip(*[(f, lbl) for (f, lbl) in zip(testlist, testlabel) if (valid_clss[lbl] != 0)])

    def __getitem__(self, index):
        try:
            return (np.load(self.testlist[index]), self.inv_labels_train[self.testlabel[index]])
        except Exception:
            # BUGFIX: the original handler body was the bare expression `None`
            # (a no-op) under a bare `except:`; make the sentinel return
            # explicit and stop catching SystemExit/KeyboardInterrupt.
            return None

    def __len__(self):
        return len(self.testlist)