Dataset columns (name, dtype, min, max):
  repo             stringlengths   2 .. 99
  file             stringlengths   13 .. 225
  code             stringlengths   0 .. 18.3M
  file_length      int64           0 .. 18.3M
  avg_line_length  float64         0 .. 1.36M
  max_line_length  int64           0 .. 4.26M
  extension_type   stringclasses   1 value
repo: deficient-efficient
file: deficient-efficient-master/load_wrn50_2.py
import re
import torch
import torch.nn.functional as F
from torch.utils import model_zoo
from models.blocks import Conv
from models.wide_resnet import WRN_50_2
from collections import OrderedDict


def all_equal(iterable_1, iterable_2):
    return all([x == y for x, y in zip(iterable_1, iterable_2)])


# functional model definition from functional zoo:
# https://github.com/szagoruyko/functional-zoo/blob/master/imagenet-validation.py#L27-L47
def define_model(params):
    def conv2d(input, params, base, stride=1, pad=0):
        return F.conv2d(input, params[base + '.weight'],
                        params[base + '.bias'], stride, pad)

    def group(input, params, base, stride, n):
        o = input
        for i in range(0, n):
            b_base = ('%s.block%d.conv') % (base, i)
            x = o
            o = conv2d(x, params, b_base + '0')
            o = F.relu(o)
            o = conv2d(o, params, b_base + '1', stride=i == 0 and stride or 1, pad=1)
            o = F.relu(o)
            o = conv2d(o, params, b_base + '2')
            if i == 0:
                o += conv2d(x, params, b_base + '_dim', stride=stride)
            else:
                o += x
            o = F.relu(o)
        return o

    # determine network size by parameters
    blocks = [sum([re.match(r'group%d.block\d+.conv0.weight' % j, k) is not None
                   for k in params.keys()]) for j in range(4)]

    def f(input, params):
        o = F.conv2d(input, params['conv0.weight'], params['conv0.bias'], 2, 3)
        o = F.relu(o)
        o = F.max_pool2d(o, 3, 2, 1)
        o_g0 = group(o, params, 'group0', 1, blocks[0])
        o_g1 = group(o_g0, params, 'group1', 2, blocks[1])
        o_g2 = group(o_g1, params, 'group2', 2, blocks[2])
        o_g3 = group(o_g2, params, 'group3', 2, blocks[3])
        o = F.avg_pool2d(o_g3, 7, 1, 0)
        o = o.view(o.size(0), -1)
        o = F.linear(o, params['z.fc.weight'], params['z.fc.bias'])
        return o

    return f


if __name__ == '__main__':
    # our model definition
    net = WRN_50_2(Conv)
    # load parameters from model zoo
    params = model_zoo.load_url('https://s3.amazonaws.com/modelzoo-networks/wide-resnet-50-2-export-5ae25d50.pth')
    # otherwise the ordering will be messed up
    params['z.fc.weight'] = params.pop('fc.weight')
    params['z.fc.bias'] = params.pop('fc.bias')
    params = sorted(params.items())  # list of tuples, in order

    # make state_dict from model_zoo parameters
    state_dict = OrderedDict()
    w_i, b_i = 0, 0
    for n, p in net.state_dict().items():
        if 'weight' in n and 'bn' not in n:
            while 'weight' not in params[w_i][0]:
                w_i += 1
            k, v = params[w_i]
            print(k, " == ", n)
            assert all_equal(v.shape, p.size()), f"{v.shape} =/= {p.size()}"
            state_dict[n] = v
            w_i += 1
        elif 'bias' in n:
            while 'bias' not in params[b_i][0]:
                b_i += 1
            k, v = params[b_i]
            print(k, " == ", n)
            assert all_equal(v.shape, p.size()), f"{v.shape} =/= {p.size()}"
            state_dict[n] = v
            b_i += 1
        else:
            state_dict[n] = p
    assert max(w_i, b_i) == len(params)  # all params are matched

    # test if this is the same as the functional implementation
    params = OrderedDict(params)
    f = define_model(params)
    net.load_state_dict(state_dict)
    net.eval()
    X = torch.randn(2, 3, 224, 224)
    func_out, net_out = f(X, params), net(X)[0]
    error = torch.abs(func_out - net_out)
    assert error.max() < 1e-3, "%f" % error.max()
    print("Output given random input is equal within %f" % error.max())

    # now save a new checkpoint file, with correct saved terms
    save_dict = {}
    save_dict['net'] = state_dict
    save_dict['epoch'] = 100
    save_dict['conv'] = 'Conv'
    save_dict['blocktype'] = None
    save_dict['module'] = None
    torch.save(save_dict, 'checkpoints/wrn_50_2.imagenet.modelzoo.t7')
file_length: 4,000 | avg_line_length: 36.046296 | max_line_length: 138 | extension_type: py
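The 'z.fc.*' rename in the script above exists purely so the classifier keys sort after the conv/bn entries, which the positional matching depends on; a standalone illustration of the sorting behaviour:

# Why 'fc.weight' is renamed to 'z.fc.weight' before sorted(params.items()):
keys = ['conv0.weight', 'fc.weight', 'group0.block0.conv0.weight']
print(sorted(keys))  # 'fc.weight' sorts into the middle, breaking alignment
keys = ['conv0.weight', 'z.fc.weight', 'group0.block0.conv0.weight']
print(sorted(keys))  # 'z.fc.weight' now sorts last, matching the model order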
repo: deficient-efficient
file: deficient-efficient-master/imagenet_experiments.py
import json

#settings = ['ACDC_%i'%n for n in [12, 28]] +\
#           ['SepHashed_%.2f'%s for s in [0.08, 0.58]] +\
settings = ['Generic_%.2f' % s for s in [0.03, 0.21]] +\
           ['Tucker_%.2f' % s for s in [0.25, 0.73]] +\
           ['TensorTrain_%.2f' % s for s in [0.27, 0.75]] +\
           ['Shuffle_%i' % n for n in [7, 1]]

experiments = []

import datetime
now = datetime.datetime.now()
monthday = now.strftime("%B")[:3] + "%i" % now.day

# use these settings to train WideResNets from scratch
for s in settings:
    experiment = ["python", "main.py", "imagenet", "teacher",
                  "--conv", s,
                  "-t", "wrn_50_2.%s.%s" % (s.lower(), monthday),
                  "--network", "WRN_50_2",
                  "--GPU", "0,1,2,3"]
    experiments.append(experiment)

# and to train WideResNets with a teacher
for s in settings:
    experiment = ["python", "main.py", "imagenet", "student",
                  "--conv", s,
                  "-t", "wrn_50_2.imagenet.modelzoo",
                  "-s", "wrn_50_2.%s.student.%s" % (s.lower(), monthday),
                  "--network", "WRN_50_2",
                  "--alpha", "0.", "--beta", "1e3",
                  "--GPU", "0,1,2,3"]
    experiments.append(experiment)

with open("wrn_50_2_imagenet.json", "w") as f:
    f.write(json.dumps(experiments))
file_length: 1,203 | avg_line_length: 36.625 | max_line_length: 77 | extension_type: py
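Each schedule entry is an argv-style list, so a consumer can hand it straight to subprocess; a hypothetical runner (not part of the repo) for illustration:

# Hypothetical runner for the JSON schedule written above.
import json
import subprocess

with open("wrn_50_2_imagenet.json") as f:
    experiments = json.load(f)

for argv in experiments:
    subprocess.run(argv, check=True)  # e.g. ["python", "main.py", "imagenet", ...]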
repo: deficient-efficient
file: deficient-efficient-master/collate_results.py
# open schedule json, then search for which machines the longest progressed job
# has run on
import json
import sys
import os
import torch
import subprocess
from subprocess import PIPE
from collections import OrderedDict

from funcs import what_conv_block
from models.wide_resnet import WideResNet, WRN_50_2
from models.darts import DARTS
from count import measure_model
from tqdm import tqdm

with open('machine_list.json', 'r') as f:
    # list of strings with "hostname:path" where the deficient-efficient repos
    # can be found
    machines = json.load(f)


def ckpt_name(experiment):
    if '-s' in experiment:
        prefix = '-s'
    else:
        prefix = '-t'
    ckpt_idx = [i for i, arg in enumerate(experiment) if arg == prefix][0] + 1
    return experiment[ckpt_idx]


def parse_name(path):
    monthday = path.split(".")[-2]
    path = path.split('.')[1:]  # split off part containing settings
    # index to cut out for settings
    idx = [i for i, s in enumerate(path) if monthday == s or 'student' == s][0]
    method, setting = (".".join(path[:idx])).split("_")
    # this is just the settings string now
    return 'student' in path, method, setting


def parse_checkpoint(ckpt_name, ckpt_contents):
    results = OrderedDict()
    results['epoch'] = ckpt_contents['epoch']
    results['val_errors'] = [float(x) for x in ckpt_contents['val_errors']]
    results['train_errors'] = [float(x) for x in ckpt_contents['train_errors']]
    # hard part: count parameters by making an instance of the network
    network = {'wrn_28_10': 'WideResNet',
               'darts': 'DARTS',
               'wrn_50_2': 'WRN_50_2'}[ckpt_name.split(".")[0]]
    h, w = {'WideResNet': (32, 32),
            'DARTS': (32, 32),
            'WRN_50_2': (224, 224)}[network]
    SavedConv, SavedBlock = what_conv_block(ckpt_contents['conv'],
                                            ckpt_contents['blocktype'],
                                            ckpt_contents['module'])
    model = build_network(SavedConv, SavedBlock, network)
    flops, params = measure_model(model, h, w)
    assert params == sum([p.numel() for p in model.parameters()])
    results['no_params'] = params
    results['flops'] = flops
    results['settings'] = parse_name(ckpt_name)
    results['scatter'] = (params, results['val_errors'][-1],
                          results['train_errors'][-1], results['epoch'], flops)
    return results


# instance the model
def build_network(Conv, Block, network):
    if network == 'WideResNet':
        return WideResNet(28, 10, Conv, Block, num_classes=10, dropRate=0)
    elif network == 'WRN_50_2':
        return WRN_50_2(Conv)
    elif network == 'DARTS':
        return DARTS(Conv, num_classes=10, drop_path_prob=0., auxiliary=False)


def keep_oldest(collated, ckpt_name, ckpt_contents):
    # if the checkpoint already exists in collated,
    # keep it if it's run for more epochs
    ckpt = parse_checkpoint(ckpt_name, ckpt_contents)
    try:
        existing_epochs = collated[ckpt_name]['epoch']
    except KeyError:
        # doesn't exist yet so return
        return ckpt
    if int(existing_epochs) < int(ckpt['epoch']):
        return ckpt
    else:
        return collated[ckpt_name]


def main():
    try:
        # read the schedule from json
        json_path = sys.argv[1]
        with open(json_path, "r") as f:
            schedule = json.load(f)

        # prepare directory
        if not os.path.exists("collate"):
            os.mkdir("collate")
        else:
            # clean up directory
            old_ckpts = os.listdir("collate")
            for c in old_ckpts:
                os.remove(os.path.join("collate", c))

        # make a list of all the checkpoint files we need to check
        checkpoints = []
        for e in schedule:
            checkpoints.append(ckpt_name(e) + ".t7")

        # look for these checkpoints on every machine we know about
        collated = []
        for m in tqdm(machines, desc='machine'):
            # connect to the remote machine
            hostname, directory = m.split(":")
            checkpoint_dir = os.path.join(directory, "checkpoints")
            completed = subprocess.run(f"ssh {hostname} ls {checkpoint_dir}".split(" "),
                                       stdout=PIPE, stderr=PIPE)
            checkpoints_on_remote = completed.stdout.decode().split("\n")
            # look for overlap between that and the checkpoints we care about
            overlap = list(set(checkpoints_on_remote) & set(checkpoints))
            for checkpoint in tqdm(overlap, desc="copying"):
                checkpoint_loc = os.path.join(checkpoint_dir, checkpoint)
                checkpoint_dest = f"collate/{hostname}.{checkpoint}"
                if not os.path.exists(checkpoint_dest):
                    subprocess.run(f"scp {hostname}:{checkpoint_loc} {checkpoint_dest}".split(" "),
                                   stdout=PIPE, stderr=PIPE)
    except IndexError:
        pass

    # iterate over copied files
    collated = OrderedDict()
    copied = os.listdir("collate")
    for checkpoint in tqdm(copied, desc='Opening checkpoints'):
        checkpoint_loc = os.path.join("collate", checkpoint)
        hostname = checkpoint.split(".")[0]
        checkpoint_name = ".".join(checkpoint.split(".")[1:])
        checkpoint_contents = torch.load(checkpoint_loc)
        collated[checkpoint_name] = keep_oldest(collated, checkpoint_name,
                                                checkpoint_contents)

    for k in collated:
        print(k, collated[k]['epoch'], collated[k]['val_errors'][-1])

    with open("collated.json", "w") as f:
        f.write(json.dumps(collated, indent=2))


if __name__ == "__main__":
    main()
file_length: 5,460 | avg_line_length: 37.730496 | max_line_length: 125 | extension_type: py
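The filename convention that parse_name relies on can be traced by hand; a standalone sketch for the student case, using a hypothetical checkpoint name:

# What parse_name recovers from a checkpoint filename, step by step.
name = 'wrn_50_2.tucker_0.25.student.Nov5'
parts = name.split('.')[1:]          # drop the network prefix
# everything before the 'student'/date token is the settings string
idx = parts.index('student')
method, setting = '.'.join(parts[:idx]).split('_')
print('student' in parts, method, setting)  # True tucker 0.25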
repo: deficient-efficient
file: deficient-efficient-master/history.py
# opens checkpoints and prints the commands used to run each
import torch
import os
import argparse

parser = argparse.ArgumentParser(description='Inspect saved checkpoints')
parser.add_argument('--match', type=str, default=None,
                    help='Filter checkpoints by keyword.')

if __name__ == '__main__':
    args = parser.parse_args()
    ckpt_paths = os.listdir("checkpoints")
    # filter for search term
    if args.match is not None:
        ckpt_paths = [c for c in ckpt_paths if args.match in c]
    for p in ckpt_paths:
        try:
            ckpt = torch.load("checkpoints/" + p)
            if 'args' in ckpt.keys():
                print(p + " (%i-%.2f) " % (ckpt['epoch'], ckpt['val_errors'][-1])
                      + ": " + " ".join(ckpt['args']))
        except:
            pass
file_length: 766 | avg_line_length: 33.863636 | max_line_length: 113 | extension_type: py
repo: deficient-efficient
file: deficient-efficient-master/models/resnet.py
'''This is a rewriting of the native resnet definition that comes with
Pytorch, to allow it to use our blocks and convolutions for imagenet
experiments. Annoyingly, the pre-trained models don't use pre-activation
blocks.'''
import torch
import torch.nn as nn
import math
import torchvision.models.resnet
import torch.utils.model_zoo as model_zoo

from .blocks import *

__all__ = ['ResNet', 'resnet18', 'resnet34']  #, 'resnet50', 'resnet101', 'resnet152']

model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}


class ResNet(nn.Module):

    def __init__(self, conv, block, n, num_classes=1000, s=1):
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        nChannels = [64, 64, 128, 256, 512]
        self.layer1 = torch.nn.ModuleList()
        for i in range(s):
            self.layer1.append(NetworkBlock(int(n[0] // s),
                nChannels[0] if i == 0 else nChannels[1], nChannels[1],
                block, 1, conv=conv))
        self.layer2 = torch.nn.ModuleList()
        for i in range(s):
            self.layer2.append(NetworkBlock(int(n[1] // s),
                nChannels[1] if i == 0 else nChannels[2], nChannels[2],
                block, 2, conv=conv))
        self.layer3 = torch.nn.ModuleList()
        for i in range(s):
            self.layer3.append(NetworkBlock(int(n[2] // s),
                nChannels[2] if i == 0 else nChannels[3], nChannels[3],
                block, 2, conv=conv))
        self.layer4 = torch.nn.ModuleList()
        for i in range(s):
            self.layer4.append(NetworkBlock(int(n[3] // s),
                nChannels[3] if i == 0 else nChannels[4], nChannels[4],
                block, 2, conv=conv))
        # self.layer1 = self._make_layer(block, 64, layers[0])
        # self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        # self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        # self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = nn.Linear(512, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def forward(self, x):
        activations = []
        out = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        for sub_block in self.layer1:
            out = sub_block(out)
            activations.append(out)
        for sub_block in self.layer2:
            out = sub_block(out)
            activations.append(out)
        for sub_block in self.layer3:
            out = sub_block(out)
            activations.append(out)
        for sub_block in self.layer4:
            out = sub_block(out)
            activations.append(out)
        out = self.avgpool(out)
        out = out.view(out.size(0), -1)
        out = self.fc(out)
        return out, activations


def resnet18(pretrained=False, conv=nnConv, block=OldBlock):
    """Constructs a ResNet-18 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(conv, block, [2, 2, 2, 2])
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
    return model


def resnet34(pretrained=False, conv=nnConv, block=OldBlock):
    """Constructs a ResNet-34 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(conv, block, [3, 4, 6, 3])
    if pretrained:
        old_model = torchvision.models.resnet.resnet34(pretrained=False)
        old_model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
        new_state_dict = model.state_dict()
        old_state_dict = old_model.state_dict()
        # This assumes the sequence of each module in the network is the same
        # in both cases. Ridiculously, batch norm params are stored in a
        # different sequence in the downloaded state dict, so we have to load
        # the old model definition, load in its downloaded state dict to
        # change the order back, then transfer this!
        old_model = torchvision.models.resnet.resnet34(pretrained=False)
        old_model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
        old_names = [v for v in old_state_dict]
        new_names = [v for v in new_state_dict]
        for i, j in enumerate(old_names):
            new_state_dict[new_names[i]] = old_state_dict[j]
        model.load_state_dict(new_state_dict)
    return model


def test2():
    net = resnet34()
    x = torch.randn(1, 3, 224, 224)
    y, _ = net(Variable(x))
    print(y.size())


if __name__ == '__main__':
    test2()

# Haven't written the old bottleneck yet.
#
# def resnet50(pretrained=False, **kwargs):
#     """Constructs a ResNet-50 model.
#     Args:
#         pretrained (bool): If True, returns a model pre-trained on ImageNet
#     """
#     model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
#     if pretrained:
#         model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
#     return model
#
#
# def resnet101(pretrained=False, **kwargs):
#     """Constructs a ResNet-101 model.
#     Args:
#         pretrained (bool): If True, returns a model pre-trained on ImageNet
#     """
#     model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
#     if pretrained:
#         model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
#     return model
#
#
# def resnet152(pretrained=False, **kwargs):
#     """Constructs a ResNet-152 model.
#     Args:
#         pretrained (bool): If True, returns a model pre-trained on ImageNet
#     """
#     model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
#     if pretrained:
#         model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
#     return model
file_length: 6,623 | avg_line_length: 34.047619 | max_line_length: 120 | extension_type: py
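The by-position state-dict transfer in resnet34 generalises; here is the same idea as a standalone helper (a hypothetical utility, not part of the repo), which only works when both models enumerate equivalent tensors in the same order:

def transfer_by_position(new_model, old_state_dict):
    # hypothetical helper: pair tensors by enumeration order, not by name
    new_state = new_model.state_dict()
    for new_key, old_key in zip(new_state, old_state_dict):
        assert new_state[new_key].shape == old_state_dict[old_key].shape
        new_state[new_key] = old_state_dict[old_key]
    new_model.load_state_dict(new_state)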
repo: deficient-efficient
file: deficient-efficient-master/models/hashed.py
# HashedNet Convolutional Layer: https://arxiv.org/abs/1504.04788
from functools import reduce

import torch
import torch.nn as nn
import torch.nn.functional as F


class HashedConv2d(nn.Conv2d):
    """Conv2d with the weights of the convolutional filters parameterised
    using a budgeted subset of parameters and random indexes to place those
    parameters in the weight tensor."""
    def __init__(self, in_channels, out_channels, kernel_size, budget,
                 stride=1, padding=0, dilation=1, groups=1, bias=True):
        super(HashedConv2d, self).__init__(in_channels, out_channels,
                                           kernel_size, stride=stride,
                                           padding=padding, dilation=dilation,
                                           groups=groups, bias=True)
        # grab budgeted subset of the weights
        assert self.weight.numel() >= budget, \
            f"budget {budget} higher than {self.weight.numel()}"
        self.weight_size = self.weight.size()
        budgeted = self.weight.data.view(-1)[:budget]
        del self.weight
        # register non-budgeted weights
        self.register_parameter('hashed_weight', nn.Parameter(budgeted))
        # precompute random index matrix
        idxs = torch.randint(high=budget - 1, size=self.weight_size).long()
        idxs = idxs.view(-1)
        # register indexes as a buffer
        self.register_buffer('idxs', idxs)
        #self.W = self.weight[self.idxs].cuda()

    def forward(self, x):
        # index to make weight matrix
        try:
            W = self.hashed_weight.index_select(0, self.idxs).view(self.weight_size)
        except RuntimeError:
            import ipdb
            ipdb.set_trace()
        # complete forward pass as normal
        return F.conv2d(x, W, self.bias, self.stride, self.padding,
                        self.dilation, self.groups)


class HalfHashedSeparable(nn.Module):
    """A depthwise grouped convolution followed by a HashedNet 1x1
    convolution. Grouped convolution could also be hashed, but it's not."""
    def __init__(self, in_channels, out_channels, kernel_size, budget,
                 stride=1, padding=0, dilation=1, groups=1, bias=True):
        super(HalfHashedSeparable, self).__init__()
        # has to have hashed in the name to get caught by alternative weight
        # decay setting, it is not actually hashed
        if kernel_size > 1:
            self.grouped = nn.Conv2d(in_channels, in_channels, kernel_size,
                                     stride=stride, padding=padding,
                                     dilation=dilation, groups=in_channels,
                                     bias=False)
            # we spent some of the budget on that grouped convolution
            assert self.grouped.weight.numel() == reduce(lambda x, y: x * y,
                                                         self.grouped.weight.size())
            budget = budget - self.grouped.weight.numel()
            assert budget > 0, \
                "budget exceeded by grouped convolution: %i too many" % (-budget)
            self.hashed = HashedConv2d(in_channels, out_channels, 1, budget,
                                       bias=bias)
        else:
            self.grouped = None
            self.hashed = HashedConv2d(in_channels, out_channels, 1, budget,
                                       stride=stride, padding=padding,
                                       dilation=dilation, bias=bias)

    def forward(self, x):
        if self.grouped is not None:
            x = self.grouped(x)
        return self.hashed(x)


class HashedSeparable(nn.Module):
    """Separable, where grouped and pointwise are both Hashed."""
    def __init__(self, in_channels, out_channels, kernel_size, budget,
                 stride=1, padding=0, dilation=1, groups=1, bias=True):
        super(HashedSeparable, self).__init__()
        # has to have hashed in the name to get caught by alternative weight
        # decay setting, it is not actually hashed
        grouped_params = float(in_channels * kernel_size * kernel_size)
        pointwise_params = float(in_channels * out_channels)
        total_params = grouped_params + pointwise_params
        grouped_budget = int(budget * grouped_params / total_params)
        pointwise_budget = int(budget * pointwise_params / total_params)
        #print(total_params, grouped_budget, pointwise_budget)
        if kernel_size > 1:
            self.grouped = HashedConv2d(in_channels, in_channels, kernel_size,
                                        grouped_budget, stride=stride,
                                        padding=padding, dilation=dilation,
                                        groups=in_channels, bias=False)
            stride = 1
        else:
            self.grouped = None
            pointwise_budget = budget
        assert budget > 0, "budget must be greater than 0, was %i" % budget
        self.hashed = HashedConv2d(in_channels, out_channels, 1,
                                   pointwise_budget, stride=stride, bias=bias)

    def forward(self, x):
        if self.grouped is not None:
            x = self.grouped(x)
        return self.hashed(x)


if __name__ == '__main__':
    from timeit import timeit
    setup = "from __main__ import HashedConv2d; import torch; " \
            "X = torch.randn(128, 256, 28, 28).cuda(); " \
            "conv = HashedConv2d(256, 512, 3, 1000, bias=False).cuda()"
    print("HashedConv2d: ", timeit("_ = conv(X)", setup=setup, number=100))
    setup = "import torch.nn as nn; import torch; " \
            "X = torch.randn(128, 256, 28, 28).cuda(); " \
            "conv = nn.Conv2d(256, 512, 3, bias=False).cuda()"
    print("Conv2d: ", timeit("_ = conv(X)", setup=setup, number=100))
    setup = "from __main__ import HalfHashedSeparable; import torch; " \
            "X = torch.randn(128, 256, 28, 28).cuda(); " \
            "conv = HalfHashedSeparable(256, 512, 3, 5000, bias=False).cuda()"
    print("HalfHashedSeparable: ", timeit("_ = conv(X)", setup=setup, number=100))
    setup = "from __main__ import HashedSeparable; import torch; " \
            "X = torch.randn(128, 256, 28, 28).cuda(); " \
            "conv = HashedSeparable(256, 512, 3, 5000, bias=False).cuda()"
    print("HashedSeparable: ", timeit("_ = conv(X)", setup=setup, number=100))
file_length: 5,827 | avg_line_length: 48.811966 | max_line_length: 176 | extension_type: py
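The budget mechanism in HashedConv2d can be sanity-checked directly: however large the virtual weight tensor, only `budget` weight values are learned. A minimal sketch (the `models.hashed` import path is an assumption about this repo's layout):

import torch
from models.hashed import HashedConv2d  # import path assumed

# 16->32 channels with 3x3 filters would normally need 16*32*3*3 = 4608
# weights; here the filter bank is indexed out of just 500 shared values.
conv = HashedConv2d(16, 32, 3, 500)
print(conv.hashed_weight.numel())   # 500 trainable weight values
print(conv.idxs.numel())            # 4608 precomputed random indexes
y = conv(torch.randn(2, 16, 8, 8))  # forward behaves like a normal Conv2d
print(y.shape)                      # torch.Size([2, 32, 6, 6])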
repo: deficient-efficient
file: deficient-efficient-master/models/darts.py
# DARTS network definition
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
from torch.utils.checkpoint import checkpoint

from collections import namedtuple

from .blocks import DepthwiseSep
from .wide_resnet import group_lowrank, compression

#############################
# Training utils start here #
#############################

class Cutout(object):
    def __init__(self, length):
        self.length = length

    def __call__(self, img):
        h, w = img.size(1), img.size(2)
        mask = np.ones((h, w), np.float32)
        y = np.random.randint(h)
        x = np.random.randint(w)

        y1 = np.clip(y - self.length // 2, 0, h)
        y2 = np.clip(y + self.length // 2, 0, h)
        x1 = np.clip(x - self.length // 2, 0, w)
        x2 = np.clip(x + self.length // 2, 0, w)

        mask[y1: y2, x1: x2] = 0.
        mask = torch.from_numpy(mask)
        mask = mask.expand_as(img)
        img *= mask
        return img


def _data_transforms_cifar10():
    CIFAR_MEAN = [0.49139968, 0.48215827, 0.44653124]
    CIFAR_STD = [0.24703233, 0.24348505, 0.26158768]

    train_transform = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
    ])
    if True:  # always use cutout
        train_transform.transforms.append(Cutout(16))

    valid_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
    ])
    return train_transform, valid_transform

#####################################
# End of training utils             #
#####################################
# Model definition code starts here #
#####################################

Genotype = namedtuple('Genotype', 'normal normal_concat reduce reduce_concat')

DARTS_V2 = Genotype(
    normal=[('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 0),
            ('sep_conv_3x3', 1), ('sep_conv_3x3', 1), ('skip_connect', 0),
            ('skip_connect', 0), ('dil_conv_3x3', 2)],
    normal_concat=[2, 3, 4, 5],
    reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect', 2),
            ('max_pool_3x3', 1), ('max_pool_3x3', 0), ('skip_connect', 2),
            ('skip_connect', 2), ('max_pool_3x3', 1)],
    reduce_concat=[2, 3, 4, 5])

OPS = {
    'none':         lambda C, stride, affine, conv: Zero(stride),
    'avg_pool_3x3': lambda C, stride, affine, conv: nn.AvgPool2d(3, stride=stride, padding=1, count_include_pad=False),
    'max_pool_3x3': lambda C, stride, affine, conv: nn.MaxPool2d(3, stride=stride, padding=1),
    'skip_connect': lambda C, stride, affine, conv: Identity() if stride == 1 else FactorizedReduce(C, C, conv, affine=affine),
    'sep_conv_3x3': lambda C, stride, affine, conv: SepConv(C, C, 3, stride, 1, Conv=conv, affine=affine),
    'sep_conv_5x5': lambda C, stride, affine, conv: SepConv(C, C, 5, stride, 2, Conv=conv, affine=affine),
    'sep_conv_7x7': lambda C, stride, affine, conv: SepConv(C, C, 7, stride, 3, Conv=conv, affine=affine),
    'dil_conv_3x3': lambda C, stride, affine, conv: DilConv(C, C, 3, stride, 2, 2, Conv=conv, affine=affine),
    'dil_conv_5x5': lambda C, stride, affine, conv: DilConv(C, C, 5, stride, 4, 2, Conv=conv, affine=affine),
    # this is never used so you can remove it without hitting any errors
    # 'conv_7x1_1x7' : lambda C, stride, affine, conv: nn.Sequential(
    #     nn.ReLU(inplace=False),
    #     nn.Conv2d(C, C, (1,7), stride=(1, stride), padding=(0, 3), bias=False),
    #     nn.Conv2d(C, C, (7,1), stride=(stride, 1), padding=(3, 0), bias=False),
    #     nn.BatchNorm2d(C, affine=affine)
    # ),
}


class ReLUConvBN(nn.Module):

    def __init__(self, C_in, C_out, ConvClass, kernel_size, stride, padding, affine=True):
        super(ReLUConvBN, self).__init__()
        #ConvClass = nn.Conv2d if ConvClass is DepthwiseSep else ConvClass
        self.op = nn.Sequential(
            nn.ReLU(inplace=False),
            ConvClass(C_in, C_out, kernel_size, stride=stride, padding=padding, bias=False),
            nn.BatchNorm2d(C_out, affine=affine)
        )

    def forward(self, x):
        #return self.op(x)
        return checkpoint(self.op, x)


class DilConv(nn.Module):

    def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation,
                 Conv=DepthwiseSep, affine=True):
        super(DilConv, self).__init__()
        self.op = nn.Sequential(
            nn.ReLU(inplace=False),
            Conv(C_in, C_out, kernel_size=kernel_size, stride=stride,
                 padding=padding, dilation=dilation, bias=False),
            nn.BatchNorm2d(C_out, affine=affine),
        )

    def forward(self, x):
        return self.op(x)


class SepConv(nn.Module):

    def __init__(self, C_in, C_out, kernel_size, stride, padding,
                 Conv=DepthwiseSep, affine=True):
        super(SepConv, self).__init__()
        self.op = nn.Sequential(
            nn.ReLU(inplace=False),
            Conv(C_in, C_in, kernel_size=kernel_size, stride=stride,
                 padding=padding, bias=False),
            nn.BatchNorm2d(C_in, affine=affine),
            nn.ReLU(inplace=False),
            Conv(C_in, C_out, kernel_size=kernel_size, stride=1,
                 padding=padding, bias=False),
            nn.BatchNorm2d(C_out, affine=affine),
        )

    def forward(self, x):
        return self.op(x)


class Identity(nn.Module):

    def __init__(self):
        super(Identity, self).__init__()

    def forward(self, x):
        return x


class Zero(nn.Module):

    def __init__(self, stride):
        super(Zero, self).__init__()
        self.stride = stride

    def forward(self, x):
        if self.stride == 1:
            return x.mul(0.)
        return x[:, :, ::self.stride, ::self.stride].mul(0.)


class FactorizedReduce(nn.Module):

    def __init__(self, C_in, C_out, ConvClass, affine=True):
        super(FactorizedReduce, self).__init__()
        assert C_out % 2 == 0
        self.relu = nn.ReLU(inplace=False)
        #ConvClass = nn.Conv2d if ConvClass is DepthwiseSep else ConvClass
        #ConvClass = nn.Conv2d
        self.conv_1 = ConvClass(C_in, C_out // 2, 1, stride=2, padding=0, bias=False)
        self.conv_2 = ConvClass(C_in, C_out // 2, 1, stride=2, padding=0, bias=False)
        self.bn = nn.BatchNorm2d(C_out, affine=affine)

    def forward(self, x):
        def factorized_reduce(x):
            x = self.relu(x)
            out = torch.cat([self.conv_1(x), self.conv_2(x[:, :, 1:, 1:])], dim=1)
            return self.bn(out)
        #out = checkpoint(cat_1, *[self.conv_1(x), self.conv_2(x[:,:,1:,1:])])
        #return factorized_reduce(x)
        return checkpoint(factorized_reduce, x)
        #return out


class Cell(nn.Module):

    def __init__(self, genotype, C_prev_prev, C_prev, C, reduction, reduction_prev, Conv):
        super(Cell, self).__init__()
        self.Conv = Conv
        if reduction_prev:
            self.preprocess0 = FactorizedReduce(C_prev_prev, C, Conv)
        else:
            self.preprocess0 = ReLUConvBN(C_prev_prev, C, Conv, 1, 1, 0)
        self.preprocess1 = ReLUConvBN(C_prev, C, Conv, 1, 1, 0)

        if reduction:
            op_names, indices = zip(*genotype.reduce)
            concat = genotype.reduce_concat
        else:
            op_names, indices = zip(*genotype.normal)
            concat = genotype.normal_concat
        self._compile(C, op_names, indices, concat, reduction)

    def _compile(self, C, op_names, indices, concat, reduction):
        assert len(op_names) == len(indices)
        self._steps = len(op_names) // 2
        self._concat = concat
        self.multiplier = len(concat)

        self._ops = nn.ModuleList()
        for name, index in zip(op_names, indices):
            stride = 2 if reduction and index < 2 else 1
            op = OPS[name](C, stride, True, self.Conv)
            self._ops += [op]
        self._indices = indices

    def forward(self, s0, s1, drop_prob):
        s0 = self.preprocess0(s0)
        s1 = self.preprocess1(s1)

        states = [s0, s1]
        for i in range(self._steps):
            h1 = states[self._indices[2 * i]]
            h2 = states[self._indices[2 * i + 1]]
            op1 = self._ops[2 * i]
            op2 = self._ops[2 * i + 1]
            h1 = op1(h1)
            h2 = op2(h2)
            if self.training and drop_prob > 0.:
                if not isinstance(op1, Identity):
                    h1 = drop_path(h1, drop_prob)
                if not isinstance(op2, Identity):
                    h2 = drop_path(h2, drop_prob)
            s = h1 + h2
            states += [s]
        return torch.cat([states[i] for i in self._concat], dim=1)
        #return checkpoint(cat_1, *[states[i] for i in self._concat])


class AuxiliaryHeadCIFAR(nn.Module):

    def __init__(self, C, num_classes):
        """assuming input size 8x8"""
        super(AuxiliaryHeadCIFAR, self).__init__()
        self.features = nn.Sequential(
            nn.ReLU(inplace=True),
            nn.AvgPool2d(5, stride=3, padding=0, count_include_pad=False),  # image size = 2 x 2
            nn.Conv2d(C, 128, 1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 768, 2, bias=False),
            nn.BatchNorm2d(768),
            nn.ReLU(inplace=True)
        )
        self.classifier = nn.Linear(768, num_classes)

    def forward(self, x):
        x = self.features(x)
        x = self.classifier(x.view(x.size(0), -1))
        return x


def drop_path(x, drop_prob):
    if drop_prob > 0.:
        keep_prob = 1. - drop_prob
        mask = torch.FloatTensor(x.size(0), 1, 1, 1).bernoulli_(keep_prob)
        mask = mask.to(x.device)
        x.div_(keep_prob)
        x.mul_(mask)
    return x


class DARTS(nn.Module):

    def __init__(self, ConvClass=DepthwiseSep, C=36, num_classes=10, layers=20,
                 auxiliary=True, genotype=DARTS_V2, drop_path_prob=0.2):
        self.kwargs = dict(ConvClass=ConvClass, C=C, num_classes=num_classes,
                           layers=layers, auxiliary=auxiliary,
                           genotype=genotype, drop_path_prob=drop_path_prob)
        super(DARTS, self).__init__()
        self.drop_path_prob = drop_path_prob
        self._layers = layers
        self._auxiliary = auxiliary

        stem_multiplier = 3
        C_curr = stem_multiplier * C
        self.stem = nn.Sequential(
            nn.Conv2d(3, C_curr, 3, padding=1, bias=False),
            nn.BatchNorm2d(C_curr)
        )

        C_prev_prev, C_prev, C_curr = C_curr, C_curr, C
        self.cells = nn.ModuleList()
        reduction_prev = False
        for i in range(layers):
            if i in [layers // 3, 2 * layers // 3]:
                C_curr *= 2
                reduction = True
            else:
                reduction = False
            cell = Cell(genotype, C_prev_prev, C_prev, C_curr, reduction,
                        reduction_prev, ConvClass)
            reduction_prev = reduction
            self.cells += [cell]
            C_prev_prev, C_prev = C_prev, cell.multiplier * C_curr
            if i == 2 * layers // 3:
                C_to_auxiliary = C_prev

        if auxiliary:
            self.auxiliary_head = AuxiliaryHeadCIFAR(C_to_auxiliary, num_classes)
        self.global_pooling = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(C_prev, num_classes)

    def compression_ratio(self):
        return compression(self.__class__, self.kwargs)

    def grouped_parameters(self, weight_decay):
        return group_lowrank(self.named_parameters(), weight_decay,
                             self.compression_ratio())

    def forward(self, input):
        logits_aux = None
        s0 = s1 = self.stem(input)
        cell_AMs = []
        attention = lambda x: F.normalize(x.pow(2).mean(1).view(x.size(0), -1))
        layers = len(self.cells)
        for i, cell in enumerate(self.cells):
            s0, s1 = s1, cell(s0, s1, self.drop_path_prob)
            if i in [layers // 3, 2 * layers // 3]:
                cell_AMs.append(attention(s0))
            if i == 2 * self._layers // 3:
                if self._auxiliary and self.training:
                    logits_aux = self.auxiliary_head(s1)
        out = self.global_pooling(s1)
        logits = self.classifier(out.view(out.size(0), -1))
        return logits, cell_AMs, logits_aux


if __name__ == '__main__':
    darts = DARTS()
    X = torch.randn(10, 3, 32, 32)
    print(darts(X))
file_length: 11,450 | avg_line_length: 32.979228 | max_line_length: 429 | extension_type: py
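drop_path in the file above zeroes entire samples during training and rescales the survivors by 1/keep_prob, so the expected activation is unchanged; a quick standalone check (import path assumed):

import torch
from models.darts import drop_path  # import path assumed

x = torch.ones(10000, 1, 1, 1)
out = drop_path(x.clone(), drop_prob=0.2)
print((out == 0).float().mean())  # ~0.2 of samples are zeroed
print(out.mean())                 # ~1.0: survivors scaled by 1/0.8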
repo: deficient-efficient
file: deficient-efficient-master/models/wide_resnet.py
# network definition
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from collections import OrderedDict

# wildcard import for legacy reasons
if __name__ == '__main__':
    from blocks import *
else:
    from .blocks import *


def parse_options(convtype, blocktype):
    # legacy cmdline argument parsing
    if isinstance(convtype, str):
        conv = conv_function(convtype)
    else:
        raise NotImplementedError("Tuple convolution specification no longer supported.")
    if blocktype == 'Basic':
        block = BasicBlock
    elif blocktype == 'Bottle':
        block = BottleBlock
    elif blocktype == 'Old':
        block = OldBlock
    else:
        block = None
    return conv, block


def group_lowrank(named_parameters, weight_decay, compression_ratio):
    lowrank_params, other_params = [], []
    for n, p in named_parameters:
        if 'A' in n or 'D' in n:
            lowrank_params.append(p)
        elif 'shuffle' in n:
            lowrank_params.append(p)
        elif 'hashed' in n:
            lowrank_params.append(p)
        elif 'weight_core' in n or 'weight_u' in n:
            lowrank_params.append(p)
        elif 'lowrank' in n:
            lowrank_params.append(p)
        else:
            other_params.append(p)
    return [{'params': lowrank_params,
             'weight_decay': compression_ratio * weight_decay},
            {'params': other_params}]


def compression(model_class, kwargs):
    # assume there is a kwarg "conv", which is the convolution we've chosen
    compressed_params = sum([p.numel() for p in model_class(**kwargs).parameters()])
    if 'genotype' in list(kwargs.keys()):
        # standard conv with DARTS is DepthwiseSep
        kwargs['ConvClass'] = DepthwiseSep
    else:
        # everything else it's Conv
        kwargs['ConvClass'] = Conv
    uncompressed_params = sum([p.numel() for p in model_class(**kwargs).parameters()])
    ratio = float(compressed_params) / float(uncompressed_params)
    print("Compression: %i to %i, ratio %.2f" % (uncompressed_params, compressed_params, ratio))
    return ratio


class WideResNet(nn.Module):
    def __init__(self, depth, widen_factor, ConvClass, block, num_classes=10,
                 dropRate=0.0, s=1):
        super(WideResNet, self).__init__()
        self.kwargs = dict(depth=depth, widen_factor=widen_factor,
                           ConvClass=ConvClass, block=block,
                           num_classes=num_classes, dropRate=dropRate, s=s)
        nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
        nChannels = [int(a) for a in nChannels]
        assert ((depth - 4) % 6 == 0)  # why?
        n = (depth - 4) // 6
        assert n % s == 0, 'n mod s must be zero'
        # 1st conv before any network block
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
                               padding=1, bias=False)
        # 1st block
        self.block1 = torch.nn.ModuleList()
        for i in range(s):
            self.block1.append(NetworkBlock(int(n // s),
                nChannels[0] if i == 0 else nChannels[1], nChannels[1],
                block, 1, dropRate, ConvClass))
        # 2nd block
        self.block2 = torch.nn.ModuleList()
        for i in range(s):
            self.block2.append(NetworkBlock(int(n // s),
                nChannels[1] if i == 0 else nChannels[2], nChannels[2],
                block, 2 if i == 0 else 1, dropRate, ConvClass))
        # 3rd block
        self.block3 = torch.nn.ModuleList()
        for i in range(s):
            self.block3.append(NetworkBlock(int(n // s),
                nChannels[2] if i == 0 else nChannels[3], nChannels[3],
                block, 2 if i == 0 else 1, dropRate, ConvClass))
        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(nChannels[3], num_classes)
        self.nChannels = nChannels[3]

        # normal is better than uniform initialisation
        # this should really be in `self.reset_parameters`
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                try:
                    m.weight.data.normal_(0, math.sqrt(2. / n))
                except AttributeError:
                    pass
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def compression_ratio(self):
        return compression(self.__class__, self.kwargs)

    def grouped_parameters(self, weight_decay):
        # iterate over parameters and separate those in ACDC layers
        return group_lowrank(self.named_parameters(), weight_decay,
                             self.compression_ratio())

    def forward(self, x):
        activation_maps = []
        out = self.conv1(x)
        #activations.append(out)
        attention = lambda x: F.normalize(x.pow(2).mean(1).view(x.size(0), -1))
        for sub_block in self.block1:
            out = sub_block(out)
            activation_maps.append(attention(out))
        for sub_block in self.block2:
            out = sub_block(out)
            activation_maps.append(attention(out))
        for sub_block in self.block3:
            out = sub_block(out)
            activation_maps.append(attention(out))
        out = self.relu(self.bn1(out))
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.nChannels)
        return self.fc(out), activation_maps


class ResNet(nn.Module):
    def __init__(self, ConvClass, layers, block=Bottleneck, widen=1,
                 num_classes=1000, expansion=4):
        self.kwargs = dict(layers=layers, expansion=expansion,
                           ConvClass=ConvClass, widen=widen,
                           num_classes=num_classes, block=block)
        self.expansion = expansion
        super(ResNet, self).__init__()
        self.Conv = ConvClass
        self.inplanes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64 * widen, layers[0])
        self.layer2 = self._make_layer(block, 128 * widen, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256 * widen, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512 * widen, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d((7, 7), 1, 0)
        self.fc = nn.Linear(512 * widen * self.expansion, num_classes)
        #self.fc = self.Conv(512*widen * self.expansion, num_classes, kernel_size=1, bias=True)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if hasattr(m, 'weight'):
                    w = m.weight
                    nn.init.kaiming_normal_(w, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * self.expansion:
            downsample = nn.Sequential(OrderedDict([
                ('conv', self.Conv(self.inplanes, planes * self.expansion,
                                   kernel_size=1, stride=stride, padding=0,
                                   bias=False)),
                ('bn', nn.BatchNorm2d(planes * self.expansion))
            ]))

        layers = []
        layers.append(block(self.inplanes, planes, self.Conv, stride,
                            downsample, self.expansion))
        self.inplanes = planes * self.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, self.Conv,
                                expansion=self.expansion))

        return nn.Sequential(*layers)

    def compression_ratio(self):
        return compression(self.__class__, self.kwargs)

    def grouped_parameters(self, weight_decay):
        # iterate over parameters and separate those in other layer types
        return group_lowrank(self.named_parameters(), weight_decay,
                             self.compression_ratio())

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        attention_maps = []
        attention = lambda x: F.normalize(x.pow(2).mean(1).view(x.size(0), -1))
        if self.train:
            x = self.layer1(x)
            #x = checkpoint(self.layer1, x)
            #x = checkpoint_sequential(self.layer1, 1, x)
        else:
            x = self.layer1(x)
        attention_maps.append(attention(x))
        if self.train:
            x = self.layer2(x)
            #x = checkpoint(self.layer2, x)
            #x = checkpoint_sequential(self.layer2, 1, x)
        else:
            x = self.layer2(x)
        attention_maps.append(attention(x))
        if self.train:
            x = self.layer3(x)
            #x = checkpoint(self.layer3, x)
            #x = checkpoint_sequential(self.layer3, 1, x)
        else:
            x = self.layer3(x)
        attention_maps.append(attention(x))
        if self.train:
            x = self.layer4(x)
            #x = checkpoint(self.layer4, x)
            #x = checkpoint_sequential(self.layer4, 1, x)
        else:
            x = self.layer4(x)
        attention_maps.append(attention(x))

        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        #x = x.view(x.size(0), -1)
        return x, attention_maps


def WRN_50_2(Conv, Block=None):
    assert Block is None
    return ResNet(Conv, [3, 4, 6, 3], widen=2, expansion=2)


def test():
    net = WideResNet(28, 10, conv_function("Shuffle_7"), BasicBlock)
    params = net.grouped_parameters(5e-4)
    params = [d['params'] for d in params]
    print("Low-rank: ", sum([p.numel() for p in params[0]]))
    print("Full-rank: ", sum([p.numel() for p in params[1]]))
    print("FC: ", sum([p.numel() for p in net.fc.parameters()]))

    net = WRN_50_2(conv_function("Shuffle_7"))
    params = net.grouped_parameters(5e-4)
    params = [d['params'] for d in params]
    print("Low-rank: ", sum([p.numel() for p in params[0]]))
    print("Full-rank: ", sum([p.numel() for p in params[1]]))
    print("FC: ", sum([p.numel() for p in net.fc.parameters()]))

    x = torch.randn(1, 3, 224, 224).float()
    y, _ = net(Variable(x))
    print(y.size())


if __name__ == '__main__':
    test()
file_length: 10,717 | avg_line_length: 36.872792 | max_line_length: 106 | extension_type: py
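The grouped weight decay above is meant to be passed straight to an optimiser: substitute-layer parameters (matched by name in group_lowrank) carry their own decay, scaled by the measured compression ratio, while everything else falls back to the optimiser-level default. A minimal sketch, assuming the repo's modules are importable:

import torch.optim as optim
from models.blocks import conv_function, BasicBlock
from models.wide_resnet import WideResNet

net = WideResNet(28, 10, conv_function('Shuffle_4'), BasicBlock)
groups = net.grouped_parameters(weight_decay=5e-4)
# the first group carries its own scaled weight_decay; the second
# falls back to the optimiser-level default below
opt = optim.SGD(groups, lr=0.1, momentum=0.9, weight_decay=5e-4)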
repo: deficient-efficient
file: deficient-efficient-master/models/__init__.py
from .wide_resnet import *
#from .resnet import *
file_length: 50 | avg_line_length: 16 | max_line_length: 26 | extension_type: py
repo: deficient-efficient
file: deficient-efficient-master/models/decomposed.py
# Substitute layer explicitly decomposing the tensors in convolutional layers
# All implemented using tntorch: https://github.com/rballester/tntorch
# All also use a separable design: the low-rank approximate pointwise
# convolution is preceded by a grouped convolution
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import tntorch as tn

torch.set_default_dtype(torch.float32)


def dimensionize(t, d, rank_scale):
    """Take a tensor, t, and reshape so that it has d dimensions, of roughly
    equal size."""
    # if not, we have to do some work
    N = t.numel()
    # do d-th root with log
    equal = math.exp((1. / d) * math.log(N))
    # if this is an integer, our work here is done
    if abs(round(equal) - equal) < 1e-6:
        dims = [int(round(equal))] * d
    # if the tensor already has d dimensions
    elif t.ndimension() == d:
        dims = list(t.size())
    # oh no, then we want to build up a list of dimensions it *does* divide by
    else:
        dims = []
        for i in range(d - 1):
            divisor = closest_divisor(N, int(round(equal)))
            dims.append(divisor)
            N = N // divisor
        dims.append(N)
    # rank between dimensions must be less than
    ranks = {}
    ranks['ranks_tt'] = [max(1, int(round(rank_scale * min(b, a))))
                         for b, a in zip(dims, dims[1:])]
    ranks['ranks_tucker'] = [max(1, int(round(rank_scale * d))) for d in dims]
    ranks['ranks_cp'] = max(1, int(round(rank_scale * min(dims))))
    return t.view(*dims), ranks


def closest_divisor(N, d):
    if N < d:
        return N
    while N % d != 0:
        d += 1
    return d


class TnTorchConv2d(nn.Conv2d):
    def __init__(self, in_channels, out_channels, kernel_size, rank_scale,
                 TnConstructor, stride=1, padding=0, dilation=1, groups=1,
                 bias=True):
        self.TnConstructor = TnConstructor
        assert groups == 1
        if kernel_size == 1:
            super(TnTorchConv2d, self).__init__(in_channels, out_channels, 1,
                                                stride=stride, padding=padding,
                                                dilation=dilation, bias=bias)
        elif kernel_size > 1:
            super(TnTorchConv2d, self).__init__(in_channels, out_channels, 1,
                                                bias=bias)
            self.grouped = nn.Conv2d(in_channels, in_channels, kernel_size,
                                     stride=stride, padding=padding,
                                     dilation=dilation, groups=in_channels,
                                     bias=False)
        self.rank_scale = rank_scale
        self.tn_weight = self.TnConstructor(self.weight.data.squeeze(),
                                            rank_scale=self.rank_scale)
        # store the correct size for this weight
        self.weight_size = self.weight.size()
        # check the fit to the weight initialisation
        self.store_metrics(self.weight)
        # delete the original weight
        del self.weight
        # then register the cores of the Tensor Train as parameters
        self.register_tnparams(self.tn_weight.cores, self.tn_weight.Us)

    def register_tnparams(self, cores, Us):
        cores = [] if all([c is None for c in cores]) else cores
        Us = [] if all([u is None for u in Us]) else Us
        # tensor train or cp cores
        for i, core in enumerate(cores):
            core_name = 'weight_core_%i' % i
            if hasattr(self, core_name):
                delattr(self, core_name)
            core.requires_grad = True
            self.register_parameter(core_name, nn.Parameter(core))
            # replace Parameter in tn.Tensor object
            self.tn_weight.cores[i] = getattr(self, core_name)
        for i, u in enumerate(Us):
            u_name = 'weight_u_%i' % i
            if hasattr(self, u_name):
                delattr(self, u_name)
            u.requires_grad = True
            self.register_parameter(u_name, nn.Parameter(u))
            # replace Parameter in tn.Tensor object
            self.tn_weight.Us[i] = getattr(self, u_name)

    def conv_weight(self):
        weight = self.tn_weight.torch()
        n, d, _, _ = self.weight_size
        return weight.view(n, d, 1, 1)

    def reset_parameters(self):
        if hasattr(self, 'tn_weight'):
            # full rank weight tensor
            weight = self.conv_weight()
        else:
            weight = self.weight.data
        n = self.in_channels
        for k in self.kernel_size:
            n *= k
        stdv = 1. / math.sqrt(n)
        weight.data.uniform_(-stdv, stdv)
        if hasattr(self, 'tn_weight'):
            self.tn_weight = self.TnConstructor(weight.data.squeeze(),
                                                rank_scale=self.rank_scale)
            # update cores
            self.register_tnparams(self.tn_weight.cores, self.tn_weight.Us)
        else:
            self.weight.data = weight
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)

    def forward(self, x):
        if hasattr(self, 'grouped'):
            out = self.grouped(x)
        else:
            out = x
        weight = self.conv_weight()
        return F.conv2d(out, weight, self.bias, self.stride, self.padding,
                        self.dilation, self.groups)

    def store_metrics(self, full):
        t = self.tn_weight
        full = full.view(t.torch().size())
        self.compression = (full.numel(), t.numel(), full.numel() / t.numel())
        self.relative_error = tn.relative_error(full, t)
        self.rmse = tn.rmse(full, t)
        self.r_squared = tn.r_squared(full, t)

    def extra_repr(self):
        extra = []
        extra.append(self.tn_weight.__repr__())
        extra.append('Compression ratio: {}/{} = {:g}'.format(*self.compression))
        extra.append('Relative error: %f' % self.relative_error)
        extra.append('RMSE: %f' % self.rmse)
        extra.append('R^2: %f' % self.r_squared)
        return "\n".join(extra)


class TensorTrain(TnTorchConv2d):
    def __init__(self, in_channels, out_channels, kernel_size, rank_scale,
                 dimensions, stride=1, padding=0, dilation=1, groups=1,
                 bias=True):
        def TT(tensor, rank_scale):
            tensor, ranks = dimensionize(tensor, dimensions, rank_scale)
            return tn.Tensor(tensor, ranks_tt=ranks['ranks_tt'])
        super(TensorTrain, self).__init__(in_channels, out_channels,
                                          kernel_size, rank_scale, TT,
                                          stride=stride, padding=padding,
                                          dilation=dilation, groups=groups,
                                          bias=bias)


class Tucker(TnTorchConv2d):
    def __init__(self, in_channels, out_channels, kernel_size, rank_scale,
                 dimensions, stride=1, padding=0, dilation=1, groups=1,
                 bias=True):
        def tucker(tensor, rank_scale):
            tensor, ranks = dimensionize(tensor, dimensions, rank_scale)
            return tn.Tensor(tensor, ranks_tucker=ranks['ranks_tucker'])
        super(Tucker, self).__init__(in_channels, out_channels, kernel_size,
                                     rank_scale, tucker, stride=stride,
                                     padding=padding, dilation=dilation,
                                     groups=groups, bias=bias)


class CP(TnTorchConv2d):
    def __init__(self, in_channels, out_channels, kernel_size, rank_scale,
                 dimensions, stride=1, padding=0, dilation=1, groups=1,
                 bias=True):
        def cp(tensor, rank_scale):
            tensor, ranks = dimensionize(tensor, dimensions, rank_scale)
            return tn.Tensor(tensor, ranks_cp=ranks['ranks_cp'])
        super(CP, self).__init__(in_channels, out_channels, kernel_size,
                                 rank_scale, cp, stride=stride,
                                 padding=padding, dilation=dilation,
                                 groups=groups, bias=bias)


if __name__ == '__main__':
    for ConvClass in [TensorTrain, Tucker, CP]:
        X = torch.randn(5, 16, 32, 32)
        tnlayer = ConvClass(16, 16, 3, 0.5, 2, bias=False)
        tnlayer.reset_parameters()
        print(tnlayer)
        tnlayer.zero_grad()
        y = tnlayer(X)
        l = y.sum()
        l.backward()
        for n, p in tnlayer.named_parameters():
            assert p.requires_grad, n
        assert torch.abs(tnlayer.weight_core_0.grad
                         - tnlayer.tn_weight.cores[0].grad).max() < 1e-5
        # same output on the GPU
        tnlayer, X = tnlayer.cuda(), X.cuda()
        assert torch.abs(tnlayer(X).cpu() - y).max() < 1e-5
    for ConvClass in [TensorTrain, Tucker, CP]:
        X = torch.randn(5, 16, 32, 32)
        tnlayer = ConvClass(16, 16, 3, 0.5, 4, bias=False)
        tnlayer.reset_parameters()
        print(tnlayer)
file_length: 8,252 | avg_line_length: 39.856436 | max_line_length: 99 | extension_type: py
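What dimensionize actually produces is easiest to see on a tensor whose element count has an exact d-th root; a small sketch (import path assumed):

import torch
from models.decomposed import dimensionize  # import path assumed

t = torch.randn(16, 32)                    # 512 elements, and 8**3 == 512
folded, ranks = dimensionize(t, 3, rank_scale=0.5)
print(folded.shape)       # torch.Size([8, 8, 8])
print(ranks['ranks_tt'])  # [4, 4]: rank_scale * min of neighbouring dims
print(ranks['ranks_cp'])  # 4: rank_scale * the smallest dimension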
repo: deficient-efficient
file: deficient-efficient-master/models/MobileNetV2.py
import torch
import torch.nn as nn
import math

# wildcard import for legacy reasons
if __name__ == '__main__':
    import sys
    sys.path.append("..")
from models.blocks import *
from models.wide_resnet import compression, group_lowrank


# only used in the first convolution, which we do not substitute by convention
def conv_bn(inp, oup, stride):
    return nn.Sequential(
        nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU6(inplace=True)
    )


# only used for final fully connected layers
def conv_1x1_bn(inp, oup, ConvClass):
    return nn.Sequential(
        ConvClass(inp, oup, 1, 1, 0, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU6(inplace=True)
    )


class InvertedResidual(nn.Module):
    def __init__(self, inp, oup, stride, expand_ratio, ConvClass):
        super(InvertedResidual, self).__init__()
        self.stride = stride
        self.Conv = ConvClass
        assert stride in [1, 2]

        hidden_dim = round(inp * expand_ratio)
        self.use_res_connect = self.stride == 1 and inp == oup

        if expand_ratio == 1:
            self.conv = nn.Sequential(
                # dw
                nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1,
                          groups=hidden_dim, bias=False),
                nn.BatchNorm2d(hidden_dim),
                nn.ReLU6(inplace=True),
                # pw-linear
                self.Conv(hidden_dim, oup, 1, 1, 0, bias=False),
                nn.BatchNorm2d(oup),
            )
        else:
            self.conv = nn.Sequential(
                # pw
                self.Conv(inp, hidden_dim, 1, 1, 0, bias=False),
                nn.BatchNorm2d(hidden_dim),
                nn.ReLU6(inplace=True),
                # dw
                nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1,
                          groups=hidden_dim, bias=False),
                nn.BatchNorm2d(hidden_dim),
                nn.ReLU6(inplace=True),
                # pw-linear
                self.Conv(hidden_dim, oup, 1, 1, 0, bias=False),
                nn.BatchNorm2d(oup),
            )

    def forward(self, x):
        if self.use_res_connect:
            return x + self.conv(x)
        else:
            return self.conv(x)


class MobileNetV2(nn.Module):
    def __init__(self, ConvClass, block=None, n_class=1000, input_size=224,
                 width_mult=1.):
        super(MobileNetV2, self).__init__()
        self.kwargs = dict(ConvClass=ConvClass, block=block, n_class=n_class,
                           input_size=input_size, width_mult=width_mult)
        block = InvertedResidual
        self.Conv = ConvClass
        input_channel = 32
        last_channel = 1280
        interverted_residual_setting = [
            # t, c, n, s
            [1, 16, 1, 1],
            [6, 24, 2, 2],
            [6, 32, 3, 2],
            [6, 64, 4, 2],
            [6, 96, 3, 1],
            [6, 160, 3, 2],
            [6, 320, 1, 1],
        ]

        # building first layer
        assert input_size % 32 == 0
        input_channel = int(input_channel * width_mult)
        self.last_channel = int(last_channel * width_mult) if width_mult > 1.0 else last_channel
        self.features = [conv_bn(3, input_channel, 2)]
        # building inverted residual blocks
        for t, c, n, s in interverted_residual_setting:
            output_channel = int(c * width_mult)
            for i in range(n):
                if i == 0:
                    self.features.append(block(input_channel, output_channel,
                                               s, expand_ratio=t,
                                               ConvClass=self.Conv))
                else:
                    self.features.append(block(input_channel, output_channel,
                                               1, expand_ratio=t,
                                               ConvClass=self.Conv))
                input_channel = output_channel
        # building last several layers
        self.features.append(conv_1x1_bn(input_channel, self.last_channel, self.Conv))
        # make it nn.Sequential
        self.features = nn.Sequential(*self.features)

        # building classifier
        self.classifier_conv = self.Conv(self.last_channel, n_class, 1, 1, 0, bias=True)
        #self.classifier = nn.Sequential(
        #    nn.Dropout(0.2),  # remove dropout for training according to github
        #    nn.Linear(self.last_channel, n_class),
        #)

        self._initialize_weights()

    def classifier(self, x):
        n, c = x.size()
        x = self.classifier_conv(x.view(n, c, 1, 1))
        n, c, _, _ = x.size()
        return x.view(n, c)

    def forward(self, x):
        #y_orig = self.features(x)
        attention_maps = []
        attention = lambda x: F.normalize(x.pow(2).mean(1).view(x.size(0), -1))
        y = x
        for block in self.features:
            y = block(y)
            if isinstance(block, InvertedResidual):
                if block.stride > 1:
                    attention_maps.append(attention(y))
        #error = torch.abs(y-y_orig).max()
        #assert error < 1e-2, f"Error {error} above 0.01"
        x = y
        x = x.mean(3).mean(2)
        x = self.classifier(x)
        return x, attention_maps

    def compression_ratio(self):
        return compression(self.__class__, self.kwargs)

    def grouped_parameters(self, weight_decay):
        return group_lowrank(self.named_parameters(), weight_decay,
                             self.compression_ratio())

    def _initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                if hasattr(m, 'weight'):
                    m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                n = m.weight.size(1)
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()


def save_reference():
    net = MobileNetV2()
    net.eval()
    x = torch.randn(1, 3, 224, 224).float()
    y = net(x)
    print(y.size())
    torch.save(x, "reference_input_mobilenet.torch")
    torch.save(y, "reference_output_mobilenet.torch")
    torch.save(net.state_dict(), "reference_state_mobilenet.torch")


def match_keys(net, state):
    nstate = net.state_dict()
    old_keys = [k for k in state]
    for i, k in enumerate(nstate):
        p = state[old_keys[i]]
        if i == (len(old_keys) - 2):
            n, m = p.size()
            nstate[k] = p.view(n, m, 1, 1)
        else:
            nstate[k] = p
    return nstate


def test():
    import os
    net = MobileNetV2(Conv)
    if os.path.exists("reference_state_mobilenet.torch"):
        state = torch.load("reference_state_mobilenet.torch")
        state = match_keys(net, state)
        net.load_state_dict(state)
        net.eval()
        x = torch.load("reference_input_mobilenet.torch")
    else:
        x = torch.randn(1, 3, 224, 224).float()
    y, _ = net(Variable(x))
    print(y.size())
    # check if these match the test weights
    if os.path.exists("reference_output_mobilenet.torch"):
        ref_output = torch.load("reference_output_mobilenet.torch")
        error = torch.abs(ref_output - y).max()
        print(f"Error: {error}, Max logit: {y.max()}/{ref_output.max()}, "
              f"Min logit: {y.min()}/{ref_output.min()}")
    state = {
        'net': net.state_dict(),
        'epoch': 150,
        'args': None,
        'width': None,
        'depth': None,
        'conv': 'Conv',
        'blocktype': None,
        'module': None,
        'train_losses': None,
        'train_errors': None,
        'val_losses': None,
        'val_errors': [28.2],
    }
    torch.save(state, "mobilenetv2.tonylins.t7")


def test_compression():
    net = MobileNetV2(Conv)
    #net = MobileNetV2(conv_function('Hashed_0.1'))
    nparams = lambda x: sum([p.numel() for p in x.parameters()])
    for block in net.features:
        print(nparams(block))
        for x in block:
            print(x)
            print(nparams(x))
    #CompressedConv = conv_function("Hashed_0.1")
    for conv in ['Shuffle_%i' % i for i in [4, 8, 16, 32]] + ['Hashed_0.01']:
        print(conv)
        CompressedConv = conv_function(conv)
        net = MobileNetV2(CompressedConv)
        print("  ", net.compression_ratio())


if __name__ == '__main__':
    test()
    #test_compression()
file_length: 8,316 | avg_line_length: 33.086066 | max_line_length: 118 | extension_type: py
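compression_ratio above instantiates the model twice, once with the substitute convolution and once with the plain Conv baseline, and reports the parameter ratio; a minimal use, with the import paths assumed from this repo's layout:

from models.blocks import conv_function
from models.MobileNetV2 import MobileNetV2

net = MobileNetV2(conv_function('Shuffle_4'))
ratio = net.compression_ratio()  # prints and returns params vs. plain Conv
print(ratio)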
repo: deficient-efficient
file: deficient-efficient-master/models/blocks.py
# blocks and convolution definitions
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.checkpoint import checkpoint, checkpoint_sequential

if __name__ == 'blocks' or __name__ == '__main__':
    from hashed import HashedConv2d, HalfHashedSeparable, HashedSeparable
    from decomposed import TensorTrain, Tucker, CP
else:
    from .hashed import HashedConv2d, HalfHashedSeparable, HashedSeparable
    from .decomposed import TensorTrain, Tucker, CP


def HashedDecimate(in_channels, out_channels, kernel_size, stride=1,
                   padding=0, dilation=1, groups=1, bias=False):
    # Hashed Conv2d using 1/10 the original parameters
    original_params = out_channels * in_channels * kernel_size * kernel_size // groups
    budget = original_params // 10
    return HashedConv2d(in_channels, out_channels, kernel_size, budget,
                        stride=stride, padding=padding, dilation=dilation,
                        groups=groups, bias=bias)


def SepHashedDecimate(in_channels, out_channels, kernel_size, stride=1,
                      padding=0, dilation=1, groups=1, bias=False):
    # Hashed Conv2d using 1/10 the original parameters
    assert groups == 1
    original_params = out_channels * in_channels * kernel_size * kernel_size
    budget = original_params // 10
    conv = HalfHashedSeparable(in_channels, out_channels, kernel_size, budget,
                               stride=stride, padding=padding,
                               dilation=dilation, groups=groups, bias=bias)
    n_params = sum([p.numel() for p in conv.parameters()])
    budget = budget + conv.hashed.bias.numel()
    assert n_params <= budget, f"{n_params} > {budget}"
    return conv


from pytorch_acdc.layers import FastStackedConvACDC


def ACDC(in_channels, out_channels, kernel_size, stride=1, padding=0,
         dilation=1, groups=1, bias=False):
    return FastStackedConvACDC(in_channels, out_channels, kernel_size, 12,
                               stride=stride, padding=padding,
                               dilation=dilation, groups=groups, bias=bias)


def OriginalACDC(in_channels, out_channels, kernel_size, stride=1, padding=0,
                 dilation=1, groups=1, bias=False):
    return FastStackedConvACDC(in_channels, out_channels, kernel_size, 12,
                               stride=stride, padding=padding,
                               dilation=dilation, groups=groups, bias=bias,
                               original=True)


class GenericLowRank(nn.Module):
    """A generic low rank layer implemented with a linear bottleneck, using
    two Conv2ds in sequence. Preceded by a depthwise grouped convolution in
    keeping with the other low-rank layers here."""
    def __init__(self, in_channels, out_channels, kernel_size, rank, stride=1,
                 padding=0, dilation=1, groups=1, bias=False):
        assert groups == 1
        super(GenericLowRank, self).__init__()
        if kernel_size > 1:
            self.grouped = nn.Conv2d(in_channels, in_channels, kernel_size,
                                     stride=stride, padding=padding,
                                     dilation=dilation, groups=in_channels,
                                     bias=False)
            self.lowrank_contract = nn.Conv2d(in_channels, rank, 1, bias=False)
            self.lowrank_expand = nn.Conv2d(rank, out_channels, 1, bias=bias)
        else:
            self.grouped = None
            self.lowrank_contract = nn.Conv2d(in_channels, rank, 1,
                                              stride=stride,
                                              dilation=dilation, bias=False)
            self.lowrank_expand = nn.Conv2d(rank, out_channels, 1, bias=bias)

    def forward(self, x):
        if self.grouped is not None:
            x = self.grouped(x)
        x = self.lowrank_contract(x)
        return self.lowrank_expand(x)


class LowRank(nn.Module):
    """A low rank layer implemented with a linear bottleneck that shares one
    weight matrix between the contraction and the (transposed) expansion.
    Preceded by a depthwise grouped convolution in keeping with the other
    low-rank layers here."""
    def __init__(self, in_channels, out_channels, kernel_size, rank, stride=1,
                 padding=0, dilation=1, groups=1, bias=False):
        assert groups == 1
        assert out_channels % in_channels == 0
        self.upsample = out_channels // in_channels
        super(LowRank, self).__init__()
        if kernel_size > 1:
            self.grouped = nn.Conv2d(in_channels, in_channels, kernel_size,
                                     stride=stride, padding=padding,
                                     dilation=dilation, groups=in_channels,
                                     bias=False)
            self.lowrank = nn.Conv2d(self.upsample * in_channels, rank, 1,
                                     bias=bias)
        else:
            self.grouped = None
            self.lowrank = nn.Conv2d(self.upsample * in_channels, rank, 1,
                                     stride=stride, dilation=dilation,
                                     bias=bias)

    def forward(self, x):
        if self.grouped is not None:
            x = self.grouped(x)
        if self.upsample > 1:
            x = x.repeat(1, self.upsample, 1, 1)
        x = F.conv2d(x, self.lowrank.weight, None, self.lowrank.stride,
                     self.lowrank.padding, self.lowrank.dilation,
                     self.lowrank.groups)
        return F.conv2d(x, self.lowrank.weight.permute(1, 0, 2, 3),
                        self.lowrank.bias)


# from: https://github.com/kuangliu/pytorch-cifar/blob/master/models/shufflenet.py#L10-L19
class ShuffleBlock(nn.Module):
    def __init__(self, groups):
        super(ShuffleBlock, self).__init__()
        self.groups = groups

    def forward(self, x):
        '''Channel shuffle: [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,W] -> [N,C,H,W]'''
        N, C, H, W = x.size()
        g = self.groups
        return x.view(N, g, C // g, H, W).permute(0, 2, 1, 3, 4).contiguous().view(N, C, H, W)


class LinearShuffleNet(nn.Module):
    """Linear version of the ShuffleNet block, minus the shortcut connection,
    as we assume relevant shortcuts already exist in the network having a
    substitution. When linear, this can be viewed as a low-rank tensor
    decomposition."""
    def __init__(self, in_channels, out_channels, kernel_size, shuffle_groups,
                 stride=1, padding=0, dilation=1, groups=1, bias=False):
        assert groups == 1
        super(LinearShuffleNet, self).__init__()
        # why 4? https://github.com/jaxony/ShuffleNet/blob/master/model.py#L67
        bottleneck_channels = out_channels // 4
        self.shuffle_gconv1 = nn.Conv2d(in_channels, bottleneck_channels, 1,
                                        groups=shuffle_groups, bias=False)
        self.shuffle = ShuffleBlock(shuffle_groups)
        self.shuffle_dwconv = nn.Conv2d(bottleneck_channels,
                                        bottleneck_channels, kernel_size,
                                        stride=stride, padding=padding,
                                        dilation=dilation,
                                        groups=bottleneck_channels,
                                        bias=False)
        self.shuffle_gconv2 = nn.Conv2d(bottleneck_channels, out_channels, 1,
                                        groups=shuffle_groups, bias=bias)

    def forward(self, x):
        x = self.shuffle_gconv1(x)
        x = self.shuffle(x)
        x = self.shuffle_dwconv(x)
        return self.shuffle_gconv2(x)


def cant_be_shuffled(shuffle_groups, in_channels, out_channels):
    # utility function, true if we can't instance shufflenet block using this
    divides_in = in_channels % shuffle_groups == 0
    divides_out = out_channels % shuffle_groups == 0
    divides_bottleneck = (out_channels // 4) % shuffle_groups == 0
    return not (divides_in and divides_out and divides_bottleneck)


class DepthwiseSep(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True):
        super(DepthwiseSep, self).__init__()
        assert groups == 1
        if kernel_size > 1:
            self.grouped = nn.Conv2d(in_channels, in_channels, kernel_size,
                                     stride=stride, padding=padding,
                                     dilation=dilation, groups=in_channels,
                                     bias=False)
            self.pointwise = nn.Conv2d(in_channels, out_channels, 1, bias=bias)
        else:
            self.pointwise = nn.Conv2d(in_channels, out_channels, 1,
                                       stride=stride, padding=padding,
                                       dilation=dilation, bias=bias)

    def forward(self, x):
        if hasattr(self, 'grouped'):
            out = self.grouped(x)
        else:
            out = x
        return self.pointwise(out)


class Conv(nn.Module):
    def __init__(self, in_planes, out_planes, kernel_size, stride=1,
                 padding=1, dilation=1, bias=False):
        super(Conv, self).__init__()
        # Dumb normal conv incorporated into a class
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
                              stride=stride, padding=padding, bias=bias,
                              dilation=dilation)

    def forward(self, x):
        return self.conv(x)


def conv_function(convtype):
    # if convtype contains an underscore, it must have a hyperparam in it
    if "_" in convtype:
        convtype, hyperparam = convtype.split("_")
        if convtype == 'ACDC':
            # then hyperparam controls how many layers in each conv
            n_layers = int(round(float(hyperparam)))
            def conv(in_channels, out_channels, kernel_size, stride=1,
                     padding=0, dilation=1, groups=1, bias=False):
                return FastStackedConvACDC(in_channels, out_channels,
                                           kernel_size, n_layers,
                                           stride=stride, padding=padding,
                                           dilation=dilation, groups=groups,
                                           bias=bias)
        elif convtype == 'Hashed':
            # then hyperparam controls relative budget for each layer
            budget_scale = float(hyperparam)
            def conv(in_channels, out_channels, kernel_size, stride=1,
                     padding=0, dilation=1, groups=1, bias=False):
                original_params = out_channels * in_channels * kernel_size * kernel_size // groups
                budget = int(original_params * budget_scale)
                return HashedConv2d(in_channels, out_channels, kernel_size,
                                    budget, stride=stride, padding=padding,
                                    dilation=dilation, groups=groups,
                                    bias=bias)
        elif convtype == 'SepHashed':
            # then hyperparam controls relative budget for each layer
            budget_scale = float(hyperparam)
            def conv(in_channels, out_channels, kernel_size, stride=1,
                     padding=0, dilation=1, groups=1, bias=False):
                original_params = out_channels * in_channels // groups
                budget = int(original_params * budget_scale)
                if kernel_size > 1:
                    # budget for a grouped convolution
                    budget += in_channels * kernel_size * kernel_size
                return HalfHashedSeparable(in_channels, out_channels,
                                           kernel_size, budget, stride=stride,
                                           padding=padding, dilation=dilation,
                                           groups=groups, bias=bias)
        elif convtype == 'Generic':
            rank_scale = float(hyperparam)
            def conv(in_channels, out_channels, kernel_size, stride=1,
                     padding=0, dilation=1, groups=1, bias=False):
                full_rank = max(in_channels, out_channels)
                rank = int(rank_scale * full_rank)
                return GenericLowRank(in_channels, out_channels, kernel_size,
                                      rank, stride=stride, padding=padding,
                                      dilation=dilation, groups=groups,
                                      bias=bias)
        elif convtype == 'LR':
            rank_scale = float(hyperparam)
            def conv(in_channels, out_channels, kernel_size, stride=1,
                     padding=0, dilation=1, groups=1, bias=False):
                full_rank = max(in_channels, out_channels)
                rank = int(rank_scale * full_rank)
                return LowRank(in_channels, out_channels, kernel_size, rank,
                               stride=stride, padding=padding,
                               dilation=dilation, groups=groups, bias=bias)
        elif convtype == 'TensorTrain':
            rank_scale = float(hyperparam)
            def conv(in_channels, out_channels, kernel_size, stride=1,
                     padding=0, dilation=1, groups=1, bias=False):
                return TensorTrain(in_channels, out_channels, kernel_size,
                                   rank_scale, 3, stride=stride,
                                   padding=padding, dilation=dilation,
                                   groups=groups, bias=bias)
        elif convtype == 'Tucker':
            rank_scale = float(hyperparam)
            def conv(in_channels, out_channels, kernel_size, stride=1,
                     padding=0, dilation=1, groups=1, bias=False):
                return Tucker(in_channels, out_channels, kernel_size,
                              rank_scale, 3, stride=stride, padding=padding,
                              dilation=dilation, groups=groups, bias=bias)
        elif convtype == 'CP':
            assert False, "Deprecated"
            rank_scale = float(hyperparam)
            def conv(in_channels, out_channels, kernel_size, stride=1,
                     padding=0, dilation=1, groups=1, bias=False):
                return CP(in_channels, out_channels, kernel_size, rank_scale,
                          stride=stride, padding=padding, dilation=dilation,
                          groups=groups, bias=bias)
        elif convtype == 'Shuffle':
            def conv(in_channels, out_channels, kernel_size, stride=1,
                     padding=0, dilation=1, groups=1, bias=False):
                shuffle_groups = int(hyperparam)
                while cant_be_shuffled(shuffle_groups, in_channels, out_channels):
                    shuffle_groups += -1
                return LinearShuffleNet(in_channels, out_channels,
                                        kernel_size, shuffle_groups,
                                        stride=stride, padding=padding,
                                        dilation=dilation, groups=groups,
                                        bias=bias)
    else:
        if convtype == 'Conv':
            conv = Conv
        elif convtype == 'ACDC':
            conv = ACDC
        elif convtype == 'OriginalACDC':
            conv = OriginalACDC
        elif convtype == 'HashedDecimate':
            conv = HashedDecimate
        elif convtype == 'SepHashedDecimate':
            conv = SepHashedDecimate
        elif convtype == 'Sep':
            conv = DepthwiseSep
        else:
            raise ValueError('Conv "%s" not recognised' % convtype)
    return conv


class BasicBlock(nn.Module):
    def __init__(self, in_planes, out_planes, stride, dropRate=0.0, conv=Conv):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = conv(in_planes, out_planes, kernel_size=3, stride=stride,
                          padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = conv(out_planes, out_planes, kernel_size=3, stride=1,
                          padding=1, bias=False)
        #assert self.conv2.grouped.padding[0] == 1
        self.droprate = dropRate
        self.equalInOut = (in_planes == out_planes)
        self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes,
            out_planes, kernel_size=1, stride=stride, padding=0,
            bias=False) or None

    def forward(self, x):
        if not self.equalInOut:
            x = self.relu1(self.bn1(x))
        else:
            out = self.relu1(self.bn1(x))
        out = self.relu2(self.bn2(self.conv1(out if
self.equalInOut else x))) if self.droprate > 0: out = F.dropout(out, p=self.droprate, training=self.training) out = self.conv2(out) return torch.add(x if self.equalInOut else self.convShortcut(x), out) # modified from torchvision class Bottleneck(nn.Module): """Bottleneck architecture block for ResNet""" def __init__(self, inplanes, planes, ConvClass, stride=1, downsample=None, expansion=4): super(Bottleneck, self).__init__() self.expansion = expansion pointwise = lambda i,o: ConvClass(i, o, kernel_size=1, padding=0, bias=False) self.conv1 = pointwise(inplanes, planes) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = ConvClass(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = pointwise(planes, planes * self.expansion) self.bn3 = nn.BatchNorm2d(planes * self.expansion) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride def add_residual(self, x, out): if self.downsample is not None: residual = self.downsample(x) else: residual = x return out + residual def forward(self, x): out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) #out = checkpoint(self.add_residual, x, out) out = self.add_residual(x, out) out = self.relu(out) return out class NetworkBlock(nn.Module): def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0, conv = Conv): super(NetworkBlock, self).__init__() self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate, conv) def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate, conv): layers = [] for i in range(nb_layers): layers.append(block(i == 0 and in_planes or out_planes, out_planes, i == 0 and stride or 1, dropRate, conv)) return nn.Sequential(*layers) def forward(self, x): return self.layer(x) if __name__ == '__main__': X = torch.randn(5,16,32,32) # sanity of generic low-rank layer generic = GenericLowRank(16, 32, 3, 2) for n,p in generic.named_parameters(): print(n, p.size(), p.numel()) out = generic(X) print(out.size()) low = LowRank(16, 32, 3, 2) for n, p in low.named_parameters(): print(n, p.size(), p.numel()) out = low(X) print(out.size()) assert False # check we don't initialise a grouped conv when not required layers_to_test = [LowRank(3,32,1,1), GenericLowRank(3,32,1,1), HalfHashedSeparable(3,32,1,10), TensorTrain(3,32,1,0.5,3), Tucker(3,32,1,0.5,3), CP(3,32,1,0.5,3), ACDC(3,32,1)] for layer in layers_to_test: assert getattr(layer, 'grouped', None) is None # and we *do* when it is required layers_to_test = [LowRank(3,32,3,1), GenericLowRank(3,32,3,1), HalfHashedSeparable(3,32,3,100), TensorTrain(3,32,3,0.5,3), Tucker(3,32,3,0.5,3), CP(3,32,3,0.5,3), ACDC(3,32,3)] for layer in layers_to_test: assert getattr(layer, 'grouped', None) is not None, layer # sanity of LinearShuffleNet X = torch.randn(5,16,32,32) shuffle = LinearShuffleNet(16,32,3,4) print(shuffle(X).size())
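To make the factory interface concrete, here is a minimal usage sketch. The 'TensorTrain_0.5' spec string and the layer shapes are invented for illustration; it assumes the file is importable as `blocks` (consistent with the `__name__ == 'blocks'` guard above).

# hypothetical usage of conv_function; spec string and shapes are examples only
import torch
from blocks import conv_function

conv = conv_function('TensorTrain_0.5')  # the part after '_' sets rank_scale
layer = conv(16, 32, 3, padding=1)       # drop-in for nn.Conv2d(16, 32, 3, padding=1)
x = torch.randn(2, 16, 8, 8)
print(layer(x).size())                   # expect torch.Size([2, 32, 8, 8])
print(sum(p.numel() for p in layer.parameters()))  # compressed parameter count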
18,941
43.992874
120
py
ACRO
ACRO-main/setup.py
"""Python setup script for installing ACRO.""" from pathlib import Path from setuptools import find_packages, setup this_directory = Path(__file__).parent long_description = (this_directory / "README.md").read_text() setup( name="acro", version="0.4.2", license="MIT", maintainer="Jim Smith", maintainer_email="james.smith@uwe.ac.uk", description="ACRO: Tools for the Automatic Checking of Research Outputs", long_description=long_description, long_description_content_type="text/markdown", url="https://github.com/AI-SDC/ACRO", packages=find_packages(), package_data={"acro": ["default.yaml"]}, python_requires=">=3.10", install_requires=["lxml", "numpy", "openpyxl", "pandas", "PyYAML", "statsmodels"], classifiers=[ "Development Status :: 3 - Alpha", "Intended Audience :: Developers", "Intended Audience :: Science/Research", "License :: OSI Approved :: MIT License", "Natural Language :: English", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Topic :: Scientific/Engineering", "Topic :: Scientific/Engineering :: Information Analysis", "Operating System :: OS Independent", ], keywords=[ "data-privacy", "data-protection", "privacy", "privacy-tools", "statistical-disclosure-control", ], )
1,427
31.454545
86
py
ACRO
ACRO-main/test/stata.py
#!/usr/bin/env python
"""ACRO Stata Tests."""

# ACRO Tests
import os

import pandas as pd

from acro import ACRO, add_constant

# Instantiate ACRO
acro = ACRO()

# Load test data
path = os.path.join("../data", "test_data.dta")
df = pd.read_stata(path)
df.head()

# Pandas crosstab
table = pd.crosstab(df.year, df.grant_type)

# ACRO crosstab
safe_table = acro.crosstab(df.year, df.grant_type)

# ACRO crosstab with aggregation function
safe_table = acro.crosstab(df.year, df.grant_type, values=df.inc_grants, aggfunc="mean")

# ACRO crosstab with negative values
negative = df.inc_grants.copy()
negative[0:10] = -10
safe_table = acro.crosstab(df.year, df.grant_type, values=negative, aggfunc="mean")

# ACRO pivot_table
table = acro.pivot_table(
    df, index=["grant_type"], values=["inc_grants"], aggfunc=["mean", "std"]
)

# ACRO pivot_table with negative values
df.loc[0:10, "inc_grants"] = -10
table = acro.pivot_table(
    df, index=["grant_type"], values=["inc_grants"], aggfunc=["mean", "std"]
)

# ACRO OLS
new_df = df[["inc_activity", "inc_grants", "inc_donations", "total_costs"]]
new_df = new_df.dropna()
y = new_df["inc_activity"]
x = new_df[["inc_grants", "inc_donations", "total_costs"]]
x = add_constant(x)
results = acro.ols(y, x)
results.summary()

# ACRO OLSR
results = acro.olsr(
    formula="inc_activity ~ inc_grants + inc_donations + total_costs", data=new_df
)
results.summary()

# ACRO Probit
new_df = df[["survivor", "inc_activity", "inc_grants", "inc_donations", "total_costs"]]
new_df = new_df.dropna()
y = new_df["survivor"].astype("category").cat.codes  # numeric
y.name = "survivor"
x = new_df[["inc_activity", "inc_grants", "inc_donations", "total_costs"]]
x = add_constant(x)
results = acro.probit(y, x)
results.summary()

# ACRO Logit
results = acro.logit(y, x)
results.summary()

# List current ACRO outputs
acro.print_outputs()

# Remove some ACRO outputs before finalising
acro.remove_output("output_1")
acro.remove_output("output_4")

# Finalise ACRO
output = acro.finalise("test_results.xlsx")
2,056
18.778846
88
py
ACRO
ACRO-main/test/test_initial.py
"""This module contains unit tests.""" import json import os import numpy as np import pandas as pd import pytest from acro import ACRO, add_constant, record, utils from acro.record import Records, load_records # pylint: disable=redefined-outer-name PATH: str = "RES_PYTEST" @pytest.fixture def data() -> pd.DataFrame: """Load test data.""" path = os.path.join("data", "test_data.dta") data = pd.read_stata(path) return data @pytest.fixture def acro() -> ACRO: """Initialise ACRO.""" return ACRO(suppress=True) def test_crosstab_without_suppression(data): """Crosstab threshold without automatic suppression.""" acro = ACRO(suppress=False) _ = acro.crosstab(data.year, data.grant_type) output = acro.results.get_index(0) correct_summary: str = "fail; threshold: 6 cells may need suppressing; " assert output.summary == correct_summary assert 48 == output.output[0]["R/G"].sum() def test_crosstab_multiple_aggregate_function(data, acro): """Crosstab with multiple agg funcs.""" acro = ACRO(suppress=False) _ = acro.crosstab( data.year, data.grant_type, values=data.inc_grants, aggfunc=["mean", "std"] ) output = acro.results.get_index(0) correct_summary: str = ( "fail; threshold: 12 cells may need suppressing;" " p-ratio: 2 cells may need suppressing; " "nk-rule: 2 cells may need suppressing; " ) assert ( output.summary == correct_summary ), f"\n{output.summary}\n should be \n{correct_summary}\n" print(f"{output.output[0]['mean'][ 'R/G'].sum()}") correctval = 97383496.0 errmsg = f"{output.output[0]['mean']['R/G'].sum()} should be {correctval}" assert correctval == output.output[0]["mean"]["R/G"].sum(), errmsg def test_crosstab_threshold(data, acro): """Crosstab threshold test.""" _ = acro.crosstab(data.year, data.grant_type) output = acro.results.get_index(0) total_nan: int = output.output[0]["R/G"].isnull().sum() assert total_nan == 6 positions = output.sdc["cells"]["threshold"] for pos in positions: row, col = pos assert np.isnan(output.output[0].iloc[row, col]) acro.add_exception("output_0", "Let me have it") results: Records = acro.finalise() correct_summary: str = "fail; threshold: 6 cells suppressed; " output = results.get_index(0) assert output.summary == correct_summary def test_crosstab_multiple(data, acro): """Crosstab multiple rule test.""" _ = acro.crosstab( data.year, data.grant_type, values=data.inc_grants, aggfunc="mean" ) acro.add_exception("output_0", "Let me have it") results: Records = acro.finalise() correct_summary: str = ( "fail; threshold: 6 cells suppressed; p-ratio: 1 cells suppressed; " "nk-rule: 1 cells suppressed; " ) output = results.get_index(0) assert output.summary == correct_summary def test_negatives(data, acro): """Pivot table and Crosstab with negative values.""" data.loc[0:10, "inc_grants"] = -10 _ = acro.crosstab( data.year, data.grant_type, values=data.inc_grants, aggfunc="mean" ) _ = acro.pivot_table( data, index=["grant_type"], values=["inc_grants"], aggfunc=["mean", "std"] ) acro.add_exception("output_0", "Let me have it") acro.add_exception("output_1", "I want this") results: Records = acro.finalise() correct_summary: str = "review; negative values found" output_0 = results.get_index(0) output_1 = results.get_index(1) assert output_0.summary == correct_summary assert output_1.summary == correct_summary def test_pivot_table_without_suppression(data): """Pivot table without automatic suppression.""" acro = ACRO(suppress=False) _ = acro.pivot_table( data, index=["grant_type"], values=["inc_grants"], aggfunc=["mean", "std"] ) output_0 = acro.results.get_index(0) assert 
36293992.0 == output_0.output[0]["mean"]["inc_grants"].sum() assert "pass" == output_0.summary def test_pivot_table_pass(data, acro): """Pivot table pass test.""" _ = acro.pivot_table( data, index=["grant_type"], values=["inc_grants"], aggfunc=["mean", "std"] ) results: Records = acro.finalise() correct_summary: str = "pass" output_0 = results.get_index(0) assert output_0.summary == correct_summary def test_pivot_table_cols(data, acro): """Pivot table with columns test.""" _ = acro.pivot_table( data, index=["grant_type"], columns=["year"], values=["inc_grants"], aggfunc=["mean", "std"], ) acro.add_exception("output_0", "Let me have it") results: Records = acro.finalise() correct_summary: str = ( "fail; threshold: 14 cells suppressed; " "p-ratio: 2 cells suppressed; nk-rule: 2 cells suppressed; " ) output_0 = results.get_index(0) assert output_0.summary == correct_summary def test_ols(data, acro): """Ordinary Least Squares test.""" new_df = data[["inc_activity", "inc_grants", "inc_donations", "total_costs"]] new_df = new_df.dropna() # OLS endog = new_df.inc_activity exog = new_df[["inc_grants", "inc_donations", "total_costs"]] exog = add_constant(exog) results = acro.ols(endog, exog) assert results.df_resid == 807 assert results.rsquared == pytest.approx(0.894, 0.001) # OLSR results = acro.olsr( formula="inc_activity ~ inc_grants + inc_donations + total_costs", data=new_df ) assert results.df_resid == 807 assert results.rsquared == pytest.approx(0.894, 0.001) # Finalise results = acro.finalise() correct_summary: str = "pass; dof=807.0 >= 10" output_0 = results.get_index(0) output_1 = results.get_index(1) assert output_0.summary == correct_summary assert output_1.summary == correct_summary def test_probit_logit(data, acro): """Probit and Logit tests.""" new_df = data[ ["survivor", "inc_activity", "inc_grants", "inc_donations", "total_costs"] ] new_df = new_df.dropna() endog = new_df["survivor"].astype("category").cat.codes # numeric endog.name = "survivor" exog = new_df[["inc_activity", "inc_grants", "inc_donations", "total_costs"]] exog = add_constant(exog) # Probit results = acro.probit(endog, exog) assert results.df_resid == 806 assert results.prsquared == pytest.approx(0.208, 0.01) # Logit results = acro.logit(endog, exog) assert results.df_resid == 806 assert results.prsquared == pytest.approx(0.214, 0.01) # ProbitR new_df["survivor"] = new_df["survivor"].astype("category").cat.codes results = acro.probitr( formula="survivor ~ inc_activity + inc_grants + inc_donations + total_costs", data=new_df, ) assert results.df_resid == 806 assert results.prsquared == pytest.approx(0.208, 0.01) # LogitR results = acro.logitr( formula="survivor ~ inc_activity + inc_grants + inc_donations + total_costs", data=new_df, ) assert results.df_resid == 806 assert results.prsquared == pytest.approx(0.214, 0.01) # Finalise results = acro.finalise() correct_summary: str = "pass; dof=806.0 >= 10" output_0 = results.get_index(0) output_1 = results.get_index(1) output_2 = results.get_index(2) output_3 = results.get_index(3) assert output_0.summary == correct_summary assert output_1.summary == correct_summary assert output_2.summary == correct_summary assert output_3.summary == correct_summary def test_finalise_excel(data, acro): """Finalise excel test.""" _ = acro.crosstab(data.year, data.grant_type) acro.add_exception("output_0", "Let me have it") results: Records = acro.finalise(PATH, "xlsx") output_0 = results.get_index(0) filename = os.path.normpath(f"{PATH}/results.xlsx") load_data = pd.read_excel(filename, 
sheet_name=output_0.uid) correct_cell: str = "_ = acro.crosstab(data.year, data.grant_type)" assert load_data.iloc[0, 0] == "Command" assert load_data.iloc[0, 1] == correct_cell def test_output_removal(data, acro, monkeypatch): """Output removal and print test.""" _ = acro.crosstab(data.year, data.grant_type) _ = acro.crosstab(data.year, data.grant_type) _ = acro.crosstab(data.year, data.grant_type) exceptions = ["I want it", "Let me have it", "Please!"] monkeypatch.setattr("builtins.input", lambda _: exceptions.pop(0)) results: Records = acro.finalise() output_0 = results.get("output_0") output_1 = results.get("output_1") # remove something that is there acro.remove_output(output_0.uid) results = acro.finalise() correct_summary: str = "fail; threshold: 6 cells suppressed; " keys = results.get_keys() assert output_0.uid not in keys assert output_1.uid in keys assert output_1.summary == correct_summary acro.print_outputs() # remove something that is not there with pytest.raises(ValueError): acro.remove_output("123") def test_load_output(): """Empty array when loading output.""" with pytest.raises(ValueError): record.load_output(PATH, []) def test_finalise_invalid(data, acro): """Invalid output format when finalising.""" _ = acro.crosstab(data.year, data.grant_type) output_0 = acro.results.get_index(0) output_0.exception = "Let me have it" with pytest.raises(ValueError): _ = acro.finalise(PATH, "123") def test_finalise_json(data, acro): """Finalise json test.""" _ = acro.crosstab(data.year, data.grant_type) acro.add_exception("output_0", "Let me have it") # write JSON result: Records = acro.finalise(PATH, "json") # load JSON loaded: Records = load_records(PATH) orig = result.get_index(0) read = loaded.get_index(0) print("*****************************") print(orig) print("*****************************") print(read) print("*****************************") # check equal assert orig.uid == read.uid assert orig.status == read.status assert orig.output_type == read.output_type assert orig.properties == read.properties assert orig.sdc == read.sdc assert orig.command == read.command assert orig.summary == read.summary assert orig.comments == read.comments assert orig.timestamp == read.timestamp assert (orig.output[0].reset_index()).equals(read.output[0]) # test reading JSON with open(os.path.normpath(f"{PATH}/results.json"), encoding="utf-8") as file: json_data = json.load(file) results: dict = json_data["results"] assert results[orig.uid]["files"][0]["name"] == f"{orig.uid}_0.csv" def test_rename_output(data, acro): """Output renaming test.""" _ = acro.crosstab(data.year, data.grant_type) _ = acro.crosstab(data.year, data.grant_type) acro.add_exception("output_0", "Let me have it") acro.add_exception("output_1", "I want this") results: Records = acro.finalise() output_0 = results.get_index(0) orig_name = output_0.uid new_name = "cross_table" acro.rename_output(orig_name, new_name) results = acro.finalise() assert output_0.uid == new_name assert orig_name not in results.get_keys() assert os.path.exists(f"outputs/{new_name}_0.csv") # rename an output that doesn't exist with pytest.raises(ValueError): acro.rename_output("123", "name") # rename an output to another that already exists with pytest.raises(ValueError): acro.rename_output("output_1", "cross_table") def test_add_comments(data, acro): """Adding comments to output test.""" _ = acro.crosstab(data.year, data.grant_type) acro.add_exception("output_0", "Let me have it") results: Records = acro.finalise() output_0 = results.get_index(0) assert 
output_0.comments == [] comment = "This is a cross table between year and grant_type" acro.add_comments(output_0.uid, comment) assert output_0.comments == [comment] comment_1 = "6 cells were suppressed" acro.add_comments(output_0.uid, comment_1) assert output_0.comments == [comment, comment_1] # add a comment to something that is not there with pytest.raises(ValueError): acro.add_comments("123", "comment") def test_custom_output(acro): """Adding an unsupported output to the results dictionary test.""" filename = "notebooks/XandY.jfif" file_path = os.path.normpath(filename) acro.custom_output(filename) acro.add_exception("output_0", "Let me have it") results: Records = acro.finalise(path=PATH) output_0 = results.get_index(0) assert output_0.output == [file_path] assert os.path.exists(os.path.normpath(f"{PATH}/XandY.jfif")) def test_missing(data, acro, monkeypatch): """Pivot table and Crosstab with negative values.""" utils.CHECK_MISSING_VALUES = True data.loc[0:10, "inc_grants"] = np.NaN _ = acro.crosstab( data.year, data.grant_type, values=data.inc_grants, aggfunc="mean" ) _ = acro.pivot_table( data, index=["grant_type"], values=["inc_grants"], aggfunc=["mean", "std"] ) exceptions = ["I want it", "Let me have it"] monkeypatch.setattr("builtins.input", lambda _: exceptions.pop(0)) results: Records = acro.finalise() correct_summary: str = "review; missing values found" output_0 = results.get_index(0) output_1 = results.get_index(1) assert output_0.summary == correct_summary assert output_1.summary == correct_summary assert output_0.exception == "I want it" assert output_1.exception == "Let me have it" def test_suppression_error(caplog): """Apply suppression type error test.""" table_data = {"col1": [1, 2], "col2": [3, 4]} mask_data = {"col1": [np.NaN, True], "col2": [True, True]} table = pd.DataFrame(data=table_data) masks = {"test": pd.DataFrame(data=mask_data)} utils.apply_suppression(table, masks) assert "problem mask test is not binary" in caplog.text def test_adding_exception(acro): """Adding an exception to an output that doesn't exist test.""" with pytest.raises(ValueError): acro.add_exception("output_0", "Let me have it")
14,281
34.527363
86
py
ACRO
ACRO-main/test/test_stata_interface.py
"""This module contains unit tests for the stata interface.""" import os import pandas as pd import pytest from acro import ACRO from stata.acro_stata_parser import ( apply_stata_expstmt, apply_stata_ifstmt, find_brace_contents, parse_and_run, parse_table_details, ) # pylint: disable=redefined-outer-name @pytest.fixture def acro() -> ACRO: """Initialise ACRO.""" return ACRO() @pytest.fixture def data() -> pd.DataFrame: """Load test data.""" path = os.path.join("data", "test_data.dta") data = pd.read_stata(path) return data # --- global object and dummy code to replace things in acro.ado stata_acro = "empty" # pylint:disable=invalid-name def dummy_acrohandler( data, command, varlist, exclusion, exp, weights, options ): # pylint:disable=too-many-arguments """ Provides an alternative interface that mimics the code in acro.ado Most notably the presence of a global variable called stata_acro. """ # global stata_acro acro_outstr = parse_and_run( data, command, varlist, exclusion, exp, weights, options ) return acro_outstr # --- Helper functions def test_find_brace_contents(): """Tests helper function that extracts contents 'A B C' of something specified via X(A B C) on the stata command line. """ options = "by(grant_type) contents(mean sd inc_activity) suppress nototals" res, substr = find_brace_contents("by", options) assert res assert substr == "grant_type" res, substr = find_brace_contents("contents", options) assert res assert substr == "mean sd inc_activity" res, substr = find_brace_contents("foo", options) assert not res assert substr == "foo not found" def test_apply_stata_ifstmt(data): """Tests that if statements work for selection.""" ifstring = "year!=2013" all_list = list(data["year"].unique()) smaller = apply_stata_ifstmt(ifstring, data) all_list.remove(2013) assert list(smaller["year"].unique()) == all_list ifstring2 = "year != 2013 & year <2015" all_list.remove(2015) smaller2 = apply_stata_ifstmt(ifstring2, data) assert list(smaller2["year"].unique()) == all_list def test_apply_stata_expstmt(data): """Tests that in statements work for row selection.""" length = data.shape[0] # use of f/F and l/L for first and last with specified row range exp = "f/5" smaller = apply_stata_expstmt(exp, data) assert smaller.shape[0] == 5 assert (smaller.iloc[-1].fillna(0).values == data.iloc[4].fillna(0).values).all() exp = "F/5" smaller = apply_stata_expstmt(exp, data) assert smaller.shape[0] == 5 assert (smaller.iloc[-1].fillna(0).values == data.iloc[4].fillna(0).values).all() exp = "-6/l" smaller = apply_stata_expstmt(exp, data) assert smaller.shape[0] == 5 assert ( smaller.iloc[-1].fillna(0).values == data.iloc[length - 2].fillna(0).values ).all() exp = "-6/L" smaller = apply_stata_expstmt(exp, data) assert smaller.shape[0] == 5 assert ( smaller.iloc[-1].fillna(0).values == data.iloc[length - 2].fillna(0).values ).all() # invalid range should default to end of dataframe exp = "500/450" smaller = apply_stata_expstmt(exp, data) assert smaller.shape[0] == length - 1 - 500 # -----acro management def test_stata_acro_init(): """ Tests creation of an acro object at the start of a session For stata this gets held in a variable stata_acro Which is initialsied to the string "empty" in the acro.ado file Then should be pointed at a new acro instance. 
""" assert isinstance(stata_acro, str) ret = dummy_acrohandler( data, command="init", varlist="", exclusion="", exp="", weights="", options="" ) assert ( ret == "acro analysis session created\n" ), f"wrong string for acro init: {ret}\n" # assert isinstance(stata_acro,ACRO),f'wrong type for stata_acro:{type(stata_acro)}' def test_stata_print_outputs(data): """Checks print_outputs gets called.""" ret = dummy_acrohandler( data, command="print_outputs", varlist=" inc_activity inc_grants inc_donations total_costs", exclusion="", exp="", weights="", options="", ) assert len(ret) == 0, "return string should be empty" # ----main SDC functionality def test_simple_table(data): """ Checks that the simple table command works as expected Does via reference to direct call to pd.crosstab() To make sure table specification is parsed correctly acro SDC analysis is tested elsewhere. """ correct = pd.crosstab( index=data["survivor"], columns=data["grant_type"] ).to_string() ret = dummy_acrohandler( data, "table", "survivor grant_type", exclusion="", exp="", weights="", options="nototals", ) ret = ret.replace("NaN", "0") ret = ret.replace(".0", "") assert ret.split() == correct.split(), f"got\n{ret}\n expected\n{correct}" def test_parse_table_details(data): """ Series of checks that the varlist and options are parsed correctly by the helper function. """ varlist = ["survivor", "grant_type", "year"] varnames = data.columns options = "by(grant_type) contents(mean sd inc_activity) suppress nototals" details = parse_table_details(varlist, varnames, options) errstring = f" rows {details['rowvars']} should be ['grant_type','survivor']" assert details["rowvars"] == ["grant_type", "survivor"], errstring errstring = f" cols {details['colvars']} should be ['year','grant_type']" assert details["colvars"] == ["year", "grant_type"], errstring errstring = f" aggfunctions {details['aggfuncs']} should be ['mean','sd']" assert details["aggfuncs"] == ["mean", "sd"], errstring errstring = f" values {details['values']} should be ['inc_activity']" assert details["values"] == ["inc_activity"], errstring assert not details["totals"], "totals should be False" assert details["suppress"], "suppress should be True" def test_stata_probit(data): """Checks probit gets called correctly.""" ret = dummy_acrohandler( data, command="probit", varlist=" survivor inc_activity inc_grants inc_donations total_costs", exclusion="", exp="", weights="", options="", ) ret = ret.replace("\n", ",") tokens = ret.split(",") idx = tokens.index(" Df Residuals: ") residuals = int(tokens[idx + 1]) assert residuals == 806, f"{residuals} should be 806" idx = tokens.index(" Pseudo R-squ.: ") rsquared = float(tokens[idx + 1]) assert rsquared == pytest.approx(0.208, 0.01) def test_stata_linregress(data): """Checks linear regression called correctly.""" ret = dummy_acrohandler( data, command="regress", varlist=" inc_activity inc_grants inc_donations total_costs", exclusion="", exp="", weights="", options="", ) ret = ret.replace("\n", ",") tokens = ret.split(",") idx = tokens.index("Df Residuals: ") residuals = int(tokens[idx + 1]) assert residuals == 807, f"{residuals} should be 807" idx = tokens.index(" R-squared: ") rsquared = float(tokens[idx + 1]) assert rsquared == pytest.approx(0.894, 0.001) def test_unsupported_formatting_options(data): """Checks that user gets warning if they try to format table.""" format_string = "acro does not currently support table formatting commands." 
correct = pd.crosstab( index=data["survivor"], columns=data["grant_type"] ).to_string() for bad_option in [ "cellwidth", "csepwidth", "stubwidth", "scsepwidth", "center", "left", ]: ret = dummy_acrohandler( data, "table", "survivor grant_type", exclusion="", exp="", weights="", options=f"{bad_option} nototals", ) rets = ret.split("\n", 1) assert len(rets) == 2, "table should have warning prepended" errmsg = f"first line {rets[0]} should be {format_string}" assert rets[0] == format_string, errmsg ret = rets[1] ret = ret.replace("NaN", "0") ret = ret.replace(".0", "") assert ret.split() == correct.split(), f"got\n{ret}\n expected\n{correct}" def test_stata_finalise(monkeypatch): """Checks finalise gets called correctly.""" monkeypatch.setattr("builtins.input", lambda _: "Let me have it") ret = dummy_acrohandler( data, command="finalise", varlist="", exclusion="", exp="", weights="", options="", ) correct = "outputs and stata_out.json written\n" assert ret == correct, f"returned string {ret} should be {correct}\n"
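For orientation, the Stata row-range ("in") expressions exercised in test_apply_stata_expstmt behave like the following pandas slices — a sketch inferred from the test assertions rather than from the parser itself, assuming `data` is the DataFrame loaded by the fixture above:

# pandas equivalents of the Stata "in" ranges, inferred from the asserts (sketch)
first_five = data.iloc[:5]     # "f/5" / "F/5": rows 0..4
last_range = data.iloc[-6:-1]  # "-6/l" / "-6/L": five rows, ending at row length-2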
9,013
29.764505
88
py
ACRO
ACRO-main/test/__init__.py
0
0
0
py
ACRO
ACRO-main/docs/source/conf.py
# Configuration file for the Sphinx documentation builder.
#
# -- Path setup --------------------------------------------------------------

import os
import sys

sys.path.insert(0, os.path.abspath("../../"))

from acro.version import __version__

# -- Project information -----------------------------------------------------

project = "ACRO"
copyright = "2023, ACRO Project Team"
author = "ACRO Project Team"
release = __version__

# -- General configuration ---------------------------------------------------

extensions = [
    "numpydoc",
    "sphinx-prompt",
    "sphinx.ext.autodoc",
    "sphinx.ext.doctest",
    "sphinx.ext.imgconverter",
    "sphinx.ext.intersphinx",
    "sphinx.ext.viewcode",
    "sphinx_autopackagesummary",
    "sphinx_issues",
    "sphinx_rtd_theme",
]

exclude_patterns = []
html_static_path = ["_static"]

# -- Options for HTML output --------------------------------------------------

html_theme = "sphinx_rtd_theme"
html_theme_options = {"navigation_depth": 2}

# -- ---------------------------------------------------------------------------

numpydoc_class_members_toctree = False
1,120
23.369565
78
py
ACRO
ACRO-main/acro/record.py
"""ACRO: Output storage and serialization.""" import datetime import hashlib import json import logging import os import shutil from pathlib import Path from typing import Any import pandas as pd from pandas import DataFrame from .version import __version__ logger = logging.getLogger("acro:records") def load_outcome(outcome: dict) -> DataFrame: """Returns a DataFrame from an outcome dictionary. Parameters ---------- outcome : dict The outcome to load as a DataFrame. """ return pd.DataFrame.from_dict(outcome) def load_output(path: str, output: list[str]) -> list[str] | list[DataFrame]: """Returns a loaded output. Parameters ---------- path : str The path to the output folder (with results.json). output : list[str] The output to load. Returns ------- list[str] | list[DataFrame] The loaded output field. """ if len(output) < 1: raise ValueError("error loading output") loaded: list[DataFrame] = [] for filename in output: _, ext = os.path.splitext(filename) if ext == ".csv": filename = os.path.normpath(f"{path}/{filename}") loaded.append(pd.read_csv(filename)) if len(loaded) < 1: # output is path(s) to custom file(s) return output return loaded class Record: # pylint: disable=too-many-instance-attributes,too-few-public-methods """Stores data related to a single output record. Attributes ---------- uid : str Unique identifier. status : str SDC status: {"pass", "fail", "review"} output_type : str Type of output, e.g., "regression" properties : dict Dictionary containing structured output data. sdc : dict Dictionary containing SDC results. command : str String representation of the operation performed. summary : str String summarising the ACRO checks. outcome : DataFrame DataFrame describing the details of ACRO checks. output : Any List of output DataFrames. comments : list[str] List of strings entered by the user to add comments to the output. exception : str Description of why an exception to fail/review should be granted. timestamp : str Time the record was created in ISO format. """ def __init__( # pylint: disable=too-many-arguments self, uid: str, status: str, output_type: str, properties: dict, sdc: dict, command: str, summary: str, outcome: DataFrame, output: list[str] | list[DataFrame], comments: list[str] | None = None, ) -> None: """Constructs a new output record. Parameters ---------- uid : str Unique identifier. status : str SDC status: {"pass", "fail", "review"} output_type : str Type of output, e.g., "regression" properties : dict Dictionary containing structured output data. sdc : dict Dictionary containing SDC results. command : str String representation of the operation performed. summary : str String summarising the ACRO checks. outcome : DataFrame DataFrame describing the details of ACRO checks. output : list[str] | list[DataFrame] List of output DataFrames. comments : list[str] | None, default None List of strings entered by the user to add comments to the output. """ self.uid: str = uid self.status: str = status self.output_type: str = output_type self.properties: dict = properties self.sdc: dict = sdc self.command: str = command self.summary: str = summary self.outcome: DataFrame = outcome self.output: Any = output self.comments: list[str] = [] if comments is None else comments self.exception: str = "" now = datetime.datetime.now() self.timestamp: str = now.isoformat() def serialize_output(self, path: str = "outputs") -> list[str]: """Serializes outputs. Parameters ---------- path : str, default 'outputs' Name of the folder that outputs are to be written. 
Returns ------- list[str] List of filepaths of the written outputs. """ output: list[str] = [] # check if the outputs directory was already created try: # pragma: no cover os.makedirs(path) logger.debug("Directory %s created successfully", path) except FileExistsError: logger.debug("Directory %s already exists", path) # save each output DataFrame to a different csv if all(isinstance(obj, DataFrame) for obj in self.output): for i, data in enumerate(self.output): filename = f"{self.uid}_{i}.csv" output.append(filename) filename = os.path.normpath(f"{path}/{filename}") with open(filename, mode="w", newline="", encoding="utf-8") as file: file.write(data.to_csv()) # move custom files to the output folder if self.output_type == "custom": for filename in self.output: if os.path.exists(filename): shutil.copy(filename, path) output.append(Path(filename).name) return output def __str__(self) -> str: """Returns a string representation of a record. Returns ------- str The record. """ return ( f"uid: {self.uid}\n" f"status: {self.status}\n" f"type: {self.output_type}\n" f"properties: {self.properties}\n" f"sdc: {self.sdc}\n" f"command: {self.command}\n" f"summary: {self.summary}\n" f"outcome: {self.outcome}\n" f"output: {self.output}\n" f"timestamp: {self.timestamp}\n" f"comments: {self.comments}\n" f"exception: {self.exception}\n" ) class Records: """Stores data related to a collection of output records.""" def __init__(self) -> None: """Constructs a new object for storing multiple records.""" self.results: dict[str, Record] = {} self.output_id: int = 0 def add( # pylint: disable=too-many-arguments self, status: str, output_type: str, properties: dict, sdc: dict, command: str, summary: str, outcome: DataFrame, output: list[str] | list[DataFrame], comments: list[str] | None = None, ) -> None: """Adds an output to the results. Parameters ---------- status : str SDC status: {"pass", "fail", "review"} output_type : str Type of output, e.g., "regression" properties : dict Dictionary containing structured output data. sdc : dict Dictionary containing SDC results. command : str String representation of the operation performed. summary : str String summarising the ACRO checks. outcome : DataFrame DataFrame describing the details of ACRO checks. output : list[str | list[DataFrame] List of output DataFrames. comments : list[str] | None, default None List of strings entered by the user to add comments to the output. """ new = Record( uid=f"output_{self.output_id}", status=status, output_type=output_type, properties=properties, sdc=sdc, command=command, summary=summary, outcome=outcome, output=output, comments=comments, ) self.results[new.uid] = new self.output_id += 1 logger.info("add(): %s", new.uid) def remove(self, key: str) -> None: """Removes an output from the results. Parameters ---------- key : str Key specifying which output to remove, e.g., 'output_0'. """ if key not in self.results: raise ValueError(f"unable to remove {key}, key not found") del self.results[key] logger.info("remove(): %s removed", key) def get(self, key: str) -> Record: """Returns a specified output from the results. Parameters ---------- key : str Key specifying which output to return, e.g., 'output_0'. Returns ------- Record The requested output. """ logger.debug("get(): %s ", key) return self.results[key] def get_keys(self) -> list[str]: """Returns the list of available output keys. Returns ------- list[str] List of output names. 
""" logger.debug("get_keys()") return list(self.results.keys()) def get_index(self, index: int) -> Record: """Returns the output at the specified position. Parameters ---------- index : int Position of the output to return. Returns ------- Record The requested output. """ logger.debug("get_index(): %s", index) key = list(self.results.keys())[index] return self.results[key] def add_custom(self, filename: str, comment: str | None = None) -> None: """Adds an unsupported output to the results dictionary. Parameters ---------- filename : str The name of the file that will be added to the list of the outputs. comment : str | None, default None An optional comment. """ output = Record( uid=f"output_{self.output_id}", status="review", output_type="custom", properties={}, sdc={}, command="custom", summary="review", outcome=DataFrame(), output=[os.path.normpath(filename)], comments=None if comment is None else [comment], ) self.results[output.uid] = output logger.info("add_custom(): %s", output.uid) def rename(self, old: str, new: str) -> None: """Rename an output. Parameters ---------- old : str The old name of the output. new : str The new name of the output. """ if old not in self.results: raise ValueError(f"unable to rename {old}, key not found") if new in self.results: raise ValueError(f"unable to rename, {new} already exists") self.results[new] = self.results[old] self.results[new].uid = new del self.results[old] logger.info("rename_output(): %s renamed to %s", old, new) def add_comments(self, output: str, comment: str) -> None: """Adds a comment to an output. Parameters ---------- output : str The name of the output. comment : str The comment. """ if output not in self.results: raise ValueError(f"unable to find {output}, key not found") self.results[output].comments.append(comment) logger.info("a comment was added to %s", output) def add_exception(self, output: str, reason: str) -> None: """Adds an exception request to an output. Parameters ---------- output : str The name of the output. reason : str The reason the output should be released. """ if output not in self.results: raise ValueError(f"unable to add exception: {output} not found") self.results[output].exception = reason logger.info("exception request was added to %s", output) def print(self) -> str: """Prints the current results. Returns ------- str String representation of all outputs. """ logger.debug("print()") outputs: str = "" for _, record in self.results.items(): outputs += str(record) + "\n" print(outputs) return outputs def validate_outputs(self) -> None: """Prompts researcher to complete any required fields.""" for _, record in self.results.items(): if record.status != "pass" and record.exception == "": logger.info( "\n%s\n" "The status of the record above is: %s.\n" "Please explain why an exception should be granted.\n", str(record), record.status, ) record.exception = input("") def finalise(self, path: str, ext: str) -> None: """Creates a results file for checking. Parameters ---------- path : str Name of a folder to save outputs. ext : str Extension of the results file. Valid extensions: {json, xlsx}. """ logger.debug("finalise()") self.validate_outputs() if ext == "json": self.finalise_json(path) elif ext == "xlsx": self.finalise_excel(path) else: raise ValueError("Invalid file extension. Options: {json, xlsx}") self.write_checksums(path) logger.info("outputs written to: %s", path) def finalise_json(self, path: str) -> None: """Writes outputs to a JSON file. 
Parameters ---------- path : str Name of a folder to save outputs. """ outputs: dict = {} for key, val in self.results.items(): outputs[key] = { "uid": val.uid, "status": val.status, "type": val.output_type, "properties": val.properties, "files": [], "outcome": json.loads(val.outcome.to_json()), "command": val.command, "summary": val.summary, "timestamp": val.timestamp, "comments": val.comments, "exception": val.exception, } files: list[str] = val.serialize_output(path) for file in files: outputs[key]["files"].append({"name": file, "sdc": val.sdc}) results: dict = {"version": __version__, "results": outputs} filename: str = os.path.normpath(f"{path}/results.json") with open(filename, "w", newline="", encoding="utf-8") as handle: json.dump(results, handle, indent=4, sort_keys=False) def finalise_excel(self, path: str) -> None: """Writes outputs to an excel spreadsheet. Parameters ---------- path : str Name of a folder to save outputs. """ filename: str = os.path.normpath(f"{path}/results.xlsx") try: # check if the directory was already created os.makedirs(path, exist_ok=True) logger.debug("Directory %s created successfully", path) except FileExistsError: # pragma: no cover logger.debug("Directory %s already exists", path) with pd.ExcelWriter( # pylint: disable=abstract-class-instantiated filename, engine="openpyxl" ) as writer: # description sheet sheet = [] summary = [] command = [] for output_id, output in self.results.items(): if output.output_type == "custom": continue # avoid writing custom outputs sheet.append(output_id) command.append(output.command) summary.append(output.summary) tmp_df = pd.DataFrame( {"Sheet": sheet, "Command": command, "Summary": summary} ) tmp_df.to_excel(writer, sheet_name="description", index=False, startrow=0) # individual sheets for output_id, output in self.results.items(): if output.output_type == "custom": continue # avoid writing custom outputs # command and summary start = 0 tmp_df = pd.DataFrame( [output.command, output.summary], index=["Command", "Summary"] ) tmp_df.to_excel(writer, sheet_name=output_id, startrow=start) # outcome if output.outcome is not None: output.outcome.to_excel(writer, sheet_name=output_id, startrow=4) # output for table in output.output: start = 1 + writer.sheets[output_id].max_row table.to_excel(writer, sheet_name=output_id, startrow=start) def write_checksums(self, path: str) -> None: """Writes checksums for each file to checksums folder. Parameters ---------- path : str Name of a folder to save outputs. """ checksums: dict[str, str] = {} for name in os.listdir(path): filename = os.path.join(path, name) if os.path.isfile(filename): with open(filename, "rb") as file: read = file.read() checksums[name] = hashlib.sha256(read).hexdigest() checksums_dir: str = os.path.normpath(f"{path}/checksums") os.makedirs(checksums_dir, exist_ok=True) for name, sha256 in checksums.items(): filename = os.path.join(checksums_dir, name + ".txt") with open(filename, "w", encoding="utf-8") as file: file.write(sha256) def load_records(path: str) -> Records: """Loads outputs from a JSON file. Parameters ---------- path : str Name of an output folder containing results.json. Returns ------- Records The loaded records. 
""" records = Records() filename = os.path.normpath(f"{path}/results.json") with open(filename, newline="", encoding="utf-8") as handle: data = json.load(handle) if data["version"] != __version__: # pragma: no cover raise ValueError("error loading output") for key, val in data["results"].items(): files: list[dict] = val["files"] filenames: list = [] sdcs: list = [] for file in files: filenames.append(file["name"]) sdcs.append(file["sdc"]) records.results[key] = Record( uid=val["uid"], status=val["status"], output_type=val["type"], properties=val["properties"], sdc=sdcs[0], command=val["command"], summary=val["summary"], outcome=load_outcome(val["outcome"]), output=load_output(path, filenames), comments=val["comments"], ) records.results[key].exception = val["exception"] records.results[key].timestamp = val["timestamp"] return records
19,460
32.209898
86
py
ACRO
ACRO-main/acro/utils.py
"""ACRO: Utility Functions.""" import logging from collections.abc import Callable from inspect import FrameInfo, getframeinfo import numpy as np import pandas as pd from pandas import DataFrame, Series from statsmodels.iolib.table import SimpleTable logger = logging.getLogger("acro") AGGFUNC: dict[str, Callable] = { "mean": np.mean, "median": np.median, "sum": np.sum, "std": np.std, "freq": np.size, } # aggregation function parameters THRESHOLD: int = 10 SAFE_PRATIO_P: float = 0.1 SAFE_NK_N: int = 2 SAFE_NK_K: float = 0.9 CHECK_MISSING_VALUES: bool = False def get_command(default: str, stack_list: list[FrameInfo]) -> str: """Returns the calling source line as a string. Parameters ---------- default : str Default string to return if unable to extract the stack. stack_list : list[tuple] A list of frame records for the caller's stack. The first entry in the returned list represents the caller; the last entry represents the outermost call on the stack. Returns ------- str The calling source line. """ command: str = default if len(stack_list) > 1: code = getframeinfo(stack_list[1][0]).code_context if code is not None: command = "\n".join(code).strip() logger.debug("command: %s", command) return command def get_summary_dataframes(results: list[SimpleTable]) -> list[DataFrame]: """Converts a list of SimpleTable objects to a list of DataFrame objects. Parameters ---------- results : list[SimpleTable] Results from fitting statsmodel. Returns ------- list[DataFrame] List of DataFrame objects. """ tables: list[DataFrame] = [] for table in results: table_df = pd.read_html(table.as_html(), header=0, index_col=0)[0] tables.append(table_df) return tables def agg_threshold(vals: Series) -> bool: """Aggregation function that returns whether the number of contributors is below a threshold. Parameters ---------- vals : Series Series to calculate the p percent value. Returns ------- bool Whether the threshold rule is violated. """ return vals.count() < THRESHOLD def agg_negative(vals: Series) -> bool: """Aggregation function that returns whether any values are negative. Parameters ---------- vals : Series Series to check for negative values. Returns ------- bool Whether a negative value was found. """ return vals.min() < 0 def agg_missing(vals: Series) -> bool: """Aggregation function that returns whether any values are missing. Parameters ---------- vals : Series Series to check for missing values. Returns ------- bool Whether a missing value was found. """ return vals.isna().sum() != 0 def agg_p_percent(vals: Series) -> bool: """Aggregation function that returns whether the p percent rule is violated. That is, the uncertainty (as a fraction) of the estimate that the second highest respondent can make of the highest value. Assuming there are n items in the series, they are first sorted in descending order and then we calculate the value p = (sum - N-2 highest values)/highest value. If all values are 0, returns 1. Parameters ---------- vals : Series Series to calculate the p percent value. Returns ------- bool whether the p percent rule is violated. """ sorted_vals = vals.sort_values(ascending=False) total: float = sorted_vals.sum() sub_total = total - sorted_vals.iloc[0] - sorted_vals.iloc[1] p_val: float = sub_total / sorted_vals.iloc[0] if total > 0 else 1 return p_val < SAFE_PRATIO_P def agg_nk(vals: Series) -> bool: """Aggregation function that returns whether the top n items account for more than k percent of the total. Parameters ---------- vals : Series Series to calculate the nk value. 
Returns ------- bool Whether the nk rule is violated. """ total: float = vals.sum() if total > 0: sorted_vals = vals.sort_values(ascending=False) n_total = sorted_vals.iloc[0:SAFE_NK_N].sum() return (n_total / total) > SAFE_NK_K return False def apply_suppression( table: DataFrame, masks: dict[str, DataFrame] ) -> tuple[DataFrame, DataFrame]: """Applies suppression to a table. Parameters ---------- table : DataFrame Table to apply suppression. masks : dict[str, DataFrame] Dictionary of tables specifying suppression masks for application. Returns ------- DataFrame Table to output with any suppression applied. DataFrame Table with outcomes of suppression checks. """ logger.debug("apply_suppression()") safe_df = table.copy() outcome_df = DataFrame().reindex_like(table) outcome_df.fillna("", inplace=True) # don't apply suppression if negatives are present if "negative" in masks: mask = masks["negative"] outcome_df[mask.values] = "negative" # don't apply suppression if missing values are present elif "missing" in masks: mask = masks["missing"] outcome_df[mask.values] = "missing" # apply suppression masks else: for name, mask in masks.items(): try: safe_df[mask.values] = np.NaN tmp_df = DataFrame().reindex_like(outcome_df) tmp_df.fillna("", inplace=True) tmp_df[mask.values] = name + "; " outcome_df += tmp_df except TypeError: logger.warning("problem mask %s is not binary", name) outcome_df = outcome_df.replace({"": "ok"}) logger.info("outcome_df:\n%s", outcome_df) return safe_df, outcome_df def get_table_sdc(masks: dict[str, DataFrame], suppress: bool) -> dict: """Returns the SDC dictionary using the suppression masks. Parameters ---------- masks : dict[str, DataFrame] Dictionary of tables specifying suppression masks for application. suppress : bool Whether suppression has been applied. """ # summary of cells to be suppressed sdc: dict = {"summary": {"suppressed": suppress}, "cells": {}} sdc["summary"]["negative"] = 0 sdc["summary"]["missing"] = 0 sdc["summary"]["threshold"] = 0 sdc["summary"]["p-ratio"] = 0 sdc["summary"]["nk-rule"] = 0 for name, mask in masks.items(): sdc["summary"][name] = int(mask.to_numpy().sum()) # positions of cells to be suppressed sdc["cells"]["negative"] = [] sdc["cells"]["missing"] = [] sdc["cells"]["threshold"] = [] sdc["cells"]["p-ratio"] = [] sdc["cells"]["nk-rule"] = [] for name, mask in masks.items(): true_positions = np.column_stack(np.where(mask.values)) for pos in true_positions: row_index, col_index = pos sdc["cells"][name].append([int(row_index), int(col_index)]) return sdc def get_summary(sdc: dict) -> tuple[str, str]: """Returns the status and summary of the suppression masks. Parameters ---------- sdc : dict Properties of the SDC checks. Returns ------- str Status: {"review", "fail", "pass"}. str Summary of the suppression masks. 
""" status: str = "pass" summary: str = "" sdc_summary = sdc["summary"] sup: str = "suppressed" if sdc_summary["suppressed"] else "may need suppressing" if sdc_summary["negative"] > 0: summary += "negative values found" status = "review" elif sdc_summary["missing"] > 0: summary += "missing values found" status = "review" else: if sdc_summary["threshold"] > 0: summary += f"threshold: {sdc_summary['threshold']} cells {sup}; " status = "fail" if sdc_summary["p-ratio"] > 0: summary += f"p-ratio: {sdc_summary['p-ratio']} cells {sup}; " status = "fail" if sdc_summary["nk-rule"] > 0: summary += f"nk-rule: {sdc_summary['nk-rule']} cells {sup}; " status = "fail" if summary != "": summary = f"{status}; {summary}" else: summary = status logger.info("get_summary(): %s", summary) return status, summary def get_aggfunc(aggfunc: str | None) -> Callable | None: """Checks whether an aggregation function is allowed and returns the appropriate function. Parameters ---------- aggfunc : str | None Name of the aggregation function to apply. Returns ------- Callable | None The aggregation function to apply. """ logger.debug("get_aggfunc()") func = None if aggfunc is not None: if not isinstance(aggfunc, str) or aggfunc not in AGGFUNC: raise ValueError(f"aggfunc {aggfunc} must be: {', '.join(AGGFUNC.keys())}") func = AGGFUNC[aggfunc] logger.debug("aggfunc: %s", func) return func def get_aggfuncs( aggfuncs: str | list[str] | None, ) -> Callable | list[Callable] | None: """Checks whether a list of aggregation functions is allowed and returns the appropriate functions. Parameters ---------- aggfuncs : str | list[str] | None List of names of the aggregation functions to apply. Returns ------- Callable | list[Callable] | None The aggregation functions to apply. """ logger.debug("get_aggfuncs()") if aggfuncs is None: logger.debug("aggfuncs: None") return None if isinstance(aggfuncs, str): function = get_aggfunc(aggfuncs) logger.debug("aggfuncs: %s", function) return function if isinstance(aggfuncs, list): functions: list[Callable] = [] for function_name in aggfuncs: function = get_aggfunc(function_name) if function is not None: functions.append(function) logger.debug("aggfuncs: %s", functions) if len(functions) < 1: raise ValueError(f"invalid aggfuncs: {aggfuncs}") return functions raise ValueError("aggfuncs must be: either str or list[str]")
10,320
27.991573
87
py
ACRO
ACRO-main/acro/acro.py
"""ACRO: Automatic Checking of Research Outputs.""" import json import logging import os import pathlib import warnings from collections.abc import Callable from inspect import stack import pandas as pd import statsmodels.api as sm import statsmodels.formula.api as smf import yaml from pandas import DataFrame from statsmodels.discrete.discrete_model import BinaryResultsWrapper from statsmodels.iolib.table import SimpleTable from statsmodels.regression.linear_model import RegressionResultsWrapper from . import utils from .record import Records from .version import __version__ logging.basicConfig(level=logging.INFO) logger = logging.getLogger("acro") warnings.simplefilter(action="ignore", category=FutureWarning) class ACRO: """ACRO: Automatic Checking of Research Outputs. Attributes ---------- config : dict Safe parameters and their values. results : Records The current outputs including the results of checks. output_id : int The next identifier to be assigned to an output. Examples -------- >>> acro = ACRO() >>> results = acro.ols(y, x) >>> results.summary() >>> acro.finalise("MYFOLDER", "json") """ def __init__(self, config: str = "default", suppress: bool = False) -> None: """Constructs a new ACRO object and reads parameters from config. Parameters ---------- config : str Name of a yaml configuration file with safe parameters. suppress : bool, default False Whether to automatically apply suppression. """ self.config: dict = {} self.results: Records = Records() self.suppress: bool = suppress path = pathlib.Path(__file__).with_name(config + ".yaml") logger.debug("path: %s", path) with open(path, encoding="utf-8") as handle: self.config = yaml.load(handle, Loader=yaml.loader.SafeLoader) logger.info("version: %s", __version__) logger.info("config: %s", self.config) logger.info("automatic suppression: %s", self.suppress) # set globals needed for aggregation functions utils.THRESHOLD = self.config["safe_threshold"] utils.SAFE_PRATIO_P = self.config["safe_pratio_p"] utils.SAFE_NK_N = self.config["safe_nk_n"] utils.SAFE_NK_K = self.config["safe_nk_k"] utils.CHECK_MISSING_VALUES = self.config["check_missing_values"] def finalise(self, path: str = "outputs", ext="json") -> Records: """Creates a results file for checking. Parameters ---------- path : str Name of a folder to save outputs. ext : str Extension of the results file. Valid extensions: {json, xlsx}. Returns ------- Records Object storing the outputs. """ self.results.finalise(path, ext) config_filename: str = os.path.normpath(f"{path}/config.json") with open(config_filename, "w", newline="", encoding="utf-8") as file: json.dump(self.config, file, indent=4, sort_keys=False) return self.results def remove_output(self, key: str) -> None: """Removes an output from the results. Parameters ---------- key : str Key specifying which output to remove, e.g., 'output_0'. """ self.results.remove(key) def print_outputs(self) -> str: """Prints the current results dictionary. Returns ------- str String representation of all outputs. """ return self.results.print() def custom_output(self, filename: str, comment: str = "") -> None: """Adds an unsupported output to the results dictionary. Parameters ---------- filename : str The name of the file that will be added to the list of the outputs. comment : str An optional comment. 
""" self.results.add_custom(filename, comment) def crosstab( # pylint: disable=too-many-arguments,too-many-locals self, index, columns, values=None, rownames=None, colnames=None, aggfunc=None, margins: bool = False, margins_name: str = "All", dropna: bool = True, normalize=False, ) -> DataFrame: """Compute a simple cross tabulation of two (or more) factors. By default, computes a frequency table of the factors unless an array of values and an aggregation function are passed. Parameters ---------- index : array-like, Series, or list of arrays/Series Values to group by in the rows. columns : array-like, Series, or list of arrays/Series Values to group by in the columns. values : array-like, optional Array of values to aggregate according to the factors. Requires `aggfunc` be specified. rownames : sequence, default None If passed, must match number of row arrays passed. colnames : sequence, default None If passed, must match number of column arrays passed. aggfunc : str, optional If specified, requires `values` be specified as well. margins : bool, default False Add row/column margins (subtotals). margins_name : str, default 'All' Name of the row/column that will contain the totals when margins is True. dropna : bool, default True Do not include columns whose entries are all NaN. normalize : bool, {'all', 'index', 'columns'}, or {0,1}, default False Normalize by dividing all values by the sum of values. - If passed 'all' or `True`, will normalize over all values. - If passed 'index' will normalize over each row. - If passed 'columns' will normalize over each column. - If margins is `True`, will also normalize margin values. Returns ------- DataFrame Cross tabulation of the data. """ logger.debug("crosstab()") command: str = utils.get_command("crosstab()", stack()) # convert [list of] string to [list of] function aggfunc = utils.get_aggfuncs(aggfunc) # requested table table: DataFrame = pd.crosstab( # type: ignore index, columns, values, rownames, colnames, aggfunc, margins, margins_name, dropna, normalize, ) # suppression masks to apply based on the following checks masks: dict[str, DataFrame] = {} if aggfunc is not None: # create lists with single entry for when there is only one aggfunc freq_funcs: list[Callable] = [utils.AGGFUNC["freq"]] neg_funcs: list[Callable] = [utils.agg_negative] pperc_funcs: list[Callable] = [utils.agg_p_percent] nk_funcs: list[Callable] = [utils.agg_nk] missing_funcs: list[Callable] = [utils.agg_missing] # then expand them to deal with extra columns as needed if isinstance(aggfunc, list): num = len(aggfunc) freq_funcs.extend([utils.AGGFUNC["freq"] for i in range(1, num)]) neg_funcs.extend([utils.agg_negative for i in range(1, num)]) pperc_funcs.extend([utils.agg_p_percent for i in range(1, num)]) nk_funcs.extend([utils.agg_nk for i in range(1, num)]) missing_funcs.extend([utils.agg_missing for i in range(1, num)]) # threshold check- doesn't matter what we pass for value t_values = pd.crosstab( # type: ignore index, columns, values=index, rownames=rownames, colnames=colnames, aggfunc=freq_funcs, margins=margins, margins_name=margins_name, dropna=dropna, normalize=normalize, ) t_values = t_values < utils.THRESHOLD masks["threshold"] = t_values # check for negative values -- currently unsupported negative = pd.crosstab( # type: ignore index, columns, values, aggfunc=neg_funcs, margins=margins ) if negative.to_numpy().sum() > 0: masks["negative"] = negative # p-percent check masks["p-ratio"] = pd.crosstab( # type: ignore index, columns, values, aggfunc=pperc_funcs, 
margins=margins ) # nk values check masks["nk-rule"] = pd.crosstab( # type: ignore index, columns, values, aggfunc=nk_funcs, margins=margins ) # check for missing values -- currently unsupported if utils.CHECK_MISSING_VALUES: masks["missing"] = pd.crosstab( # type: ignore index, columns, values, aggfunc=missing_funcs, margins=margins ) else: # threshold check- doesn't matter what we pass for value t_values = pd.crosstab( # type: ignore index, columns, values=None, rownames=rownames, colnames=colnames, aggfunc=None, margins=margins, margins_name=margins_name, dropna=dropna, normalize=normalize, ) t_values = t_values < utils.THRESHOLD masks["threshold"] = t_values # pd.crosstab returns nan for an empty cell for name, mask in masks.items(): mask.fillna(value=1, inplace=True) mask = mask.astype(int) mask.replace({0: False, 1: True}, inplace=True) masks[name] = mask # build the sdc dictionary sdc: dict = utils.get_table_sdc(masks, self.suppress) # get the status and summary status, summary = utils.get_summary(sdc) # apply the suppression safe_table, outcome = utils.apply_suppression(table, masks) if self.suppress: table = safe_table # record output self.results.add( status=status, output_type="table", properties={"method": "crosstab"}, sdc=sdc, command=command, summary=summary, outcome=outcome, output=[table], ) return table def pivot_table( # pylint: disable=too-many-arguments,too-many-locals self, data: DataFrame, values=None, index=None, columns=None, aggfunc="mean", fill_value=None, margins: bool = False, dropna: bool = True, margins_name: str = "All", observed: bool = False, sort: bool = True, ) -> DataFrame: """Create a spreadsheet-style pivot table as a DataFrame. The levels in the pivot table will be stored in MultiIndex objects (hierarchical indexes) on the index and columns of the result DataFrame. Parameters ---------- data : DataFrame The DataFrame to operate on. values : column, optional Column to aggregate, optional. index : column, Grouper, array, or list of the previous If an array is passed, it must be the same length as the data. The list can contain any of the other types (except list). Keys to group by on the pivot table index. If an array is passed, it is being used as the same manner as column values. columns : column, Grouper, array, or list of the previous If an array is passed, it must be the same length as the data. The list can contain any of the other types (except list). Keys to group by on the pivot table column. If an array is passed, it is being used as the same manner as column values. aggfunc : str | list[str], default 'mean' If list of strings passed, the resulting pivot table will have hierarchical columns whose top level are the function names (inferred from the function objects themselves). fill_value : scalar, default None Value to replace missing values with (in the resulting pivot table, after aggregation). margins : bool, default False Add all row / columns (e.g. for subtotal / grand totals). dropna : bool, default True Do not include columns whose entries are all NaN. margins_name : str, default 'All' Name of the row / column that will contain the totals when margins is True. observed : bool, default False This only applies if any of the groupers are Categoricals. If True: only show observed values for categorical groupers. If False: show all values for categorical groupers. sort : bool, default True Specifies if the result should be sorted. Returns ------- DataFrame Cross tabulation of the data. 
""" logger.debug("pivot_table()") command: str = utils.get_command("pivot_table()", stack()) aggfunc = utils.get_aggfuncs(aggfunc) # convert string(s) to function(s) n_agg: int = 1 if not isinstance(aggfunc, list) else len(aggfunc) # requested table table: DataFrame = pd.pivot_table( # pylint: disable=too-many-function-args data, values, index, columns, aggfunc, fill_value, margins, dropna, margins_name, observed, sort, ) # suppression masks to apply based on the following checks masks: dict[str, DataFrame] = {} # threshold check agg = [utils.agg_threshold] * n_agg if n_agg > 1 else utils.agg_threshold t_values = pd.pivot_table( # type: ignore data, values, index, columns, aggfunc=agg ) masks["threshold"] = t_values if aggfunc is not None: # check for negative values -- currently unsupported agg = [utils.agg_negative] * n_agg if n_agg > 1 else utils.agg_negative negative = pd.pivot_table( # type: ignore data, values, index, columns, aggfunc=agg ) if negative.to_numpy().sum() > 0: masks["negative"] = negative # p-percent check agg = [utils.agg_p_percent] * n_agg if n_agg > 1 else utils.agg_p_percent masks["p-ratio"] = pd.pivot_table( # type: ignore data, values, index, columns, aggfunc=agg ) # nk values check agg = [utils.agg_nk] * n_agg if n_agg > 1 else utils.agg_nk masks["nk-rule"] = pd.pivot_table( # type: ignore data, values, index, columns, aggfunc=agg ) # check for missing values -- currently unsupported if utils.CHECK_MISSING_VALUES: agg = [utils.agg_missing] * n_agg if n_agg > 1 else utils.agg_missing masks["missing"] = pd.pivot_table( # type: ignore data, values, index, columns, aggfunc=agg ) # build the sdc dictionary sdc: dict = utils.get_table_sdc(masks, self.suppress) # get the status and summary status, summary = utils.get_summary(sdc) # apply the suppression safe_table, outcome = utils.apply_suppression(table, masks) if self.suppress: table = safe_table # record output self.results.add( status=status, output_type="table", properties={"method": "pivot_table"}, sdc=sdc, command=command, summary=summary, outcome=outcome, output=[table], ) return table def __check_model_dof(self, name: str, model) -> tuple[str, str, float]: """Check model DOF. Parameters ---------- name : str The name of the model. model A statsmodels model. Returns ------- str Status: {"review", "fail", "pass"}. str Summary of the check. float The degrees of freedom. """ status = "fail" dof: int = model.df_resid threshold: int = self.config["safe_dof_threshold"] if dof < threshold: summary = f"fail; dof={dof} < {threshold}" warnings.warn(f"Unsafe {name}: {summary}", stacklevel=8) else: status = "pass" summary = f"pass; dof={dof} >= {threshold}" logger.info("%s() outcome: %s", name, summary) return status, summary, float(dof) def ols( # pylint: disable=too-many-locals self, endog, exog=None, missing="none", hasconst=None, **kwargs ) -> RegressionResultsWrapper: """Fits Ordinary Least Squares Regression. Parameters ---------- endog : array_like A 1-d endogenous response variable. The dependent variable. exog : array_like A nobs x k array where `nobs` is the number of observations and `k` is the number of regressors. An intercept is not included by default and should be added by the user. missing : str Available options are 'none', 'drop', and 'raise'. If 'none', no nan checking is done. If 'drop', any observations with nans are dropped. If 'raise', an error is raised. Default is 'none'. hasconst : None or bool Indicates whether the RHS includes a user-supplied constant. 
If True, a constant is not checked for and k_constant is set to 1 and all result statistics are calculated as if a constant is present. If False, a constant is not checked for and k_constant is set to 0. **kwargs Extra arguments that are used to set model properties when using the formula interface. Returns ------- RegressionResultsWrapper Results. """ logger.debug("ols()") command: str = utils.get_command("ols()", stack()) model = sm.OLS(endog, exog=exog, missing=missing, hasconst=hasconst, **kwargs) results = model.fit() status, summary, dof = self.__check_model_dof("ols", model) tables: list[SimpleTable] = results.summary().tables self.results.add( status=status, output_type="regression", properties={"method": "ols", "dof": dof}, sdc={}, command=command, summary=summary, outcome=DataFrame(), output=utils.get_summary_dataframes(tables), ) return results def olsr( # pylint: disable=too-many-locals,keyword-arg-before-vararg self, formula, data, subset=None, drop_cols=None, *args, **kwargs ) -> RegressionResultsWrapper: """Fits Ordinary Least Squares Regression from a formula and dataframe. Parameters ---------- formula : str or generic Formula object The formula specifying the model. data : array_like The data for the model. See Notes. subset : array_like An array-like object of booleans, integers, or index values that indicate the subset of df to use in the model. Assumes df is a `pandas.DataFrame`. drop_cols : array_like Columns to drop from the design matrix. Cannot be used to drop terms involving categoricals. *args Additional positional argument that are passed to the model. **kwargs These are passed to the model with one exception. The ``eval_env`` keyword is passed to patsy. It can be either a :class:`patsy:patsy.EvalEnvironment` object or an integer indicating the depth of the namespace to use. For example, the default ``eval_env=0`` uses the calling namespace. If you wish to use a "clean" environment set ``eval_env=-1``. Returns ------- RegressionResultsWrapper Results. Notes ----- data must define __getitem__ with the keys in the formula terms args and kwargs are passed on to the model instantiation. E.g., a numpy structured or rec array, a dictionary, or a pandas DataFrame. """ logger.debug("olsr()") command: str = utils.get_command("olsr()", stack()) model = smf.ols( formula=formula, data=data, subset=subset, drop_cols=drop_cols, *args, **kwargs, ) results = model.fit() status, summary, dof = self.__check_model_dof("olsr", model) tables: list[SimpleTable] = results.summary().tables self.results.add( status=status, output_type="regression", properties={"method": "olsr", "dof": dof}, sdc={}, command=command, summary=summary, outcome=DataFrame(), output=utils.get_summary_dataframes(tables), ) return results def logit( # pylint: disable=too-many-arguments,too-many-locals self, endog, exog, missing: str | None = None, check_rank: bool = True, ) -> BinaryResultsWrapper: """Fits Logit model. Parameters ---------- endog : array_like A 1-d endogenous response variable. The dependent variable. exog : array_like A nobs x k array where nobs is the number of observations and k is the number of regressors. An intercept is not included by default and should be added by the user. missing : str | None Available options are ‘none’, ‘drop’, and ‘raise’. If ‘none’, no nan checking is done. If ‘drop’, any observations with nans are dropped. If ‘raise’, an error is raised. Default is ‘none’. check_rank : bool Check exog rank to determine model degrees of freedom. Default is True. 
Setting to False reduces model initialization time when exog.shape[1] is large. Returns ------- BinaryResultsWrapper Results. """ logger.debug("logit()") command: str = utils.get_command("logit()", stack()) model = sm.Logit(endog, exog, missing=missing, check_rank=check_rank) results = model.fit() status, summary, dof = self.__check_model_dof("logit", model) tables: list[SimpleTable] = results.summary().tables self.results.add( status=status, output_type="regression", properties={"method": "logit", "dof": dof}, sdc={}, command=command, summary=summary, outcome=DataFrame(), output=utils.get_summary_dataframes(tables), ) return results def logitr( # pylint: disable=too-many-locals,keyword-arg-before-vararg self, formula, data, subset=None, drop_cols=None, *args, **kwargs ) -> RegressionResultsWrapper: """Fits Logit model from a formula and dataframe. Parameters ---------- formula : str or generic Formula object The formula specifying the model. data : array_like The data for the model. See Notes. subset : array_like An array-like object of booleans, integers, or index values that indicate the subset of df to use in the model. Assumes df is a `pandas.DataFrame`. drop_cols : array_like Columns to drop from the design matrix. Cannot be used to drop terms involving categoricals. *args Additional positional argument that are passed to the model. **kwargs These are passed to the model with one exception. The ``eval_env`` keyword is passed to patsy. It can be either a :class:`patsy:patsy.EvalEnvironment` object or an integer indicating the depth of the namespace to use. For example, the default ``eval_env=0`` uses the calling namespace. If you wish to use a "clean" environment set ``eval_env=-1``. Returns ------- RegressionResultsWrapper Results. Notes ----- data must define __getitem__ with the keys in the formula terms args and kwargs are passed on to the model instantiation. E.g., a numpy structured or rec array, a dictionary, or a pandas DataFrame. """ logger.debug("logitr()") command: str = utils.get_command("logitr()", stack()) model = smf.logit( formula=formula, data=data, subset=subset, drop_cols=drop_cols, *args, **kwargs, ) results = model.fit() status, summary, dof = self.__check_model_dof("logitr", model) tables: list[SimpleTable] = results.summary().tables self.results.add( status=status, output_type="regression", properties={"method": "logitr", "dof": dof}, sdc={}, command=command, summary=summary, outcome=DataFrame(), output=utils.get_summary_dataframes(tables), ) return results def probit( # pylint: disable=too-many-arguments,too-many-locals self, endog, exog, missing: str | None = None, check_rank: bool = True, ) -> BinaryResultsWrapper: """Fits Probit model. Parameters ---------- endog : array_like A 1-d endogenous response variable. The dependent variable. exog : array_like A nobs x k array where nobs is the number of observations and k is the number of regressors. An intercept is not included by default and should be added by the user. missing : str | None Available options are ‘none’, ‘drop’, and ‘raise’. If ‘none’, no nan checking is done. If ‘drop’, any observations with nans are dropped. If ‘raise’, an error is raised. Default is ‘none’. check_rank : bool Check exog rank to determine model degrees of freedom. Default is True. Setting to False reduces model initialization time when exog.shape[1] is large. Returns ------- BinaryResultsWrapper Results. 
""" logger.debug("probit()") command: str = utils.get_command("probit()", stack()) model = sm.Probit(endog, exog, missing=missing, check_rank=check_rank) results = model.fit() status, summary, dof = self.__check_model_dof("probit", model) tables: list[SimpleTable] = results.summary().tables self.results.add( status=status, output_type="regression", properties={"method": "probit", "dof": dof}, sdc={}, command=command, summary=summary, outcome=DataFrame(), output=utils.get_summary_dataframes(tables), ) return results def probitr( # pylint: disable=too-many-locals,keyword-arg-before-vararg self, formula, data, subset=None, drop_cols=None, *args, **kwargs ) -> RegressionResultsWrapper: """Fits Probit model from a formula and dataframe. Parameters ---------- formula : str or generic Formula object The formula specifying the model. data : array_like The data for the model. See Notes. subset : array_like An array-like object of booleans, integers, or index values that indicate the subset of df to use in the model. Assumes df is a `pandas.DataFrame`. drop_cols : array_like Columns to drop from the design matrix. Cannot be used to drop terms involving categoricals. *args Additional positional argument that are passed to the model. **kwargs These are passed to the model with one exception. The ``eval_env`` keyword is passed to patsy. It can be either a :class:`patsy:patsy.EvalEnvironment` object or an integer indicating the depth of the namespace to use. For example, the default ``eval_env=0`` uses the calling namespace. If you wish to use a "clean" environment set ``eval_env=-1``. Returns ------- RegressionResultsWrapper Results. Notes ----- data must define __getitem__ with the keys in the formula terms args and kwargs are passed on to the model instantiation. E.g., a numpy structured or rec array, a dictionary, or a pandas DataFrame. """ logger.debug("probitr()") command: str = utils.get_command("probitr()", stack()) model = smf.probit( formula=formula, data=data, subset=subset, drop_cols=drop_cols, *args, **kwargs, ) results = model.fit() status, summary, dof = self.__check_model_dof("probitr", model) tables: list[SimpleTable] = results.summary().tables self.results.add( status=status, output_type="regression", properties={"method": "probitr", "dof": dof}, sdc={}, command=command, summary=summary, outcome=DataFrame(), output=utils.get_summary_dataframes(tables), ) return results def rename_output(self, old: str, new: str) -> None: """Rename an output. Parameters ---------- old : str The old name of the output. new : str The new name of the output. """ self.results.rename(old, new) def add_comments(self, output: str, comment: str) -> None: """Adds a comment to an output. Parameters ---------- output : str The name of the output. comment : str The comment. """ self.results.add_comments(output, comment) def add_exception(self, output: str, reason: str) -> None: """Adds an exception request to an output. Parameters ---------- output : str The name of the output. reason : str The comment. """ self.results.add_exception(output, reason) def add_constant(data, prepend: bool = True, has_constant: str = "skip"): """Add a column of ones to an array. Parameters ---------- data : array_like A column-ordered design matrix. prepend : bool If true, the constant is in the first column. Else the constant is appended (last column). has_constant : str {'raise', 'add', 'skip'} Behavior if data already has a constant. The default will return data without adding another constant. 
If 'raise', will raise an error if any column has a constant value. Using 'add' will add a column of 1s if a constant column is present. Returns ------- array_like The original values with a constant (column of ones) as the first or last column. Returned value type depends on input type. Notes ----- When the input is a pandas Series or DataFrame, the added column's name is 'const'. """ return sm.add_constant(data, prepend=prepend, has_constant=has_constant)
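For orientation, a minimal end-to-end use of the class above; this is a sketch with tiny synthetic data (so the cells will trip the threshold check), with safe parameters read from default.yaml: import pandas as pd
from acro import ACRO

df = pd.DataFrame({"year": [2020, 2020, 2021, 2021], "grant": ["a", "b", "a", "b"]})
session = ACRO(suppress=True)               # apply suppression automatically
safe = session.crosstab(df.year, df.grant)  # recorded with its SDC status and outcome
session.finalise("outputs", "json")         # writes the checking bundle for review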
32,438
36.243398
86
py
ACRO
ACRO-main/acro/version.py
"""ACRO version number.""" __version__ = "0.4.2"
49
15.666667
26
py
ACRO
ACRO-main/acro/__init__.py
"""ACRO.""" from .acro import *
32
10
19
py
ACRO
ACRO-main/stata/acro_stata_parser.py
# file with commands to manage the stata-acro interface import pandas as pd from acro import ACRO, add_constant def apply_stata_ifstmt(raw: str, df: pd.DataFrame) -> pd.DataFrame: if len(raw) == 0: return df else: # add braces around each clause - keeping any in the original raw = "( " + raw + ")" raw = raw.replace("&", ") & (") raw = raw.replace("|", ") | (") # put spaces around operators to ease parsing for operator in [">", "<", "==", ">=", "<=", "!="]: raw = raw.replace(operator, " " + operator + " ") # replace variable names with df["varname"] for vname in df.columns: raw = raw.replace(vname, 'df["' + vname + '"]') # print(raw) # apply exclusion df2 = df[eval(raw)] return df2 def apply_stata_expstmt(raw: str, df: pd.DataFrame) -> pd.DataFrame: # stata allows f and F for first item and l/L for last last = len(df) - 1 token = raw.split("/") # first index if token[0] == "f" or token[0] == "F": start = 0 else: start = int(token[0]) if start < 0: start = last + 1 + start # last if "/" not in raw: end = last else: if token[1] == "l" or token[1] == "L": token[1] = last end = int(token[1]) if end < 0: end = last + 1 + end # enforce start <= end if start > end: end = last return df.iloc[start:end] def find_brace_contents(word: str, raw: str) -> (bool, str): idx = raw.find(word) if idx == -1: return False, f"{word} not found" idx += len(word) + 1 substr = "" while idx < len(raw) and raw[idx] != ")": substr += raw[idx] idx += 1 if idx == len(raw): return False, "phrase not completed" else: return True, substr def parse_table_details(varlist: list, varnames: list, options: str) -> dict: """Function to parse Stata-13 style table calls. Note this is not for the latest version of Stata; syntax here: https://www.stata.com/manuals13/rtable.pdf >> table rowvar [colvar [supercolvar] [if] [in] [weight] [, options]. """ details = {"errmsg": ""} details["rowvars"] = [varlist.pop(0)] details["colvars"] = list(reversed(varlist)) # by() contents are super-rows found, superrows = find_brace_contents("by", options) if found and len(superrows) > 0: extras = superrows.split() for word in extras: if word not in varnames: details[ "errmsg" ] = f"Error: word {word} in by-list is not a variable name" return details if word not in details["rowvars"]: details["rowvars"].insert(0, word) # contents can be variable names or aggregation functions details["aggfuncs"], details["values"] = [], [] found, content = find_brace_contents("contents", options) if found and len(content) > 0: contents = content.split() for word in contents: if word in varnames: if word not in details["values"]: details["values"].append(word) else: if word not in details["aggfuncs"]: details["aggfuncs"].append(word) # default values details["totals"] = False if "nototals" in options else True details["suppress"] = False if "nosuppress" in options else True return details def parse_and_run( df: pd.DataFrame, command: str, varlist: str, exclusion: str, exp: str, weights: str, options: str, ) -> pd.DataFrame: """ Takes a dataframe and the parsed stata command line. Runs the appropriate command on a pre-existing ACRO object stata_acro Returns the result as a formatted string.
""" # TODO de-abbreviate according to # https://www.stata.com/manuals13/u11.pdf#u11.1.3ifexp global stata_acro varlist = varlist.split() # print(f' split varlist is {varlist}') # data reduction if len(exclusion) > 0: df = apply_stata_ifstmt(exclusion, df) if len(exp) > 0: df = apply_stata_expstmt(exp, df) # now look at the commands # session management first if command == "init": # initialise the acro object stata_acro = ACRO() return "acro analysis session created\n" elif command == "finalise": stata_acro.finalise("stata_out", "json") return "outputs and stata_out.json written\n" elif command == "print_outputs": stata_acro.print_outputs() return "" # now statistical commands elif command == "table": varnames = df.columns details = parse_table_details(varlist, varnames, options) if len(details["errmsg"]) > 0: return details["errmsg"] else: aggfuncs = list(map(lambda x: x.replace("sd", "std"), details["aggfuncs"])) rows, cols = [], [] for row in details["rowvars"]: rows.append(df[row]) for col in details["colvars"]: cols.append(df[col]) if len(aggfuncs) > 0 and len(details["values"]) > 0: safe_output = stata_acro.crosstab( index=rows, columns=cols, aggfunc=aggfuncs, values=details["values"], # suppress=details['suppress'], margins=details["totals"], margins_name="Total", ) else: safe_output = stata_acro.crosstab( index=rows, columns=cols, # suppress=details['suppress'], margins=details["totals"], margins_name="Total", ) options_str = "" formatting = [ "cellwidth", "csepwidth", "stubwidth", "scsepwidth", "center", "left", ] if any(word in options for word in formatting): options_str = ( "acro does not currently support table formatting commands.\n " ) return options_str + safe_output.to_string() + "\n" elif command == "regress": depvar = varlist[0] indep_vars = varlist[1:] new_df = df[varlist].dropna() y = new_df[depvar] x = new_df[indep_vars] x = add_constant(x) results = stata_acro.ols(y, x) res_str = results.summary().as_csv() return res_str elif command == "probit": depvar = varlist[0] indep_vars = varlist[1:] new_df = df[varlist].dropna() y = new_df[depvar].astype("category").cat.codes # numeric y.name = depvar x = new_df[indep_vars] x = add_constant(x) results = stata_acro.probit(y, x) res_str = results.summary().as_csv() return res_str else: return f"acro command not recognised: {command}"
7,197
30.432314
87
py
ACRO
ACRO-main/notebooks/test-nursery.py
""" ACRO Tests Copyright : Maha Albashir, Richard Preen, Jim Smith 2023. """ # import libraries import os import numpy as np import pandas as pd from scipy.io.arff import loadarff from acro import ACRO, add_constant # Instantiate ACRO by making an acro object print( "\n Creating an acro object().\n" "The TRE's risk appetite is read from default.yml\n" "and shown to the researcher and output checker" ) acro = ACRO() # Load test data # The dataset used in this notebook is the nursery dataset from OpenML. # - The dataset can be read directly from OpenML using the code commented in the next cell. # - In this version, it can be read directly from the local machine # if it has been downloaded. # - The code below reads the data from a folder called "data" # which we assume is at the same level as the folder where you are working. # - The path might need to be changed if the data has been downloaded and stored elsewhere. # - for example use: # path = os.path.join("data", "nursery.arff") # if the data is in a sub-folder of your work folder # commented out version to load from web # from sklearn.datasets import fetch_openml # data = fetch_openml(data_id=26, as_frame=True) # df = data.data # df["recommend"] = data.target # Version to load data from local directory path = os.path.join("../data", "nursery.arff") data = loadarff(path) df = pd.DataFrame(data[0]) df = df.select_dtypes([object]) df = df.stack().str.decode("utf-8").unstack() df.rename(columns={"class": "recommend"}, inplace=True) print("\n Data loaded, these are the first five rows") print(df.head()) # Convert 'more than 3' children to random between 4 and 10 # Change the children column from categorical to numeric # in order to be able to test some of the ACRO functions that require a numeric feature print("\nChanging number of children to integer type") df["children"].replace(to_replace={"more": "4"}, inplace=True) df["children"] = pd.to_numeric(df["children"]) df["children"] = df.apply( lambda row: row["children"] if row["children"] in (1, 2, 3) else np.random.randint(4, 10), axis=1, ) # Examples of producing tabular output # We rely on the industry-standard package **pandas** for tabulating data. # In the next few examples we show: # - first, how a researcher would normally make a call in pandas, # saving the results in a variable that they can view on screen (or save to file?) # - then how the call is identical in SACRO, except that: # - "pd" is replaced by "acro" # - the researcher immediately sees TRE output checking recommendations. print( "\nThe first set of examples show acro wrappers around " " standard tabulation routines from the pandas package." ) # Pandas crosstab # This is an example of crosstab using pandas. # We first make the call, then the second line print the outputs to wscreen. print("\nCalling crosstab of recommendation by parents using pandas") table = pd.crosstab(df.recommend, df.parents) print(table) # ACRO crosstab # This is an example of crosstab using ACRO. # The INFO lines show the researcher what will be reported to the output checkers. # Then the (suppressed as necessary) table is shown via. the print command as before. print("\nNow the same crosstab call using the ACRO interface") safe_table = acro.crosstab(df.recommend, df.parents) print("\nand this is the researchers output") print(safe_table) # ACRO crosstab with aggregation function # Mean() in this case # Then how Max and Min are not allowed by the code print( "\nIllustration of crosstab using an aggregation function " "- mean in this case." 
) safe_table = acro.crosstab(df.recommend, df.parents, values=df.children, aggfunc="mean") print("\nand this is the researcher's output") print(safe_table) print( "\nThis is what happens if you try to get max values for a cell." "\nSo that this script runs in one go, we've caught the exception " "thrown by ACRO." ) try: safe_table = acro.crosstab( df.recommend, df.parents, values=df.children, aggfunc="max" ) except ValueError as e: print("ValueError:") print(e) # ACRO pivot_table # This is an example of a pivot table using ACRO. # - Some researchers may prefer this to using crosstab. # - Again the call syntax is identical to the pandas "pd.pivot_table" # - in this case the output is non-disclosive print("\nIllustration of using the acro version of pandas pivot table") table = acro.pivot_table( df, index=["parents"], values=["children"], aggfunc=["mean", "std"] ) print("\nand this is the researcher's output") print(table) # Regression examples using ACRO # Again there is an industry-standard package in python, this time called **statsmodels**. # - The examples below illustrate the use of the ACRO wrappers around standard statsmodels functions # - Note that statsmodels can be called using an 'R-like' format # (using an 'r' suffix on the command names) # - most statsmodels functions return a "results object", # which has a "summary" function that produces printable/saveable outputs print( "\nThe next set of examples illustrates acro wrappers " "around functions from the statsmodels package" ) # Start by manipulating the nursery data to get two numeric variables # - The 'recommend' column is converted to an integer scale df["recommend"].replace( to_replace={ "not_recom": "0", "recommend": "1", "very_recom": "2", "priority": "3", "spec_prior": "4", }, inplace=True, ) df["recommend"] = pd.to_numeric(df["recommend"]) new_df = df[["recommend", "children"]] new_df = new_df.dropna() # ACRO OLS # This is an example of ordinary least squares regression using ACRO. # - Above, the recommend column was converted from categorical to numeric. # - Now we perform a linear regression between recommend and children. # - This version includes a constant (intercept) # - This is just to show how the regression is done using ACRO. # - **No correlation is expected to be seen by using these variables** y = new_df["recommend"] x = new_df["children"] x = add_constant(x) print("\nOrdinary Least Squares Regression") results = acro.ols(y, x) print("\nand this is the researcher's output") print(results.summary()) # ACRO OLSR # This is an example of ordinary least squares regression using the 'R-like' statsmodels API, # i.e. from a formula and dataframe using ACRO print("\nAnd the same, but passing a formula instead of two arrays") results = acro.olsr(formula="recommend ~ children", data=new_df) print("\nand this is the researcher's output") print(results.summary()) # ACRO Probit # This is an example of probit regression using ACRO # We use a different combination of variables from the original dataset.
new_df = df[["finance", "children"]] new_df = new_df.dropna() y = new_df["finance"].astype("category").cat.codes # numeric y.name = "finance" x = new_df["children"] x = add_constant(x) print("\n Example of a probit regression") results = acro.probit(y, x) print("\nand this is the researchers output") print(results.summary()) # ACRO Logit # This is an example of logistic regression using ACRO using the statmodels function print("\n Example of a logit regression") results = acro.logit(y, x) print("\nand this is the researchers output") print(results.summary()) # ACRO functionality to let users manage their outputs # # 1: List current ACRO outputs # This is an example of using the print_output function to list all the outputs created so far print("\nNow illustrating how users can manage their outputs") print( "\nStart by listing the outputs in the acro memory." "For each output the key line is the one starting 'Summary'" ) acro.print_outputs() # 2: Remove some ACRO outputs before finalising # This is an example of deleting some of the ACRO outputs. # The name of the output to be removed should be passed to the function remove_output. # - Currently, all outputs names contain timestamp; # that is the time when the output was created. # - The output name can be taken from the outputs listed by the print_outputs function, # - or by listing the results and choosing the specific output that needs to be removed print("\nNow removing two disclosive outputs") acro.remove_output("output_1") acro.remove_output("output_4") # 3: Rename ACRO outputs before finalising # This is an example of renaming the outputs to provide a more descriptive name. # The timestamp associated with the output name will not get overwritten print("\nUsers can rename output files to something more informative") acro.rename_output("output_2", "pivot_table") # 4: Add a comment to output # This is an example to add a comment to outputs. # It can be used to provide a description # or to pass additional information to the output checkers. print("\nUsers can add comments which the output checkers will see.") acro.add_comments("output_0", "Please let me have this table!") acro.add_comments("output_0", "6 cells were suppressed in this table") # 5: Add an unsupported output to the list of outputs # This is an example to add an unsupported outputs (such as images) to the list of outputs print("\nUsers can add files produced by an analysis aCRO doesn't cover") acro.custom_output( "XandY.jfif", "This output is an image showing the relationship between X and Y" ) # 6 (the big one) Finalise ACRO # This is an example of the function _finalise()_ # which the users must call at the end of each session. # - It takes each output and saves it to a CSV file. # - It also saves the SDC analysis for each output to a json file or Excel file # (depending on the extension of the name of the file provided as an input to the function) print( "\nUsers MUST call finalise to send their outputs to the checkers" " If they don't, the SDC analysis, and their outputs, are lost." ) output = acro.finalise("RES_TEST", "json")
9,954
32.518519
95
py
Dcm2Bids
Dcm2Bids-master/setup.py
#!/usr/bin/env python # -*- coding: utf-8 -*- description = """Reorganising NIfTI files from dcm2niix into the Brain Imaging Data Structure""" try: import pypandoc long_description = pypandoc.convert('README.md', 'rst') except(IOError, ImportError): long_description = open('README.md').read() import glob from setuptools import setup DISTNAME = "dcm2bids" DESCRIPTION = description VERSION = "0.4.0.1" AUTHOR = "Johan Carlin" AUTHOR_EMAIL = "" URL = "https://github.com/jooh/Dcm2Bids" DOWNLOAD_URL = URL + "/archive/" + VERSION + ".tar.gz" if __name__ == "__main__": setup( name=DISTNAME, version=VERSION, description=description, long_description=long_description, author=AUTHOR, author_email=AUTHOR_EMAIL, url=URL, download_url=DOWNLOAD_URL, packages=['dcm2bids'], scripts=glob.glob('scripts/dcm2bids*'), install_requires=['future'], )
1,007
24.2
96
py
Dcm2Bids
Dcm2Bids-master/dcm2bids/structure.py
# -*- coding: utf-8 -*- import os class Participant(object): """ """ def __init__(self, name, session=None): self._name = name self._session = session @property def name(self): return "sub-{}".format(self._name) @property def session(self): if self._session is None: return None else: return "ses-{}".format(self._session) @session.setter def session(self, session): self._session = session @property def directory(self): if self.hasSession(): return os.path.join(self.name, self.session) else: return self.name @property def prefix(self): if self.hasSession(): return "{}_{}".format(self.name, self.session) else: return self.name def hasSession(self): return self.session is not None class Acquisition(object): """ """ def __init__(self, base, dataType, suffix, customLabels=""): self.base = base self.dataType = dataType self._suffix = suffix self.customLabels = customLabels @property def suffix(self): suffix = "" if self.customLabels: suffix += "{}_".format(self.customLabels) suffix += self._suffix return suffix
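A quick sketch of how these two classes compose BIDS names (the values are illustrative): p = Participant("01", session="pre")
p.name       # "sub-01"
p.session    # "ses-pre"
p.prefix     # "sub-01_ses-pre"
p.directory  # os.path.join("sub-01", "ses-pre")
acq = Acquisition("/tmp/base", "anat", "T1w", customLabels="acq-highres")
acq.suffix   # "acq-highres_T1w"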
1,347
19.738462
64
py
Dcm2Bids
Dcm2Bids-master/dcm2bids/dcm2niix.py
# -*- coding: utf-8 -*- import glob import os from subprocess import call from collections import OrderedDict import re from .utils import clean def sidecar2meta(carfile): """extract series number and potential suffixes (reflecting e.g. separate images for each echo) from the dcm2niix sidecar file name""" hits = re.search( r"_series(?P<series>\d{3})(?P<suffix>\w*)\.json", os.path.split(carfile)[1] ) return {"seriesnum": int(hits.group("series")), "suffix": hits.group("suffix")} class Dcm2niix(object): """ """ def __init__( self, dicom_dir, participant=None, output="dcm2niix-example", outputdir=os.path.join(os.getcwd(), "tmp_dcm2bids"), ): self.dicomDir = dicom_dir self.participant = participant self.output = output self.outputdir = outputdir if not os.path.exists(self.outputdir): os.makedirs(self.outputdir) self.options = "-b y -ba y -z y -f '%f_%p_%t_series%3s'" self.sidecars = [] @property def outputDir(self): if self.participant is None: return os.path.join(self.outputdir, self.output) else: return os.path.join(self.outputdir, self.participant.prefix) def run(self): clean(self.outputDir) self.execute() carfiles = glob.glob(os.path.join(self.outputDir, "*.json")) carfiles.sort() self.sidecars = OrderedDict( (thiscar, sidecar2meta(thiscar)) for thiscar in carfiles ) return 0 def execute(self): with open(os.path.join(self.outputDir, "dcm2niix.log"), "a") as file_handle: for directory in self.dicomDir: commandStr = "dcm2niix {} -o {} {}" command = commandStr.format(self.options, self.outputDir, directory) call(command, shell=True, stdout=file_handle)
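For example, sidecar2meta maps the "%f_%p_%t_series%3s" naming used in self.options to metadata (the filename here is hypothetical): sidecar2meta("/tmp/sub01_t1_mprage_20230101120000_series004_e2.json")
# -> {"seriesnum": 4, "suffix": "_e2"}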
1,932
28.738462
84
py
Dcm2Bids
Dcm2Bids-master/dcm2bids/dcm2bids.py
# -*- coding: utf-8 -*- import glob import os import datetime import logging from collections import OrderedDict from .dcm2niix import Dcm2niix from .sidecarparser import Sidecarparser from .structure import Participant from .utils import ( load_json, make_directory_tree, splitext_, save_json, write_txt, read_participants, write_participants, ) from subprocess import call class Dcm2bids(object): """ """ def __init__( self, dicom_dir, config, clobber, participant, session=None, selectseries=None, outputdir=os.getcwd(), loglevel="INFO", anonymizer=None, ): self.dicom_dir = dicom_dir self.config = load_json(config) self.clobber = clobber self.extension = ".nii.gz" self.participant = Participant(participant, session) self.selectseries = selectseries self.outputdir = outputdir self.dicomdir = os.path.join(outputdir, "tmp_dcm2bids") self.anonymizer = anonymizer if not os.path.exists(self.outputdir): # need exist_ok to prevent race conditions in parallel execution os.makedirs(self.outputdir, exist_ok=True) self.derivdir = os.path.join(outputdir, "derivatives") logging.basicConfig( format="%(asctime)s %(message)s", datefmt="%Y/%m/%d %H:%M", filemode="a", filename=os.path.join(self.outputdir, "dcm2bids.log"), ) self.logger = logging.getLogger("dcm2bids") self.logger.setLevel(loglevel.upper()) self.logger.info("--- dcm2bids start ---") self.logger.info("participant: %s", participant) self.logger.info("session: %s", session) [ self.logger.info("dicom_dir: %s", os.path.realpath(thisdir)) for thisdir in dicom_dir ] self.logger.info("config: %s", os.path.realpath(config)) self.logger.info("outputdir: %s", os.path.realpath(self.outputdir)) @property def session(self): return self.participant.session @session.setter def session(self, value): self.participant.session = value def run(self): # convert dicoms to temporary dir self.logger.info("running dcm2niix DICOM to NIFTI conversion") dcm2niix = Dcm2niix(self.dicom_dir, self.participant, outputdir=self.dicomdir) dcm2niix.run() self.logger.info("parsing sidecars") parser = Sidecarparser( dcm2niix.sidecars, self.config["descriptions"], self.selectseries ) self.logger.info("moving acquisitions into BIDS output directory") for acq in parser.acquisitions: self._move(acq) self.logger.info("updating standard study files") if parser.acquisitions: self._updatestudyfiles() self.logger.info("--- dcm2bids finished without errors ---") return 0 def _move(self, acquisition): targetDir = os.path.join( self.outputdir, self.participant.directory, acquisition.dataType ) filename = "{}_{}".format(self.participant.prefix, acquisition.suffix) targetBase = os.path.join(targetDir, filename) # need to test for both because dcm2niix sometimes refuses to compress if os.path.isfile(targetBase + ".nii.gz") or os.path.isfile( targetBase + ".nii" ): if self.clobber: print("'{}' overwrites".format(filename)) for f in glob.glob(targetBase + ".*"): os.remove(f) for f in glob.glob(acquisition.base + ".*"): _, ext = splitext_(f) os.rename(f, targetBase + ext) else: print("'{}' already exists".format(filename)) return # if we make it this far, we can copy away make_directory_tree(targetDir) for f in glob.glob(acquisition.base + ".*"): _, ext = splitext_(f) if self.anonymizer and acquisition.dataType == "anat" and ".nii" in ext: # it's an anat scan - try the anonymizer command = " ".join([self.anonymizer, f, "--outfile", targetBase + ext]) self.logger.info( "anonymizing anatomical with %s: %s", self.anonymizer, targetBase + ext, ) call(command, shell=True) else: # just move os.rename(f, 
targetBase + ext) def _updatestudyfiles(self): if not os.path.exists(self.derivdir): os.makedirs(self.derivdir) # participant table partfile = os.path.join(self.outputdir, "participants.tsv") participants = read_participants(partfile) if not participants or not any( [part["participant_id"] == self.participant.name for part in participants] ): participants.append( OrderedDict(zip(("participant_id",), (self.participant.name,))) ) write_participants(partfile, participants) # dataset description descfile = os.path.join(self.outputdir, "dataset_description.json") if not os.path.exists(descfile): save_json( { "Name": "", "BIDSVersion": "1.1.0", "License": "", "Authors": [""], "Acknowledgments": "", "HowToAcknowledge": "", "Funding": "", "ReferencesAndLinks": [""], "DatasetDOI": "", }, descfile, ) # readme/change files readmefile = os.path.join(self.outputdir, "README") if not os.path.exists(readmefile): write_txt(readmefile) changefile = os.path.join(self.outputdir, "CHANGES") if not os.path.exists(changefile): write_txt( changefile, [ "Revision history for BIDS dataset.", "", "0.01 " + datetime.date.today().strftime("%Y-%m-%d"), "", " - Initialised study directory", ], ) # ignore file for bids-validator ignorefile = os.path.join(self.outputdir, ".bidsignore") if not os.path.exists(ignorefile): write_txt(ignorefile, ["tmp_dcm2bids/*", "dcm2bids.log"])
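A hypothetical invocation of the converter above; the paths are illustrative and config.json must hold the "descriptions" list consumed by the sidecar parser: app = Dcm2bids(["/data/dicoms/sub01"], "config.json", clobber=False, participant="01")
app.run()  # dcm2niix conversion, sidecar parsing, BIDS moves, study file updates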
6,650
33.82199
87
py
Dcm2Bids
Dcm2Bids-master/dcm2bids/utils.py
# -*- coding: utf-8 -*- import json import os import shutil import csv from collections import OrderedDict import sys def load_json(filename): with open(filename, "r") as f: data = json.load(f, strict=False) return data def save_json(data, filename): with open(filename, "w") as f: json.dump(data, f, indent=4) def write_txt(filename, lines=[]): with open(filename, "a") as f: for row in lines: f.write("%s\n" % row) def write_participants(filename, participants): with open(filename, "w") as f: writer = csv.DictWriter(f, delimiter="\t", fieldnames=participants[0].keys()) writer.writeheader() writer.writerows(participants) def read_participants(filename): if not os.path.exists(filename): return [] with open(filename, "r") as f: reader = csv.reader(f, delimiter="\t") # Check for python version and use the right syntax. if sys.version_info[0] < 3: header = reader.next() else: header = next(reader) return [OrderedDict(zip(header, row)) for row in reader] def make_directory_tree(directory): if not os.path.exists(directory): os.makedirs(directory) def clean(directory): make_directory_tree(directory) if not os.listdir(directory) == []: shutil.rmtree(directory) make_directory_tree(directory) else: make_directory_tree(directory) def splitext_(path): for ext in [".nii.gz"]: if path.endswith(ext): return path[: -len(ext)], path[-len(ext) :] return os.path.splitext(path)
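Note how splitext_ differs from os.path.splitext for compressed NIfTIs: splitext_("scan.nii.gz")  # ("scan", ".nii.gz"), treated as one extension
splitext_("scan.json")    # ("scan", ".json"), falls back to os.path.splitext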
1,632
23.014706
85
py
Dcm2Bids
Dcm2Bids-master/dcm2bids/__init__.py
# -*- coding: utf-8 -*- __version__ = "0.4.0.1"
48
15.333333
23
py
Dcm2Bids
Dcm2Bids-master/dcm2bids/sidecarparser.py
# -*- coding: utf-8 -*- import itertools import os from collections import defaultdict, OrderedDict from future.utils import iteritems from .structure import Acquisition from .utils import load_json, save_json, splitext_ import logging class Sidecarparser(object): def __init__(self, sidecars, descriptions, selectseries=None): self.sidecars = sidecars self.descriptions = descriptions self.selectseries = selectseries self.logger = logging.getLogger("dcm2bids") self.graph = self._generateGraph() self.acquisitions = self._generateAcquisitions() self.findRuns() def _generateGraph(self): graph = OrderedDict((_, []) for _ in self.sidecars) for sidecar, index in itertools.product( self.sidecars, range(len(self.descriptions)) ): if ( self.selectseries and not self.sidecars[sidecar]["seriesnum"] in self.selectseries ): continue self.sidecars[sidecar]["header"] = load_json(sidecar) self._sidecar = self.sidecars[sidecar] if self._respect(self.descriptions[index]["criteria"]): self.logger.debug("Description %d matches sidecar %s\n", index, sidecar) graph[sidecar].append(index) if "customHeader" in self.descriptions[index]: self.sidecars[sidecar]["header"].update( self.descriptions[index]["customHeader"] ) save_json(self.sidecars[sidecar]["header"], sidecar) # attempt to correct nifti header if a field has been # changed that affects it if "RepetitionTime" in self.descriptions[index]["customHeader"]: self.logger.info( "RepetitionTime field is changed to %.2f in " "description, updating nifti header to match...", self.descriptions[index]["customHeader"]["RepetitionTime"], ) try: import nibabel # load up the nifti and re-write the header # see also: # https://groups.google.com/d/msg/bids-discussion/jPVb-4Ah29A/fB52S8ExBgAJ niftipath = splitext_(sidecar)[0] + ".nii" if not os.path.exists(niftipath): niftipath += ".gz" ni = nibabel.load(niftipath) ni.header["pixdim"][4] = self.descriptions[index][ "customHeader" ]["RepetitionTime"] # ugly but necessary because the above fix may not # always work (all method because numpy does not # work reliably with python builtin function) assert (ni.header["xyzt_units"] == 10).all(), ( "sequences with non-standard xyzt_units field " "are currently unsupported" ) ni.to_filename(niftipath) self.logger.info("updated header successfully.") except ImportError: self.logger.warning( "nibabel is unavailable, unable to " "fix nifti-sidecar mismatch" ) except: raise return graph def _generateAcquisitions(self): rsl = [] print("") for sidecar, match_descs in iteritems(self.graph): base = splitext_(sidecar)[0] basename = os.path.basename(sidecar) if len(match_descs) == 1: print("'{}' satisfies one description".format(basename)) acq = self._acquisition(base, self.descriptions[match_descs[0]]) rsl.append(acq) elif len(match_descs) == 0: print("'{}' satisfies no description - skipping".format(basename)) else: print("'{}' satisfies several descriptions - skipping".format(basename)) return rsl def findRuns(self): def list_duplicates(seq): """ http://stackoverflow.com/a/5419576 """ tally = defaultdict(list) for i, item in enumerate(seq): tally[item].append(i) return ((key, locs) for key, locs in tally.items() if len(locs) > 1) suffixes = [_.suffix for _ in self.acquisitions] for suffix, dup in sorted(list_duplicates(suffixes)): print("'{}': has several runs".format(suffix)) for run, acq_index in enumerate(dup): runStr = "run-{:02d}".format(run + 1) acq = self.acquisitions[acq_index] if acq.customLabels: acq.customLabels += "_" + runStr else: acq.customLabels = runStr print("") def 
_acquisition(self, base, desc): acq = Acquisition(base, desc["dataType"], desc["suffix"]) if "customLabels" in desc: acq.customLabels = desc["customLabels"] if "customHeader" in desc and "TaskName" in desc["customHeader"]: if acq.customLabels: acq.customLabels += "_task-" + desc["customHeader"]["TaskName"] else: acq.customLabels = "task-" + desc["customHeader"]["TaskName"] return acq def _respect(self, criteria): isEqual = "equal" in criteria isIn = "in" in criteria isSuff = "suffix" in criteria isSeries = "seriesnum" in criteria # Check if there is some criteria if not any([isEqual, isIn, isSuff, isSeries]): return False if isEqual: rsl_equal = self._isEqual(criteria["equal"]) else: rsl_equal = True if isIn: rsl_in = self._isIn(criteria["in"]) else: rsl_in = True if isSuff: rsl_suff = self._isFilenameSuffix(criteria["suffix"]) else: rsl_suff = True if isSeries: rsl_series = self._isFilenameSeries(criteria["seriesnum"]) else: rsl_series = True return all([rsl_equal, rsl_in, rsl_suff, rsl_series]) def _isEqual(self, criteria): rsl = [] for tag, query in iteritems(criteria): rsl.append(query == self.get_value(tag)) return all(rsl) def _isIn(self, criteria): rsl = [] for tag, query in iteritems(criteria): if isinstance(query, list): for q in query: rsl.append(q in self.get_value(tag)) else: rsl.append(query in self.get_value(tag)) return all(rsl) def _isFilenameSuffix(self, criteria): if isinstance(criteria, list): return any(self._isFilenameSuffix(crit) for crit in criteria) return self._sidecar["suffix"] == criteria def _isFilenameSeries(self, criteria): if isinstance(criteria, list): return any(self._isFilenameSeries(crit) for crit in criteria) return self._sidecar["seriesnum"] == criteria def get_value(self, tag): if tag in self._sidecar["header"]: return self._sidecar["header"][tag] else: return ""
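For reference, a sketch of a description entry that _respect would match against a sidecar; the field values are illustrative and show the four criteria kinds handled above: description = {
    "dataType": "func",
    "suffix": "bold",
    "customLabels": "task-rest",
    "criteria": {
        "equal": {"EchoTime": 0.03},          # exact header match (_isEqual)
        "in": {"SeriesDescription": "rest"},  # substring match (_isIn)
        "suffix": "_e1",                      # dcm2niix filename suffix (_isFilenameSuffix)
        "seriesnum": [4, 5],                  # series number(s) (_isFilenameSeries)
    },
}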
7,753
38.969072
102
py
multimodal-vae-public
multimodal-vae-public-master/vision/sample.py
from __future__ import division from __future__ import print_function from __future__ import absolute_import import numpy as np from PIL import Image import torch import torch.nn.functional as F from torch.autograd import Variable from torchvision import transforms from torchvision.utils import save_image from train import load_checkpoint from datasets import obscure_image from datasets import add_watermark # this is the same loader used in datasets.py image_transform = transforms.Compose([transforms.Resize(64), transforms.CenterCrop(64), transforms.ToTensor()]) if __name__ == "__main__": import os import argparse parser = argparse.ArgumentParser() parser.add_argument('model_path', type=str, help='path to trained model file') parser.add_argument('--condition-file', type=str, help='if specified, condition on this image.') parser.add_argument('--condition-type', type=str, help='image|gray|edge|mask|obscured|watermark') parser.add_argument('--n-samples', type=int, default=1, help='Number of images to sample [default: 1]') parser.add_argument('--cuda', action='store_true', default=False, help='enables CUDA training') args = parser.parse_args() args.cuda = args.cuda and torch.cuda.is_available() if args.condition_type: assert args.condition_type in ['image', 'gray', 'edge', 'mask', 'obscured', 'watermark'] if not os.path.isdir('./samples'): os.makedirs('./samples') model = load_checkpoint(args.model_path, use_cuda=args.cuda) model.eval() if args.cuda: model.cuda() if args.condition_file and args.condition_type: image = Image.open(args.condition_file) if args.condition_type == 'image': image = image.convert('RGB') image = image_transform(image).unsqueeze(0) save_image(image, './samples/sample_image.png') if args.cuda: image = image.cuda() image = Variable(image, volatile=True) mu, logvar = model.get_params(1, image=image) elif args.condition_type == 'gray': image = image.convert('L') image = image_transform(image).unsqueeze(0) save_image(image, './samples/sample_gray.png') if args.cuda: image = image.cuda() image = Variable(image, volatile=True) mu, logvar = model.get_params(1, gray=image) elif args.condition_type == 'edge': image = image.convert('L') image = image_transform(image).unsqueeze(0) save_image(image, './samples/sample_edge.png') if args.cuda: image = image.cuda() image = Variable(image, volatile=True) mu, logvar = model.get_params(1, edge=image) elif args.condition_type == 'mask': image = image.convert('L') image = 1 - image_transform(image).unsqueeze(0) save_image(image, './samples/sample_mask.png') if args.cuda: image = image.cuda() image = Variable(image, volatile=True) mu, logvar = model.get_params(1, mask=image) elif args.condition_type == 'obscured': image = image.convert('RGB') image = obscure_image(image) image = image_transform(image).unsqueeze(0) save_image(image, './samples/sample_obscured.png') if args.cuda: image = image.cuda() image = Variable(image, volatile=True) mu, logvar = model.get_params(1, obscured=image) elif args.condition_type == 'watermark': image = image.convert('RGB') image = add_watermark(image) image = image_transform(image).unsqueeze(0) save_image(image, './samples/sample_watermark.png') if args.cuda: image = image.cuda() image = Variable(image, volatile=True) mu, logvar = model.get_params(1, watermark=image) std = logvar.mul(0.5).exp_() else: # sample from the standard Gaussian prior mu = Variable(torch.Tensor([0])) std = Variable(torch.Tensor([1])) if args.cuda: mu = mu.cuda() std = std.cuda() # draw from the standard Gaussian sample =
Variable(torch.randn(args.n_samples, model.n_latents)) if args.cuda: sample = sample.cuda() # sample from particular gaussian by multiplying + adding mu = mu.expand_as(sample) std = std.expand_as(sample) sample = sample.mul(std).add_(mu) # generate image and text image_recon = F.sigmoid(model.image_decoder(sample)).cpu().data gray_recon = F.sigmoid(model.gray_decoder(sample)).cpu().data edge_recon = F.sigmoid(model.edge_decoder(sample)).cpu().data mask_recon = F.sigmoid(model.mask_decoder(sample)).cpu().data obscured_recon = F.sigmoid(model.obscured_decoder(sample)).cpu().data watermark_recon = F.sigmoid(model.watermark_decoder(sample)).cpu().data # save image samples to filesystem save_image(image_recon, './samples/sample_image.png') save_image(gray_recon, './samples/sample_gray.png') save_image(edge_recon, './samples/sample_edge.png') save_image(mask_recon, './samples/sample_mask.png') save_image(rotated_recon, './samples/sample_rotated.png') save_image(obscured_recon, './samples/sample_obscured.png') save_image(watermark_recon, './samples/sample_watermark.png')
5,676
40.437956
96
py
multimodal-vae-public
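A minimal sketch of the sampling rule used in sample.py above: a standard normal draw is rescaled to the conditional Gaussian N(mu, std^2). The sizes below are illustrative assumptions, not values taken from a trained checkpoint.

import torch

n_samples, n_latents = 4, 250            # illustrative sizes
mu = torch.zeros(1, n_latents)           # would come from model.get_params(...)
std = torch.ones(1, n_latents)           # would come from logvar.mul(0.5).exp_()
eps = torch.randn(n_samples, n_latents)  # eps ~ N(0, I)
z = eps * std.expand_as(eps) + mu.expand_as(eps)  # z ~ N(mu, std^2)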
multimodal-vae-public-master/vision/setup.py
"""Grayscale, edge detection, and facial landmarks are pre-computed prior to training. Obscuring and watermarks are done in-place in datasets.py. >>> python setup.py grayscale ./data/images ./data/grayscale >>> python setup.py edge ./data/images ./data/edge >>> python setup.py mask ./data/images ./data/mask """ from __future__ import division from __future__ import print_function from __future__ import absolute_import import os import cv2 import dlib import random import numpy as np from PIL import Image from PIL import ImageFilter from skimage import feature from imutils import face_utils from collections import OrderedDict # define a dictionary that maps the indexes of the facial # landmarks to specific face regions FACIAL_LANDMARKS_IDXS = OrderedDict([ ("mouth", (48, 68)), ("right_eyebrow", (17, 22)), ("left_eyebrow", (22, 27)), ("right_eye", (36, 42)), ("left_eye", (42, 48)), ("nose", (27, 35)), ("jaw", (0, 17)) ]) def build_grayscale_dataset(in_dir, out_dir): """Generate a dataset of grayscale images. @param in_dir: string input directory of images. @param out_dir: string output directory of images. """ image_paths = os.listdir(in_dir) n_images = len(image_paths) for i, image_path in enumerate(image_paths): print('Building grayscale dataset: [%d/%d] images.' % (i + 1, n_images)) image_full_path = os.path.join(in_dir, image_path) image = Image.open(image_full_path) image = image.convert('RGB').convert('L') image.save(os.path.join(out_dir, image_path)) def build_edge_dataset(in_dir, out_dir, sigma=3): """Generate a dataset of (canny) edge-detected images. @param in_dir: string input directory of images. @param out_dir: string output directory of images. @param sigma: float (default: 3) smoothness for edge detection. """ image_paths = os.listdir(in_dir) n_images = len(image_paths) for i, image_path in enumerate(image_paths): print('Building edge-detected dataset: [%d/%d] images.' % (i + 1, n_images)) image_full_path = os.path.join(in_dir, image_path) image = Image.open(image_full_path).convert('L') image_npy = np.asarray(image).astype(np.float) / 255. image_npy = feature.canny(image_npy, sigma=sigma) image_npy = image_npy.astype(np.uint8) * 255 image = Image.fromarray(image_npy) image.save(os.path.join(out_dir, image_path)) def build_mask_dataset(in_dir, out_dir, model_path): """Generate a dataset of segmentation masks from images. @param in_dir: string input directory of images. @param out_dir: string output directory of images. @param model_path: string path to HOG model for facial features. """ # initialize dlib's face detector (HOG-based) and then create # the facial landmark predictor detector = dlib.get_frontal_face_detector() predictor = dlib.shape_predictor(model_path) image_paths = os.listdir(in_dir) n_images = len(image_paths) for i, image_path in enumerate(image_paths): print('Building face-mask dataset: [%d/%d] images.' % (i + 1, n_images)) image_full_path = os.path.join(in_dir, image_path) image = cv2.imread(image_full_path) gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # detect faces in the grayscale image rects = detector(gray, 1) try: rect = rects[0] # we are only going to use the first one # determine the facial landmarks for the face region, then # convert the landmark (x, y)-coordinates to a NumPy array shape = predictor(gray, rect) shape = face_utils.shape_to_np(shape) output = visualize_facial_landmarks(image, shape) cv2.imwrite(os.path.join(out_dir, image_path), output) except: # if for some reason no bounding box is found, send blank. 
output = np.ones_like(image) * 255 cv2.imwrite(os.path.join(out_dir, image_path), output) def visualize_facial_landmarks(image, shape, colors=None): # create two copies of the input image -- one for the # overlay and one for the final output image overlay = np.ones_like(image) * 255 # loop over the facial landmark regions individually for (i, name) in enumerate(FACIAL_LANDMARKS_IDXS.keys()): # grab the (x, y)-coordinates associated with the # face landmark (j, k) = FACIAL_LANDMARKS_IDXS[name] pts = shape[j:k] # check if are supposed to draw the jawline if name == "jaw": # since the jawline is a non-enclosed facial region, # just draw lines between the (x, y)-coordinates for l in range(1, len(pts)): ptA = tuple(pts[l - 1]) ptB = tuple(pts[l]) cv2.line(overlay, ptA, ptB, (0, 0, 0), 2) # otherwise, compute the convex hull of the facial # landmark coordinates points and display it else: hull = cv2.convexHull(pts) cv2.drawContours(overlay, [hull], -1, (0, 0, 0), -1) return overlay if __name__ == '__main__': import argparse parser = argparse.ArgumentParser() parser.add_argument('type', type=str, help='grayscale||edge|mask') parser.add_argument('in_dir', type=str, help='where images are located') parser.add_argument('out_dir', type=str, help='where images are to be saved') args = parser.parse_args() if args.type == 'grayscale': build_grayscale_dataset(args.in_dir, args.out_dir) elif args.type == 'edge': build_edge_dataset(args.in_dir, args.out_dir, sigma=2) elif args.type == 'mask': build_mask_dataset(args.in_dir, args.out_dir, './data/shape_predictor_68_face_landmarks.dat')
6,049
35.666667
84
py
multimodal-vae-public
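As a self-contained illustration of the Canny pipeline inside build_edge_dataset above ('face.jpg' and 'face_edge.jpg' are placeholder paths):

import numpy as np
from PIL import Image
from skimage import feature

image = Image.open('face.jpg').convert('L')         # placeholder input path
image_npy = np.asarray(image).astype(np.float64) / 255.
edges = feature.canny(image_npy, sigma=3)           # boolean edge map
Image.fromarray(edges.astype(np.uint8) * 255).save('face_edge.jpg')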
multimodal-vae-public-master/vision/model.py
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import

import sys

import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F


class MVAE(nn.Module):
    def __init__(self, n_latents=250, use_cuda=False):
        super(MVAE, self).__init__()
        # define q(z|x_i) for i = 1...6
        self.image_encoder = ImageEncoder(n_latents, 3)
        self.gray_encoder = ImageEncoder(n_latents, 1)
        self.edge_encoder = ImageEncoder(n_latents, 1)
        self.mask_encoder = ImageEncoder(n_latents, 1)
        self.obscured_encoder = ImageEncoder(n_latents, 3)
        self.watermark_encoder = ImageEncoder(n_latents, 3)
        # define p(x_i|z) for i = 1...6
        self.image_decoder = ImageDecoder(n_latents, 3)
        self.gray_decoder = ImageDecoder(n_latents, 1)
        self.edge_decoder = ImageDecoder(n_latents, 1)
        self.mask_decoder = ImageDecoder(n_latents, 1)
        self.obscured_decoder = ImageDecoder(n_latents, 3)
        self.watermark_decoder = ImageDecoder(n_latents, 3)
        # define q(z|x) = q(z|x_1)...q(z|x_6)
        self.experts = ProductOfExperts()
        self.n_latents = n_latents
        self.use_cuda = use_cuda

    def reparametrize(self, mu, logvar):
        if self.training:
            std = logvar.mul(0.5).exp_()
            eps = Variable(std.data.new(std.size()).normal_())
            return eps.mul(std).add_(mu)
        else:  # return mean during inference
            return mu

    def forward(self, image=None, gray=None, edge=None, mask=None,
                obscured=None, watermark=None):
        mu, logvar = self.get_params(image=image, gray=gray, edge=edge,
                                     mask=mask, obscured=obscured,
                                     watermark=watermark)
        # reparametrization trick to sample
        z = self.reparametrize(mu, logvar)
        # reconstruct inputs based on sample
        image_recon = self.image_decoder(z)
        gray_recon = self.gray_decoder(z)
        edge_recon = self.edge_decoder(z)
        mask_recon = self.mask_decoder(z)
        obscured_recon = self.obscured_decoder(z)
        watermark_recon = self.watermark_decoder(z)
        return (image_recon, gray_recon, edge_recon, mask_recon,
                obscured_recon, watermark_recon, mu, logvar)

    def get_params(self, image=None, gray=None, edge=None, mask=None,
                   obscured=None, watermark=None):
        # define universal expert
        batch_size = get_batch_size(image, gray, edge, mask, obscured, watermark)
        use_cuda = next(self.parameters()).is_cuda  # check if CUDA
        # initialize the universal prior expert
        mu, logvar = prior_expert((1, batch_size, self.n_latents),
                                  use_cuda=use_cuda)
        if image is not None:
            image_mu, image_logvar = self.image_encoder(image)
            mu = torch.cat((mu, image_mu.unsqueeze(0)), dim=0)
            logvar = torch.cat((logvar, image_logvar.unsqueeze(0)), dim=0)
        if gray is not None:
            gray_mu, gray_logvar = self.gray_encoder(gray)
            mu = torch.cat((mu, gray_mu.unsqueeze(0)), dim=0)
            logvar = torch.cat((logvar, gray_logvar.unsqueeze(0)), dim=0)
        if edge is not None:
            edge_mu, edge_logvar = self.edge_encoder(edge)
            mu = torch.cat((mu, edge_mu.unsqueeze(0)), dim=0)
            logvar = torch.cat((logvar, edge_logvar.unsqueeze(0)), dim=0)
        if mask is not None:
            mask_mu, mask_logvar = self.mask_encoder(mask)
            mu = torch.cat((mu, mask_mu.unsqueeze(0)), dim=0)
            logvar = torch.cat((logvar, mask_logvar.unsqueeze(0)), dim=0)
        if obscured is not None:
            obscured_mu, obscured_logvar = self.obscured_encoder(obscured)
            mu = torch.cat((mu, obscured_mu.unsqueeze(0)), dim=0)
            logvar = torch.cat((logvar, obscured_logvar.unsqueeze(0)), dim=0)
        if watermark is not None:
            watermark_mu, watermark_logvar = self.watermark_encoder(watermark)
            mu = torch.cat((mu, watermark_mu.unsqueeze(0)), dim=0)
            logvar = torch.cat((logvar, watermark_logvar.unsqueeze(0)), dim=0)
        # product of experts to combine gaussians
        mu, logvar = self.experts(mu, logvar)
        return mu, logvar


def get_batch_size(*args):
    for arg in args:
        if arg is not None:
            return arg.size(0)


class ImageEncoder(nn.Module):
    """Parametrizes q(z|x). We will use this for every q(z|x_i) for all i.

    @param n_latents: integer
                      number of latent dimensions
    @param n_channels: integer [default: 3]
                       number of input channels
    """
    def __init__(self, n_latents, n_channels=3):
        super(ImageEncoder, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(n_channels, 32, 4, 2, 1, bias=False),
            Swish(),
            nn.Conv2d(32, 64, 4, 2, 1, bias=False),
            nn.BatchNorm2d(64),
            Swish(),
            nn.Conv2d(64, 128, 4, 2, 1, bias=False),
            nn.BatchNorm2d(128),
            Swish(),
            nn.Conv2d(128, 256, 4, 1, 0, bias=False),
            nn.BatchNorm2d(256),
            Swish())
        self.classifier = nn.Sequential(
            nn.Linear(256 * 5 * 5, 512),
            Swish(),
            nn.Dropout(p=0.1),
            nn.Linear(512, n_latents * 2))
        self.n_latents = n_latents

    def forward(self, x):
        n_latents = self.n_latents
        x = self.features(x)
        x = x.view(-1, 256 * 5 * 5)
        x = self.classifier(x)
        return x[:, :n_latents], x[:, n_latents:]


class ImageDecoder(nn.Module):
    """Parametrizes p(x|z). We will use this for every p(x_i|z) for all i.

    @param n_latents: integer
                      number of latent dimensions
    @param n_channels: integer [default: 3]
                       number of input channels
    """
    def __init__(self, n_latents, n_channels=3):
        super(ImageDecoder, self).__init__()
        self.upsample = nn.Sequential(
            nn.Linear(n_latents, 256 * 5 * 5),
            Swish())
        self.hallucinate = nn.Sequential(
            nn.ConvTranspose2d(256, 128, 4, 1, 0, bias=False),
            nn.BatchNorm2d(128),
            Swish(),
            nn.ConvTranspose2d(128, 64, 4, 2, 1, bias=False),
            nn.BatchNorm2d(64),
            Swish(),
            nn.ConvTranspose2d(64, 32, 4, 2, 1, bias=False),
            nn.BatchNorm2d(32),
            Swish(),
            nn.ConvTranspose2d(32, n_channels, 4, 2, 1, bias=False))

    def forward(self, z):
        # the input will be a vector of size |n_latents|
        z = self.upsample(z)
        z = z.view(-1, 256, 5, 5)
        z = self.hallucinate(z)
        return z  # no sigmoid!


class ProductOfExperts(nn.Module):
    """Return parameters for product of independent experts.
    See https://arxiv.org/pdf/1410.7827.pdf for equations.

    @param mu: M x D for M experts
    @param logvar: M x D for M experts
    """
    def forward(self, mu, logvar, eps=1e-8):
        var = torch.exp(logvar) + eps
        T = 1. / (var + eps)  # precision of i-th Gaussian expert at point x
        pd_mu = torch.sum(mu * T, dim=0) / torch.sum(T, dim=0)
        pd_var = 1. / torch.sum(T, dim=0)
        pd_logvar = torch.log(pd_var + eps)
        return pd_mu, pd_logvar


class Swish(nn.Module):
    """https://arxiv.org/abs/1710.05941"""
    def forward(self, x):
        return x * F.sigmoid(x)


def prior_expert(size, use_cuda=False):
    """Universal prior expert. Here we use a spherical
    Gaussian: N(0, 1).

    @param size: integer
                 dimensionality of Gaussian
    @param use_cuda: boolean [default: False]
                     cast CUDA on variables
    """
    mu = Variable(torch.zeros(size))
    logvar = Variable(torch.log(torch.ones(size)))
    if use_cuda:
        mu, logvar = mu.cuda(), logvar.cuda()
    return mu, logvar
8,131
36.13242
82
py
multimodal-vae-public
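ProductOfExperts above implements the standard product-of-Gaussians identity: with per-expert precisions T_i = 1/var_i, the product has mean sum_i(mu_i * T_i) / sum_i(T_i) and variance 1 / sum_i(T_i). A small numeric sketch with two one-dimensional experts:

import torch

mu = torch.tensor([[0.0], [2.0]])    # two experts, one latent dimension
logvar = torch.zeros(2, 1)           # both experts have unit variance
T = 1.0 / torch.exp(logvar)          # per-expert precision
pd_mu = (mu * T).sum(0) / T.sum(0)   # tensor([1.0]): precision-weighted mean
pd_var = 1.0 / T.sum(0)              # tensor([0.5]): tighter than either expert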
multimodal-vae-public-master/vision/datasets.py
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import

import os
import random
import numpy as np
from copy import deepcopy
from PIL import Image

import torch
from torch.utils.data.dataset import Dataset
from torchvision import transforms

N_MODALITIES = 6
VALID_PARTITIONS = {'train': 0, 'val': 1, 'test': 2}


class CelebVision(Dataset):
    """Define dataset of images of celebrities with a series of
    transformations applied to it. The user needs to have pre-defined
    the Anno and Eval folder from
    http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html

    @param partition: string
                      train|val|test [default: train]
                      See VALID_PARTITIONS global variable.
    @param data_dir: string
                     path to root of dataset images [default: ./data]
    """
    def __init__(self, partition='train', data_dir='./data'):
        super(CelebVision, self).__init__()
        self.partition = partition
        self.data_dir = data_dir
        assert partition in VALID_PARTITIONS.keys()
        # load a list of images for the user-chosen partition
        self.image_paths = load_eval_partition(partition, data_dir=data_dir)
        self.size = int(len(self.image_paths))
        # resize image to 64 x 64
        self.image_transform = transforms.Compose([transforms.Resize(64),
                                                   transforms.CenterCrop(64),
                                                   transforms.ToTensor()])

    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        image_path = os.path.join(self.data_dir, 'img_align_celeba',
                                  self.image_paths[index])
        gray_path = os.path.join(self.data_dir, 'img_align_celeba_grayscale',
                                 self.image_paths[index])
        edge_path = os.path.join(self.data_dir, 'img_align_celeba_edge',
                                 self.image_paths[index])
        mask_path = os.path.join(self.data_dir, 'img_align_celeba_mask',
                                 self.image_paths[index])

        # open PIL Image -- these are fixed versions of image that we save
        image = Image.open(image_path).convert('RGB')
        gray_image = Image.open(gray_path).convert('L')
        edge_image = Image.open(edge_path).convert('L')
        mask_image = Image.open(mask_path).convert('L')

        # add block to image
        obscured_image = Image.open(image_path).convert('RGB')
        obscured_image = obscure_image(obscured_image)

        # add watermark to image
        watermark_image = Image.open(image_path).convert('RGB')
        watermark_image = add_watermark(watermark_image,
                                        watermark_path='./watermark.png')

        image = self.image_transform(image)
        gray_image = self.image_transform(gray_image)
        edge_image = self.image_transform(edge_image)
        mask_image = self.image_transform(mask_image)
        obscured_image = self.image_transform(obscured_image)
        watermark_image = self.image_transform(watermark_image)

        # masks are normally white with black lines but we want to
        # be consistent with edges and MNIST-stuff, so we make the
        # background black and the lines white.
        mask_image = 1 - mask_image

        # return everything as a bundle
        return (image, gray_image, edge_image, mask_image,
                obscured_image, watermark_image)

    def __len__(self):
        return self.size


def obscure_image(image):
    """Block half of the image with black pixels along a vertical split.

    @param image: np.array
                  color image
    @return: np.array
             color image with vertically blocked pixels
    """
    image_npy = deepcopy(np.asarray(image))
    # we obscure along a vertical split because it should be easier to
    # complete a face given a vertical half than a horizontal half
    center_w = image_npy.shape[1] // 2
    image_npy[:, center_w + 1:, :] = 0
    image = Image.fromarray(image_npy)
    return image


def add_watermark(image, watermark_path='./watermark.png'):
    """Overlay image of watermark on color image.

    @param image: np.array
                  color image
    @param watermark_path: string
                           path to fixed watermark image
                           [default: ./watermark.png]
    @return: np.array
             color image with overlayed watermark
    """
    watermark = Image.open(watermark_path)
    nw, nh = image.size[0], image.size[1]
    watermark = watermark.resize((nw, nh), Image.BICUBIC)
    image.paste(watermark, (0, 0), watermark)
    return image
4,896
36.669231
78
py
multimodal-vae-public
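A short usage sketch of the two on-the-fly corruptions defined above; 'face.jpg' is a placeholder path, and ./watermark.png must exist just as the dataset code assumes:

from PIL import Image
from datasets import obscure_image, add_watermark

face = Image.open('face.jpg').convert('RGB')  # placeholder path
blocked = obscure_image(face.copy())          # right half set to black
marked = add_watermark(face.copy())           # watermark pasted over the image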
multimodal-vae-public-master/vision/train.py
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import

import os
import sys
import shutil
from tqdm import tqdm

import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision.utils import save_image

import datasets
from model import MVAE
from datasets import N_MODALITIES


def elbo_loss(recon_image, image, recon_gray, gray, recon_edge, edge,
              recon_mask, mask, recon_obscured, obscured,
              recon_watermark, watermark, mu, logvar, annealing_factor=1.):
    BCE = 0
    if recon_image is not None and image is not None:
        recon_image = recon_image.view(-1, 3 * 64 * 64)
        image = image.view(-1, 3 * 64 * 64)
        image_BCE = torch.sum(binary_cross_entropy_with_logits(recon_image, image), dim=1)
        BCE += image_BCE

    if recon_gray is not None and gray is not None:
        recon_gray = recon_gray.view(-1, 1 * 64 * 64)
        gray = gray.view(-1, 1 * 64 * 64)
        gray_BCE = torch.sum(binary_cross_entropy_with_logits(recon_gray, gray), dim=1)
        BCE += gray_BCE

    if recon_edge is not None and edge is not None:
        recon_edge = recon_edge.view(-1, 1 * 64 * 64)
        edge = edge.view(-1, 1 * 64 * 64)
        edge_BCE = torch.sum(binary_cross_entropy_with_logits(recon_edge, edge), dim=1)
        BCE += edge_BCE

    if recon_mask is not None and mask is not None:
        recon_mask = recon_mask.view(-1, 1 * 64 * 64)
        mask = mask.view(-1, 1 * 64 * 64)
        mask_BCE = torch.sum(binary_cross_entropy_with_logits(recon_mask, mask), dim=1)
        BCE += mask_BCE

    if recon_obscured is not None and obscured is not None:
        recon_obscured = recon_obscured.view(-1, 3 * 64 * 64)
        obscured = obscured.view(-1, 3 * 64 * 64)
        obscured_BCE = torch.sum(binary_cross_entropy_with_logits(recon_obscured, obscured), dim=1)
        BCE += obscured_BCE

    if recon_watermark is not None and watermark is not None:
        recon_watermark = recon_watermark.view(-1, 3 * 64 * 64)
        watermark = watermark.view(-1, 3 * 64 * 64)
        watermark_BCE = torch.sum(binary_cross_entropy_with_logits(recon_watermark, watermark), dim=1)
        BCE += watermark_BCE

    # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
    # https://arxiv.org/abs/1312.6114
    KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=1)
    # NOTE: we use lambda_i = 1 for all i since each modality is roughly equal
    ELBO = torch.mean(BCE / float(N_MODALITIES) + annealing_factor * KLD)
    return ELBO


def binary_cross_entropy_with_logits(input, target):
    """Sigmoid Activation + Binary Cross Entropy

    @param input: torch.Tensor (size N)
    @param target: torch.Tensor (size N)
    @return loss: torch.Tensor (size N)
    """
    if not (target.size() == input.size()):
        raise ValueError("Target size ({}) must be the same as input size ({})".format(
            target.size(), input.size()))

    return (torch.clamp(input, 0) - input * target
            + torch.log(1 + torch.exp(-torch.abs(input))))


class AverageMeter(object):
    """Computes and stores the average and current value"""
    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


def save_checkpoint(state, is_best, folder='./', filename='checkpoint.pth.tar'):
    if not os.path.isdir(folder):
        os.mkdir(folder)
    torch.save(state, os.path.join(folder, filename))
    if is_best:
        shutil.copyfile(os.path.join(folder, filename),
                        os.path.join(folder, 'model_best.pth.tar'))


def load_checkpoint(file_path, use_cuda=False):
    checkpoint = torch.load(file_path) if use_cuda else \
        torch.load(file_path, map_location=lambda storage, location: storage)
    model = MVAE(checkpoint['n_latents'])
    model.load_state_dict(checkpoint['state_dict'])
    return model


if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--n-latents', type=int, default=250,
                        help='size of the latent embedding (default: 250)')
    parser.add_argument('--batch-size', type=int, default=50, metavar='N',
                        help='input batch size for training (default: 50)')
    parser.add_argument('--epochs', type=int, default=100, metavar='N',
                        help='number of epochs to train (default: 100)')
    parser.add_argument('--annealing-epochs', type=int, default=20, metavar='N',
                        help='number of epochs to anneal KL for [default: 20]')
    parser.add_argument('--lr', type=float, default=1e-4, metavar='LR',
                        help='learning rate (default: 1e-4)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--cuda', action='store_true', default=False,
                        help='enables CUDA training')
    args = parser.parse_args()
    args.cuda = args.cuda and torch.cuda.is_available()

    new_directories = ['./results', './results/image', './results/gray',
                       './results/edge', './results/mask',
                       './results/obscured', './results/watermark']
    for new_dir in new_directories:
        if not os.path.isdir(new_dir):
            os.makedirs(new_dir)

    train_loader = torch.utils.data.DataLoader(
        datasets.CelebVision(partition='train', data_dir='./data'),
        batch_size=args.batch_size, shuffle=True)
    N_mini_batches = len(train_loader)
    test_loader = torch.utils.data.DataLoader(
        datasets.CelebVision(partition='val', data_dir='./data'),
        batch_size=args.batch_size, shuffle=False)

    model = MVAE(args.n_latents, use_cuda=args.cuda)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    if args.cuda:
        model.cuda()

    def train(epoch):
        model.train()
        train_loss_meter = AverageMeter()

        for batch_idx, (image, gray_image, edge_image, mask_image,
                        obscured_image, watermark_image) in enumerate(train_loader):
            if epoch < args.annealing_epochs:
                # compute the KL annealing factor for the current
                # mini-batch in the current epoch
                annealing_factor = (float(batch_idx + (epoch - 1) * N_mini_batches + 1) /
                                    float(args.annealing_epochs * N_mini_batches))
            else:
                # by default the KL annealing factor is unity
                annealing_factor = 1.0

            if args.cuda:
                image = image.cuda()
                gray_image = gray_image.cuda()
                edge_image = edge_image.cuda()
                mask_image = mask_image.cuda()
                obscured_image = obscured_image.cuda()
                watermark_image = watermark_image.cuda()

            image = Variable(image)
            gray_image = Variable(gray_image)
            edge_image = Variable(edge_image)
            mask_image = Variable(mask_image)
            obscured_image = Variable(obscured_image)
            watermark_image = Variable(watermark_image)
            batch_size = image.size(0)

            # refresh the optimizer
            optimizer.zero_grad()

            # compute reconstructions using all the modalities
            (joint_recon_image, joint_recon_gray, joint_recon_edge,
             joint_recon_mask, joint_recon_obscured, joint_recon_watermark,
             joint_mu, joint_logvar) = model(image, gray_image, edge_image,
                                             mask_image, obscured_image,
                                             watermark_image)

            # compute reconstructions using each of the individual modalities
            (image_recon_image, image_recon_gray, image_recon_edge,
             image_recon_mask, image_recon_obscured, image_recon_watermark,
             image_mu, image_logvar) = model(image=image)

            (gray_recon_image, gray_recon_gray, gray_recon_edge,
             gray_recon_mask, gray_recon_obscured, gray_recon_watermark,
             gray_mu, gray_logvar) = model(gray=gray_image)

            (edge_recon_image, edge_recon_gray, edge_recon_edge,
             edge_recon_mask, edge_recon_obscured, edge_recon_watermark,
             edge_mu, edge_logvar) = model(edge=edge_image)

            (mask_recon_image, mask_recon_gray, mask_recon_edge,
             mask_recon_mask, mask_recon_obscured, mask_recon_watermark,
             mask_mu, mask_logvar) = model(mask=mask_image)

            (obscured_recon_image, obscured_recon_gray, obscured_recon_edge,
             obscured_recon_mask, obscured_recon_obscured, obscured_recon_watermark,
             obscured_mu, obscured_logvar) = model(obscured=obscured_image)

            (watermark_recon_image, watermark_recon_gray, watermark_recon_edge,
             watermark_recon_mask, watermark_recon_obscured, watermark_recon_watermark,
             watermark_mu, watermark_logvar) = model(watermark=watermark_image)

            # compute joint loss
            joint_train_loss = elbo_loss(joint_recon_image, image,
                                         joint_recon_gray, gray_image,
                                         joint_recon_edge, edge_image,
                                         joint_recon_mask, mask_image,
                                         joint_recon_obscured, obscured_image,
                                         joint_recon_watermark, watermark_image,
                                         joint_mu, joint_logvar,
                                         annealing_factor=annealing_factor)

            # compute loss with unimodal inputs
            image_train_loss = elbo_loss(image_recon_image, image,
                                         image_recon_gray, gray_image,
                                         image_recon_edge, edge_image,
                                         image_recon_mask, mask_image,
                                         image_recon_obscured, obscured_image,
                                         image_recon_watermark, watermark_image,
                                         image_mu, image_logvar,
                                         annealing_factor=annealing_factor)

            gray_train_loss = elbo_loss(gray_recon_image, image,
                                        gray_recon_gray, gray_image,
                                        gray_recon_edge, edge_image,
                                        gray_recon_mask, mask_image,
                                        gray_recon_obscured, obscured_image,
                                        gray_recon_watermark, watermark_image,
                                        gray_mu, gray_logvar,
                                        annealing_factor=annealing_factor)

            edge_train_loss = elbo_loss(edge_recon_image, image,
                                        edge_recon_gray, gray_image,
                                        edge_recon_edge, edge_image,
                                        edge_recon_mask, mask_image,
                                        edge_recon_obscured, obscured_image,
                                        edge_recon_watermark, watermark_image,
                                        edge_mu, edge_logvar,
                                        annealing_factor=annealing_factor)

            mask_train_loss = elbo_loss(mask_recon_image, image,
                                        mask_recon_gray, gray_image,
                                        mask_recon_edge, edge_image,
                                        mask_recon_mask, mask_image,
                                        mask_recon_obscured, obscured_image,
                                        mask_recon_watermark, watermark_image,
                                        mask_mu, mask_logvar,
                                        annealing_factor=annealing_factor)

            obscured_train_loss = elbo_loss(obscured_recon_image, image,
                                            obscured_recon_gray, gray_image,
                                            obscured_recon_edge, edge_image,
                                            obscured_recon_mask, mask_image,
                                            obscured_recon_obscured, obscured_image,
                                            obscured_recon_watermark, watermark_image,
                                            obscured_mu, obscured_logvar,
                                            annealing_factor=annealing_factor)

            watermark_train_loss = elbo_loss(watermark_recon_image, image,
                                             watermark_recon_gray, gray_image,
                                             watermark_recon_edge, edge_image,
                                             watermark_recon_mask, mask_image,
                                             watermark_recon_obscured, obscured_image,
                                             watermark_recon_watermark, watermark_image,
                                             watermark_mu, watermark_logvar,
                                             annealing_factor=annealing_factor)

            train_loss = joint_train_loss + image_train_loss + gray_train_loss \
                         + edge_train_loss + mask_train_loss + obscured_train_loss \
                         + watermark_train_loss
            train_loss_meter.update(train_loss.data[0], len(image))

            # compute and take gradient step
            train_loss.backward()
            optimizer.step()

            if batch_idx % args.log_interval == 0:
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tAnnealing Factor: {:.3f}'.format(
                    epoch, batch_idx * len(image), len(train_loader.dataset),
                    100. * batch_idx / len(train_loader), train_loss_meter.avg,
                    annealing_factor))

        print('====> Epoch: {}\tLoss: {:.4f}'.format(epoch, train_loss_meter.avg))

    def test(epoch):
        model.eval()
        test_loss = 0
        pbar = tqdm(total=len(test_loader))

        for batch_idx, (image, gray_image, edge_image, mask_image,
                        obscured_image, watermark_image) in enumerate(test_loader):
            if args.cuda:
                image = image.cuda()
                gray_image = gray_image.cuda()
                edge_image = edge_image.cuda()
                mask_image = mask_image.cuda()
                obscured_image = obscured_image.cuda()
                watermark_image = watermark_image.cuda()

            image = Variable(image)
            gray_image = Variable(gray_image)
            edge_image = Variable(edge_image)
            mask_image = Variable(mask_image)
            obscured_image = Variable(obscured_image)
            watermark_image = Variable(watermark_image)
            batch_size = image.size(0)

            # for ease, only compute the joint loss
            (joint_recon_image, joint_recon_gray, joint_recon_edge,
             joint_recon_mask, joint_recon_obscured, joint_recon_watermark,
             joint_mu, joint_logvar) = model(image, gray_image, edge_image,
                                             mask_image, obscured_image,
                                             watermark_image)

            test_loss += elbo_loss(joint_recon_image, image,
                                   joint_recon_gray, gray_image,
                                   joint_recon_edge, edge_image,
                                   joint_recon_mask, mask_image,
                                   joint_recon_obscured, obscured_image,
                                   joint_recon_watermark, watermark_image,
                                   joint_mu, joint_logvar).data[0]

            if batch_idx == 0:
                # from time to time, plot the reconstructions to
                # see how well the model is learning
                n = min(batch_size, 8)
                image_comparison = torch.cat([
                    image[:n],
                    F.sigmoid(joint_recon_image).view(args.batch_size, 3, 64, 64)[:n]])
                gray_comparison = torch.cat([
                    gray_image[:n],
                    F.sigmoid(joint_recon_gray).view(args.batch_size, 1, 64, 64)[:n]])
                edge_comparison = torch.cat([
                    edge_image[:n],
                    F.sigmoid(joint_recon_edge).view(args.batch_size, 1, 64, 64)[:n]])
                mask_comparison = torch.cat([
                    mask_image[:n],
                    F.sigmoid(joint_recon_mask).view(args.batch_size, 1, 64, 64)[:n]])
                obscured_comparison = torch.cat([
                    obscured_image[:n],
                    F.sigmoid(joint_recon_obscured).view(args.batch_size, 3, 64, 64)[:n]])
                watermark_comparison = torch.cat([
                    watermark_image[:n],
                    F.sigmoid(joint_recon_watermark).view(args.batch_size, 3, 64, 64)[:n]])
                # save these reconstructions
                save_image(image_comparison.data.cpu(),
                           './results/image/reconstruction_%d.png' % epoch, nrow=n)
                save_image(gray_comparison.data.cpu(),
                           './results/gray/reconstruction_%d.png' % epoch, nrow=n)
                save_image(edge_comparison.data.cpu(),
                           './results/edge/reconstruction_%d.png' % epoch, nrow=n)
                save_image(mask_comparison.data.cpu(),
                           './results/mask/reconstruction_%d.png' % epoch, nrow=n)
                save_image(obscured_comparison.data.cpu(),
                           './results/obscured/reconstruction_%d.png' % epoch, nrow=n)
                save_image(watermark_comparison.data.cpu(),
                           './results/watermark/reconstruction_%d.png' % epoch, nrow=n)

            pbar.update()

        pbar.close()
        test_loss /= len(test_loader)
        print('====> Test Loss: {:.4f}'.format(test_loss))
        return test_loss

    best_loss = sys.maxint
    for epoch in range(1, args.epochs + 1):
        train(epoch)
        loss = test(epoch)
        is_best = loss < best_loss
        best_loss = min(loss, best_loss)
        # save the best model and current model
        save_checkpoint({
            'state_dict': model.state_dict(),
            'best_loss': best_loss,
            'n_latents': args.n_latents,
            'optimizer': optimizer.state_dict(),
        }, is_best, folder='./trained_models')
19,025
47.659847
107
py
multimodal-vae-public
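The hand-rolled binary_cross_entropy_with_logits above is the standard numerically stable form max(x, 0) - x*t + log(1 + exp(-|x|)). A quick sketch checking it against the modern PyTorch built-in (element-wise; note the repository itself predates the reduction argument):

import torch
import torch.nn.functional as F

x = torch.randn(5)  # logits
t = torch.rand(5)   # soft targets in [0, 1]
manual = torch.clamp(x, min=0) - x * t + torch.log(1 + torch.exp(-torch.abs(x)))
builtin = F.binary_cross_entropy_with_logits(x, t, reduction='none')
assert torch.allclose(manual, builtin, atol=1e-6)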
multimodal-vae-public-master/mnist/sample.py
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import

import sys
import numpy as np

import torch
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import datasets, transforms
from torchvision.utils import save_image

from train import load_checkpoint


def fetch_mnist_image(label):
    """Return a random image from the MNIST dataset with label.

    @param label: integer
                  an integer from 0 to 9
    @return: torch.autograd.Variable
             MNIST image
    """
    mnist_dataset = datasets.MNIST('./data', train=False, download=True,
                                   transform=transforms.ToTensor())
    images = mnist_dataset.test_data.numpy()
    labels = mnist_dataset.test_labels.numpy()
    images = images[labels == label]
    image = images[np.random.choice(np.arange(images.shape[0]))]
    # scale to [0, 1] to match transforms.ToTensor() used in training
    image = torch.from_numpy(image).float() / 255.
    image = image.unsqueeze(0)
    return Variable(image, volatile=True)


def fetch_mnist_text(label):
    """Randomly generate a number from 0 to 9.

    @param label: integer
                  an integer from 0 to 9
    @return: torch.autograd.Variable
             Variable wrapped around an integer.
    """
    text = torch.LongTensor([label])
    return Variable(text, volatile=True)


if __name__ == "__main__":
    import os
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('model_path', type=str, help='path to trained model file')
    parser.add_argument('--n-samples', type=int, default=64,
                        help='Number of images and texts to sample [default: 64]')
    # condition sampling on a particular image
    parser.add_argument('--condition-on-image', type=int, default=None,
                        help='digit label (0-9); if set, condition generation on an image of this digit.')
    # condition sampling on a particular text
    parser.add_argument('--condition-on-text', type=int, default=None,
                        help='digit label (0-9); if set, condition generation on this label as text.')
    parser.add_argument('--cuda', action='store_true', default=False,
                        help='enables CUDA training')
    args = parser.parse_args()
    args.cuda = args.cuda and torch.cuda.is_available()

    model = load_checkpoint(args.model_path, use_cuda=args.cuda)
    model.eval()
    if args.cuda:
        model.cuda()

    # NOTE: the labels are compared against None so that conditioning
    # on the digit 0 works as well
    # mode 1: unconditional generation
    if args.condition_on_image is None and args.condition_on_text is None:
        mu = Variable(torch.Tensor([0]))
        std = Variable(torch.Tensor([1]))
        if args.cuda:
            mu = mu.cuda()
            std = std.cuda()
    # mode 2: generate conditioned on image
    elif args.condition_on_image is not None and args.condition_on_text is None:
        image = fetch_mnist_image(args.condition_on_image)
        if args.cuda:
            image = image.cuda()
        mu, logvar = model.infer(image=image)
        std = logvar.mul(0.5).exp_()
    # mode 3: generate conditioned on text
    elif args.condition_on_text is not None and args.condition_on_image is None:
        text = fetch_mnist_text(args.condition_on_text)
        if args.cuda:
            text = text.cuda()
        mu, logvar = model.infer(text=text)
        std = logvar.mul(0.5).exp_()
    # mode 4: generate conditioned on image and text
    else:
        image = fetch_mnist_image(args.condition_on_image)
        text = fetch_mnist_text(args.condition_on_text)
        if args.cuda:
            image = image.cuda()
            text = text.cuda()
        mu, logvar = model.infer(image=image, text=text)
        std = logvar.mul(0.5).exp_()

    # sample from standard gaussian
    sample = Variable(torch.randn(args.n_samples, model.n_latents))
    if args.cuda:
        sample = sample.cuda()
    # sample from particular gaussian by multiplying + adding
    mu = mu.expand_as(sample)
    std = std.expand_as(sample)
    sample = sample.mul(std).add_(mu)

    # generate image and text
    img_recon = F.sigmoid(model.image_decoder(sample)).cpu().data
    txt_recon = F.log_softmax(model.text_decoder(sample), dim=1).cpu().data

    # save image samples to filesystem
    save_image(img_recon.view(args.n_samples, 1, 28, 28),
               './sample_image.png')
    # save text samples to filesystem
    with open('./sample_text.txt', 'w') as fp:
        txt_recon_np = txt_recon.numpy()
        txt_recon_np = np.argmax(txt_recon_np, axis=1).tolist()
        for i, item in enumerate(txt_recon_np):
            fp.write('Text (%d): %s\n' % (i, item))
4,692
37.154472
82
py
multimodal-vae-public
multimodal-vae-public-master/mnist/model.py
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import

import numpy as np

import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
from torch.nn.parameter import Parameter


class MVAE(nn.Module):
    """Multimodal Variational Autoencoder.

    @param n_latents: integer
                      number of latent dimensions
    """
    def __init__(self, n_latents):
        super(MVAE, self).__init__()
        self.image_encoder = ImageEncoder(n_latents)
        self.image_decoder = ImageDecoder(n_latents)
        self.text_encoder = TextEncoder(n_latents)
        self.text_decoder = TextDecoder(n_latents)
        self.experts = ProductOfExperts()
        self.n_latents = n_latents

    def reparametrize(self, mu, logvar):
        if self.training:
            std = logvar.mul(0.5).exp_()
            eps = Variable(std.data.new(std.size()).normal_())
            return eps.mul(std).add_(mu)
        else:  # return mean during inference
            return mu

    def forward(self, image=None, text=None):
        mu, logvar = self.infer(image, text)
        # reparametrization trick to sample
        z = self.reparametrize(mu, logvar)
        # reconstruct inputs based on that gaussian
        img_recon = self.image_decoder(z)
        txt_recon = self.text_decoder(z)
        return img_recon, txt_recon, mu, logvar

    def infer(self, image=None, text=None):
        batch_size = image.size(0) if image is not None else text.size(0)
        use_cuda = next(self.parameters()).is_cuda  # check if CUDA
        # initialize the universal prior expert
        mu, logvar = prior_expert((1, batch_size, self.n_latents),
                                  use_cuda=use_cuda)
        if image is not None:
            img_mu, img_logvar = self.image_encoder(image)
            mu = torch.cat((mu, img_mu.unsqueeze(0)), dim=0)
            logvar = torch.cat((logvar, img_logvar.unsqueeze(0)), dim=0)
        if text is not None:
            txt_mu, txt_logvar = self.text_encoder(text)
            mu = torch.cat((mu, txt_mu.unsqueeze(0)), dim=0)
            logvar = torch.cat((logvar, txt_logvar.unsqueeze(0)), dim=0)
        # product of experts to combine gaussians
        mu, logvar = self.experts(mu, logvar)
        return mu, logvar


class ImageEncoder(nn.Module):
    """Parametrizes q(z|x).

    @param n_latents: integer
                      number of latent dimensions
    """
    def __init__(self, n_latents):
        super(ImageEncoder, self).__init__()
        self.fc1 = nn.Linear(784, 512)
        self.fc2 = nn.Linear(512, 512)
        self.fc31 = nn.Linear(512, n_latents)
        self.fc32 = nn.Linear(512, n_latents)
        self.swish = Swish()

    def forward(self, x):
        h = self.swish(self.fc1(x.view(-1, 784)))
        h = self.swish(self.fc2(h))
        return self.fc31(h), self.fc32(h)


class ImageDecoder(nn.Module):
    """Parametrizes p(x|z).

    @param n_latents: integer
                      number of latent dimensions
    """
    def __init__(self, n_latents):
        super(ImageDecoder, self).__init__()
        self.fc1 = nn.Linear(n_latents, 512)
        self.fc2 = nn.Linear(512, 512)
        self.fc3 = nn.Linear(512, 512)
        self.fc4 = nn.Linear(512, 784)
        self.swish = Swish()

    def forward(self, z):
        h = self.swish(self.fc1(z))
        h = self.swish(self.fc2(h))
        h = self.swish(self.fc3(h))
        return self.fc4(h)  # NOTE: no sigmoid here. See train.py


class TextEncoder(nn.Module):
    """Parametrizes q(z|y).

    @param n_latents: integer
                      number of latent dimensions
    """
    def __init__(self, n_latents):
        super(TextEncoder, self).__init__()
        self.fc1 = nn.Embedding(10, 512)
        self.fc2 = nn.Linear(512, 512)
        self.fc31 = nn.Linear(512, n_latents)
        self.fc32 = nn.Linear(512, n_latents)
        self.swish = Swish()

    def forward(self, x):
        h = self.swish(self.fc1(x))
        h = self.swish(self.fc2(h))
        return self.fc31(h), self.fc32(h)


class TextDecoder(nn.Module):
    """Parametrizes p(y|z).

    @param n_latents: integer
                      number of latent dimensions
    """
    def __init__(self, n_latents):
        super(TextDecoder, self).__init__()
        self.fc1 = nn.Linear(n_latents, 512)
        self.fc2 = nn.Linear(512, 512)
        self.fc3 = nn.Linear(512, 512)
        self.fc4 = nn.Linear(512, 10)
        self.swish = Swish()

    def forward(self, z):
        h = self.swish(self.fc1(z))
        h = self.swish(self.fc2(h))
        h = self.swish(self.fc3(h))
        return self.fc4(h)  # NOTE: no softmax here. See train.py


class ProductOfExperts(nn.Module):
    """Return parameters for product of independent experts.
    See https://arxiv.org/pdf/1410.7827.pdf for equations.

    @param mu: M x D for M experts
    @param logvar: M x D for M experts
    """
    def forward(self, mu, logvar, eps=1e-8):
        var = torch.exp(logvar) + eps
        # precision of i-th Gaussian expert at point x
        T = 1. / (var + eps)
        pd_mu = torch.sum(mu * T, dim=0) / torch.sum(T, dim=0)
        pd_var = 1. / torch.sum(T, dim=0)
        pd_logvar = torch.log(pd_var + eps)
        return pd_mu, pd_logvar


class Swish(nn.Module):
    """https://arxiv.org/abs/1710.05941"""
    def forward(self, x):
        return x * F.sigmoid(x)


def prior_expert(size, use_cuda=False):
    """Universal prior expert. Here we use a spherical
    Gaussian: N(0, 1).

    @param size: integer
                 dimensionality of Gaussian
    @param use_cuda: boolean [default: False]
                     cast CUDA on variables
    """
    mu = Variable(torch.zeros(size))
    logvar = Variable(torch.zeros(size))
    if use_cuda:
        mu, logvar = mu.cuda(), logvar.cuda()
    return mu, logvar
5,973
31.11828
73
py
multimodal-vae-public
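Because infer above accepts any subset of modalities, a trained MVAE supports cross-modal generation, e.g. decoding an image from a digit label alone. A hedged sketch (the weights here are untrained, so the decoded image is noise):

import torch
from torch.autograd import Variable
from model import MVAE

model = MVAE(n_latents=64)
model.eval()
text = Variable(torch.LongTensor([3]), volatile=True)  # digit label as "text"
mu, logvar = model.infer(text=text)   # posterior parameters from the label alone
img_logits = model.image_decoder(mu)  # 1 x 784 logits; apply sigmoid to get pixels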
multimodal-vae-public-master/mnist/train.py
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import

import os
import sys
import shutil

import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import transforms
from torchvision.datasets import MNIST

from model import MVAE


def elbo_loss(recon_image, image, recon_text, text, mu, logvar,
              lambda_image=1.0, lambda_text=1.0, annealing_factor=1):
    """Bimodal ELBO loss function.

    @param recon_image: torch.Tensor
                        reconstructed image
    @param image: torch.Tensor
                  input image
    @param recon_text: torch.Tensor
                       reconstructed text probabilities
    @param text: torch.Tensor
                 input text (one-hot)
    @param mu: torch.Tensor
               mean of latent distribution
    @param logvar: torch.Tensor
                   log-variance of latent distribution
    @param lambda_image: float [default: 1.0]
                         weight for image BCE
    @param lambda_text: float [default: 1.0]
                        weight for text BCE
    @param annealing_factor: integer [default: 1]
                             multiplier for KL divergence term
    @return ELBO: torch.Tensor
                  evidence lower bound
    """
    image_bce, text_bce = 0, 0  # default params
    if recon_image is not None and image is not None:
        image_bce = torch.sum(binary_cross_entropy_with_logits(
            recon_image.view(-1, 1 * 28 * 28),
            image.view(-1, 1 * 28 * 28)), dim=1)

    if recon_text is not None and text is not None:
        text_bce = torch.sum(cross_entropy(recon_text, text), dim=1)

    # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
    # https://arxiv.org/abs/1312.6114
    KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=1)
    ELBO = torch.mean(lambda_image * image_bce + lambda_text * text_bce
                      + annealing_factor * KLD)
    return ELBO


def binary_cross_entropy_with_logits(input, target):
    """Sigmoid Activation + Binary Cross Entropy

    @param input: torch.Tensor (size N)
    @param target: torch.Tensor (size N)
    @return loss: torch.Tensor (size N)
    """
    if not (target.size() == input.size()):
        raise ValueError("Target size ({}) must be the same as input size ({})".format(
            target.size(), input.size()))

    return (torch.clamp(input, 0) - input * target
            + torch.log(1 + torch.exp(-torch.abs(input))))


def cross_entropy(input, target, eps=1e-6):
    """k-Class Cross Entropy (Log Softmax + Log Loss)

    @param input: torch.Tensor (size N x K)
    @param target: torch.Tensor (size N x K)
    @param eps: error to add (default: 1e-6)
    @return loss: torch.Tensor (size N)
    """
    if not (target.size(0) == input.size(0)):
        raise ValueError(
            "Target size ({}) must be the same as input size ({})".format(
                target.size(0), input.size(0)))

    log_input = F.log_softmax(input + eps, dim=1)
    y_onehot = Variable(log_input.data.new(log_input.size()).zero_())
    y_onehot = y_onehot.scatter(1, target.unsqueeze(1), 1)
    loss = y_onehot * log_input
    return -loss


class AverageMeter(object):
    """Computes and stores the average and current value"""
    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


def save_checkpoint(state, is_best, folder='./', filename='checkpoint.pth.tar'):
    if not os.path.isdir(folder):
        os.mkdir(folder)
    torch.save(state, os.path.join(folder, filename))
    if is_best:
        shutil.copyfile(os.path.join(folder, filename),
                        os.path.join(folder, 'model_best.pth.tar'))


def load_checkpoint(file_path, use_cuda=False):
    checkpoint = torch.load(file_path) if use_cuda else \
        torch.load(file_path, map_location=lambda storage, location: storage)
    model = MVAE(checkpoint['n_latents'])
    model.load_state_dict(checkpoint['state_dict'])
    return model


if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--n-latents', type=int, default=64,
                        help='size of the latent embedding [default: 64]')
    parser.add_argument('--batch-size', type=int, default=100, metavar='N',
                        help='input batch size for training [default: 100]')
    parser.add_argument('--epochs', type=int, default=500, metavar='N',
                        help='number of epochs to train [default: 500]')
    parser.add_argument('--annealing-epochs', type=int, default=200, metavar='N',
                        help='number of epochs to anneal KL for [default: 200]')
    parser.add_argument('--lr', type=float, default=1e-3, metavar='LR',
                        help='learning rate [default: 1e-3]')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status [default: 10]')
    parser.add_argument('--lambda-image', type=float, default=1.,
                        help='multipler for image reconstruction [default: 1]')
    parser.add_argument('--lambda-text', type=float, default=10.,
                        help='multipler for text reconstruction [default: 10]')
    parser.add_argument('--cuda', action='store_true', default=False,
                        help='enables CUDA training [default: False]')
    args = parser.parse_args()
    args.cuda = args.cuda and torch.cuda.is_available()

    if not os.path.isdir('./trained_models'):
        os.makedirs('./trained_models')

    train_loader = torch.utils.data.DataLoader(
        MNIST('./data', train=True, download=True, transform=transforms.ToTensor()),
        batch_size=args.batch_size, shuffle=True)
    N_mini_batches = len(train_loader)
    test_loader = torch.utils.data.DataLoader(
        MNIST('./data', train=False, download=True, transform=transforms.ToTensor()),
        batch_size=args.batch_size, shuffle=False)

    model = MVAE(args.n_latents)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    if args.cuda:
        model.cuda()

    def train(epoch):
        model.train()
        train_loss_meter = AverageMeter()

        for batch_idx, (image, text) in enumerate(train_loader):
            if epoch < args.annealing_epochs:
                # compute the KL annealing factor for the current
                # mini-batch in the current epoch
                annealing_factor = (float(batch_idx + (epoch - 1) * N_mini_batches + 1) /
                                    float(args.annealing_epochs * N_mini_batches))
            else:
                # by default the KL annealing factor is unity
                annealing_factor = 1.0

            if args.cuda:
                image = image.cuda()
                text = text.cuda()

            image = Variable(image)
            text = Variable(text)
            batch_size = len(image)

            # refresh the optimizer
            optimizer.zero_grad()

            # pass data through model
            recon_image_1, recon_text_1, mu_1, logvar_1 = model(image, text)
            recon_image_2, recon_text_2, mu_2, logvar_2 = model(image)
            recon_image_3, recon_text_3, mu_3, logvar_3 = model(text=text)

            # compute ELBO for each data combo
            joint_loss = elbo_loss(recon_image_1, image, recon_text_1, text,
                                   mu_1, logvar_1,
                                   lambda_image=args.lambda_image,
                                   lambda_text=args.lambda_text,
                                   annealing_factor=annealing_factor)
            image_loss = elbo_loss(recon_image_2, image, None, None,
                                   mu_2, logvar_2,
                                   lambda_image=args.lambda_image,
                                   lambda_text=args.lambda_text,
                                   annealing_factor=annealing_factor)
            text_loss = elbo_loss(None, None, recon_text_3, text,
                                  mu_3, logvar_3,
                                  lambda_image=args.lambda_image,
                                  lambda_text=args.lambda_text,
                                  annealing_factor=annealing_factor)
            train_loss = joint_loss + image_loss + text_loss
            train_loss_meter.update(train_loss.data[0], batch_size)

            # compute gradients and take step
            train_loss.backward()
            optimizer.step()

            if batch_idx % args.log_interval == 0:
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tAnnealing-Factor: {:.3f}'.format(
                    epoch, batch_idx * len(image), len(train_loader.dataset),
                    100. * batch_idx / len(train_loader), train_loss_meter.avg,
                    annealing_factor))

        print('====> Epoch: {}\tLoss: {:.4f}'.format(epoch, train_loss_meter.avg))

    def test(epoch):
        model.eval()
        test_loss_meter = AverageMeter()

        for batch_idx, (image, text) in enumerate(test_loader):
            if args.cuda:
                image = image.cuda()
                text = text.cuda()

            image = Variable(image, volatile=True)
            text = Variable(text, volatile=True)
            batch_size = len(image)

            recon_image_1, recon_text_1, mu_1, logvar_1 = model(image, text)
            recon_image_2, recon_text_2, mu_2, logvar_2 = model(image)
            recon_image_3, recon_text_3, mu_3, logvar_3 = model(text=text)

            joint_loss = elbo_loss(recon_image_1, image, recon_text_1, text, mu_1, logvar_1)
            image_loss = elbo_loss(recon_image_2, image, None, None, mu_2, logvar_2)
            text_loss = elbo_loss(None, None, recon_text_3, text, mu_3, logvar_3)
            test_loss = joint_loss + image_loss + text_loss
            test_loss_meter.update(test_loss.data[0], batch_size)

        print('====> Test Loss: {:.4f}'.format(test_loss_meter.avg))
        return test_loss_meter.avg

    best_loss = sys.maxint
    for epoch in range(1, args.epochs + 1):
        train(epoch)
        test_loss = test(epoch)
        is_best = test_loss < best_loss
        best_loss = min(test_loss, best_loss)
        # save the best model and current model
        save_checkpoint({
            'state_dict': model.state_dict(),
            'best_loss': best_loss,
            'n_latents': args.n_latents,
            'optimizer': optimizer.state_dict(),
        }, is_best, folder='./trained_models')
10,817
39.215613
105
py
multimodal-vae-public
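The annealing factor used in train above ramps the KL weight linearly from near zero to one over the first --annealing-epochs worth of mini-batches. A tiny sketch of the schedule (the batch count is an illustrative assumption):

N_mini_batches = 600      # illustrative; len(train_loader) in practice
annealing_epochs = 200

def annealing_factor(epoch, batch_idx):
    # linear ramp over annealing_epochs * N_mini_batches steps, then flat at 1.0
    if epoch < annealing_epochs:
        step = batch_idx + (epoch - 1) * N_mini_batches + 1
        return float(step) / float(annealing_epochs * N_mini_batches)
    return 1.0

assert 0.0 < annealing_factor(1, 0) < 1.0
assert annealing_factor(200, 0) == 1.0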
multimodal-vae-public-master/fashionmnist/sample.py
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import

import sys
import numpy as np

import torch
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import transforms
from torchvision.utils import save_image

from train import load_checkpoint
from datasets import FashionMNIST
from model import LABEL_IX_TO_STRING


def fetch_fashionmnist_image(label):
    """Return a random image from the FashionMNIST dataset with label.

    @param label: integer
                  an integer from 0 to 9
    @return: torch.autograd.Variable
             FashionMNIST image
    """
    dataset = FashionMNIST('./data', train=False, download=True,
                           transform=transforms.ToTensor())
    images = dataset.test_data.numpy()
    labels = dataset.test_labels.numpy()
    images = images[labels == label]
    image = images[np.random.choice(np.arange(images.shape[0]))]
    # scale to [0, 1] to match transforms.ToTensor() used in training
    image = torch.from_numpy(image).float() / 255.
    image = image.unsqueeze(0)
    return Variable(image, volatile=True)


def fetch_fashionmnist_text(label):
    """Randomly generate a number from 0 to 9.

    @param label: integer
                  an integer from 0 to 9
    @return: torch.autograd.Variable
             Variable wrapped around an integer.
    """
    text = torch.LongTensor([label])
    return Variable(text, volatile=True)


if __name__ == "__main__":
    import os
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('model_path', type=str, help='path to trained model file')
    parser.add_argument('--n-samples', type=int, default=64,
                        help='Number of images and texts to sample [default: 64]')
    # condition sampling on a particular image
    parser.add_argument('--condition-on-image', type=int, default=None,
                        help='class label (0-9); if set, condition generation on an image of this class.')
    # condition sampling on a particular text
    parser.add_argument('--condition-on-text', type=int, default=None,
                        help='class label (0-9); if set, condition generation on this label as text.')
    parser.add_argument('--cuda', action='store_true', default=False,
                        help='enables CUDA training [default: False]')
    args = parser.parse_args()
    args.cuda = args.cuda and torch.cuda.is_available()

    model = load_checkpoint(args.model_path, use_cuda=args.cuda)
    model.eval()
    if args.cuda:
        model.cuda()

    # NOTE: the labels are compared against None so that conditioning
    # on class 0 ('T-shirt/top') works as well; infer takes only
    # keyword modalities (see model.py)
    # mode 1: unconditional generation
    if args.condition_on_image is None and args.condition_on_text is None:
        mu = Variable(torch.Tensor([0]))
        std = Variable(torch.Tensor([1]))
        if args.cuda:
            mu = mu.cuda()
            std = std.cuda()
    # mode 2: generate conditioned on image
    elif args.condition_on_image is not None and args.condition_on_text is None:
        image = fetch_fashionmnist_image(args.condition_on_image)
        if args.cuda:
            image = image.cuda()
        mu, logvar = model.infer(image=image)
        std = logvar.mul(0.5).exp_()
    # mode 3: generate conditioned on text
    elif args.condition_on_text is not None and args.condition_on_image is None:
        text = fetch_fashionmnist_text(args.condition_on_text)
        if args.cuda:
            text = text.cuda()
        mu, logvar = model.infer(text=text)
        std = logvar.mul(0.5).exp_()
    # mode 4: generate conditioned on image and text
    else:
        image = fetch_fashionmnist_image(args.condition_on_image)
        text = fetch_fashionmnist_text(args.condition_on_text)
        if args.cuda:
            image = image.cuda()
            text = text.cuda()
        mu, logvar = model.infer(image=image, text=text)
        std = logvar.mul(0.5).exp_()

    # sample from standard gaussian
    sample = Variable(torch.randn(args.n_samples, model.n_latents))
    if args.cuda:
        sample = sample.cuda()
    # sample from particular gaussian by multiplying + adding
    mu = mu.expand_as(sample)
    std = std.expand_as(sample)
    sample = sample.mul(std).add_(mu)

    # generate image and text
    img_recon = F.sigmoid(model.image_decoder(sample)).cpu().data
    txt_recon = F.log_softmax(model.text_decoder(sample), dim=1).cpu().data

    # save image samples to filesystem
    save_image(img_recon.view(args.n_samples, 1, 28, 28),
               './sample_image.png')
    # save text samples to filesystem
    with open('./sample_text.txt', 'w') as fp:
        txt_recon_np = txt_recon.numpy()
        txt_recon_np = np.argmax(txt_recon_np, axis=1).tolist()
        for i, item in enumerate(txt_recon_np):
            fp.write('Text (%d): %s\n' % (i, LABEL_IX_TO_STRING[item]))
4,827
37.624
82
py
multimodal-vae-public
multimodal-vae-public-master/fashionmnist/model.py
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import

import numpy as np

import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F

# MAP from index to the interpretable label
LABEL_IX_TO_STRING = {0: 'T-shirt/top', 1: 'Trouser', 2: 'Pullover',
                      3: 'Dress', 4: 'Coat', 5: 'Sandal', 6: 'Shirt',
                      7: 'Sneaker', 8: 'Bag', 9: 'Ankle boot'}


class MVAE(nn.Module):
    """Multimodal Variational Autoencoder.

    @param n_latents: integer
                      number of latent dimensions
    """
    def __init__(self, n_latents):
        super(MVAE, self).__init__()
        self.image_encoder = ImageEncoder(n_latents)
        self.image_decoder = ImageDecoder(n_latents)
        self.text_encoder = TextEncoder(n_latents)
        self.text_decoder = TextDecoder(n_latents)
        self.experts = ProductOfExperts()
        self.n_latents = n_latents

    def reparametrize(self, mu, logvar):
        if self.training:
            std = logvar.mul(0.5).exp_()
            eps = Variable(std.data.new(std.size()).normal_())
            return eps.mul(std).add_(mu)
        else:  # return mean during inference
            return mu

    def forward(self, image=None, text=None):
        mu, logvar = self.infer(image, text)
        # reparametrization trick to sample
        z = self.reparametrize(mu, logvar)
        # reconstruct inputs based on that gaussian
        img_recon = self.image_decoder(z)
        txt_recon = self.text_decoder(z)
        return img_recon, txt_recon, mu, logvar

    def infer(self, image=None, text=None):
        batch_size = image.size(0) if image is not None else text.size(0)
        use_cuda = next(self.parameters()).is_cuda  # check if CUDA
        # initialize the universal prior expert
        mu, logvar = prior_expert((1, batch_size, self.n_latents),
                                  use_cuda=use_cuda)
        if image is not None:
            img_mu, img_logvar = self.image_encoder(image)
            mu = torch.cat((mu, img_mu.unsqueeze(0)), dim=0)
            logvar = torch.cat((logvar, img_logvar.unsqueeze(0)), dim=0)
        if text is not None:
            txt_mu, txt_logvar = self.text_encoder(text)
            mu = torch.cat((mu, txt_mu.unsqueeze(0)), dim=0)
            logvar = torch.cat((logvar, txt_logvar.unsqueeze(0)), dim=0)
        # product of experts to combine gaussians
        mu, logvar = self.experts(mu, logvar)
        return mu, logvar


class ImageEncoder(nn.Module):
    """Parametrizes q(z|x).

    @param n_latents: integer
                      number of latent dimensions
    """
    def __init__(self, n_latents):
        super(ImageEncoder, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(1, 64, 4, 2, 1, bias=False),
            Swish(),
            nn.Conv2d(64, 128, 4, 2, 1, bias=False),
            Swish())
        self.classifier = nn.Sequential(
            nn.Linear(128 * 7 * 7, 512),
            Swish(),
            nn.Linear(512, n_latents * 2))
        self.n_latents = n_latents

    def forward(self, x):
        n_latents = self.n_latents
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x[:, :n_latents], x[:, n_latents:]


class ImageDecoder(nn.Module):
    """Parametrizes p(x|z).

    @param n_latents: integer
                      number of latent dimensions
    """
    def __init__(self, n_latents):
        super(ImageDecoder, self).__init__()
        self.n_latents = n_latents
        self.upsampler = nn.Sequential(
            nn.Linear(n_latents, 512),
            Swish(),
            nn.Linear(512, 128 * 7 * 7),
            Swish())
        self.hallucinate = nn.Sequential(
            nn.ConvTranspose2d(128, 64, 4, 2, 1, bias=False),
            Swish(),
            nn.ConvTranspose2d(64, 1, 4, 2, 1, bias=False))

    def forward(self, z):
        # the input will be a vector of size |n_latents|
        z = self.upsampler(z)
        z = z.view(-1, 128, 7, 7)
        z = self.hallucinate(z)
        return z  # NOTE: no sigmoid here. See train.py


class TextEncoder(nn.Module):
    """Parametrizes q(z|y).

    @param n_latents: integer
                      number of latent dimensions
    """
    def __init__(self, n_latents):
        super(TextEncoder, self).__init__()
        self.net = nn.Sequential(
            nn.Embedding(10, 512),
            Swish(),
            nn.Linear(512, 512),
            Swish(),
            nn.Linear(512, n_latents * 2))
        self.n_latents = n_latents

    def forward(self, x):
        n_latents = self.n_latents
        x = self.net(x)
        return x[:, :n_latents], x[:, n_latents:]


class TextDecoder(nn.Module):
    """Parametrizes p(y|z).

    @param n_latents: integer
                      number of latent dimensions
    """
    def __init__(self, n_latents):
        super(TextDecoder, self).__init__()
        self.net = nn.Sequential(
            nn.Linear(n_latents, 512),
            Swish(),
            nn.Linear(512, 512),
            Swish(),
            nn.Linear(512, 512),
            Swish(),
            nn.Linear(512, 10))

    def forward(self, z):
        z = self.net(z)
        return z  # NOTE: no softmax here. See train.py


class ProductOfExperts(nn.Module):
    """Return parameters for product of independent experts.
    See https://arxiv.org/pdf/1410.7827.pdf for equations.

    @param mu: M x D for M experts
    @param logvar: M x D for M experts
    """
    def forward(self, mu, logvar, eps=1e-8):
        var = torch.exp(logvar) + eps
        # precision of i-th Gaussian expert at point x
        T = 1. / (var + eps)
        pd_mu = torch.sum(mu * T, dim=0) / torch.sum(T, dim=0)
        pd_var = 1. / torch.sum(T, dim=0)
        pd_logvar = torch.log(pd_var + eps)
        return pd_mu, pd_logvar


class Swish(nn.Module):
    """https://arxiv.org/abs/1710.05941"""
    def forward(self, x):
        return x * F.sigmoid(x)


def prior_expert(size, use_cuda=False):
    """Universal prior expert. Here we use a spherical
    Gaussian: N(0, 1).

    @param size: integer
                 dimensionality of Gaussian
    @param use_cuda: boolean [default: False]
                     cast CUDA on variables
    """
    mu = Variable(torch.zeros(size))
    logvar = Variable(torch.log(torch.ones(size)))
    if use_cuda:
        mu, logvar = mu.cuda(), logvar.cuda()
    return mu, logvar
6,482
30.779412
82
py
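The ProductOfExperts module above is the heart of the MVAE: each expert's precision weights its mean. A minimal standalone sketch (not part of the repo; the values and the modern torch.tensor API are illustrative) of the same computation for two one-dimensional experts:

import torch

mu = torch.tensor([[0.0], [2.0]])          # M x D: prior N(0, 1) and an expert N(2, 0.25)
logvar = torch.tensor([[0.0], [-1.3863]])  # log 1.0 and log 0.25

T = 1. / torch.exp(logvar)                 # precisions: [1, 4]
pd_mu = torch.sum(mu * T, dim=0) / torch.sum(T, dim=0)
pd_var = 1. / torch.sum(T, dim=0)
print(pd_mu.item(), pd_var.item())         # 1.6, 0.2: pulled toward the sharper expert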
multimodal-vae-public
multimodal-vae-public-master/fashionmnist/datasets.py
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import

from torchvision.datasets import MNIST

class FashionMNIST(MNIST):
    """`Fashion-MNIST <https://github.com/zalandoresearch/fashion-mnist>`_ Dataset.

    Args:
        root (string): Root directory of dataset where ``processed/training.pt``
            and ``processed/test.pt`` exist.
        train (bool, optional): If True, creates dataset from ``training.pt``,
            otherwise from ``test.pt``.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
    """
    urls = [
        'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz',
        'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-labels-idx1-ubyte.gz',
        'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz',
        'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz',
    ]
1,428
46.633333
96
py
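A usage sketch mirroring how train.py consumes this class; it assumes a torchvision version whose MNIST.download() reads the urls attribute above:

import torch
from torchvision import transforms
from datasets import FashionMNIST

loader = torch.utils.data.DataLoader(
    FashionMNIST('./data', train=True, download=True,
                 transform=transforms.ToTensor()),
    batch_size=100, shuffle=True)
images, labels = next(iter(loader))
print(images.size())  # (100, 1, 28, 28); labels hold the class indexes 0-9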
multimodal-vae-public
multimodal-vae-public-master/fashionmnist/train.py
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import

import os
import sys
import shutil

import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import transforms

from model import MVAE
from datasets import FashionMNIST

def elbo_loss(recon_image, image, recon_text, text, mu, logvar,
              lambda_image=1.0, lambda_text=1.0, annealing_factor=1):
    """Bimodal ELBO loss function.

    @param recon_image: torch.Tensor
                        reconstructed image
    @param image: torch.Tensor
                  input image
    @param recon_text: torch.Tensor
                       reconstructed text probabilities
    @param text: torch.Tensor
                 input text (one-hot)
    @param mu: torch.Tensor
               mean of latent distribution
    @param logvar: torch.Tensor
                   log-variance of latent distribution
    @param lambda_image: float [default: 1.0]
                         weight for image BCE
    @param lambda_text: float [default: 1.0]
                        weight for text BCE
    @param annealing_factor: integer [default: 1]
                             multiplier for KL divergence term
    @return ELBO: torch.Tensor
                  evidence lower bound
    """
    image_bce, text_bce = 0, 0  # default params
    if recon_image is not None and image is not None:
        image_bce = torch.sum(binary_cross_entropy_with_logits(
            recon_image.view(-1, 1 * 28 * 28),
            image.view(-1, 1 * 28 * 28)), dim=1)
    if recon_text is not None and text is not None:
        text_bce = torch.sum(cross_entropy(recon_text, text), dim=1)
    # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
    # https://arxiv.org/abs/1312.6114
    KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=1)
    ELBO = torch.mean(lambda_image * image_bce + lambda_text * text_bce
                      + annealing_factor * KLD)
    return ELBO

def binary_cross_entropy_with_logits(input, target):
    """Sigmoid Activation + Binary Cross Entropy

    @param input: torch.Tensor (size N)
    @param target: torch.Tensor (size N)
    @return loss: torch.Tensor (size N)
    """
    if not (target.size() == input.size()):
        raise ValueError("Target size ({}) must be the same as input size ({})".format(
            target.size(), input.size()))
    return (torch.clamp(input, 0) - input * target
            + torch.log(1 + torch.exp(-torch.abs(input))))

def cross_entropy(input, target, eps=1e-6):
    """k-Class Cross Entropy (Log Softmax + Log Loss)

    @param input: torch.Tensor (size N x K)
    @param target: torch.Tensor (size N x K)
    @param eps: error to add (default: 1e-6)
    @return loss: torch.Tensor (size N)
    """
    if not (target.size(0) == input.size(0)):
        raise ValueError(
            "Target size ({}) must be the same as input size ({})".format(
                target.size(0), input.size(0)))
    log_input = F.log_softmax(input + eps, dim=1)
    y_onehot = Variable(log_input.data.new(log_input.size()).zero_())
    y_onehot = y_onehot.scatter(1, target.unsqueeze(1), 1)
    loss = y_onehot * log_input
    return -loss

class AverageMeter(object):
    """Computes and stores the average and current value"""
    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

def save_checkpoint(state, is_best, folder='./', filename='checkpoint.pth.tar'):
    if not os.path.isdir(folder):
        os.mkdir(folder)
    torch.save(state, os.path.join(folder, filename))
    if is_best:
        shutil.copyfile(os.path.join(folder, filename),
                        os.path.join(folder, 'model_best.pth.tar'))

def load_checkpoint(file_path, use_cuda=False):
    checkpoint = torch.load(file_path) if use_cuda else \
        torch.load(file_path, map_location=lambda storage, location: storage)
    model = MVAE(checkpoint['n_latents'])
    model.load_state_dict(checkpoint['state_dict'])
    return model

if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--n-latents', type=int, default=64,
                        help='size of the latent embedding [default: 64]')
    parser.add_argument('--batch-size', type=int, default=100, metavar='N',
                        help='input batch size for training [default: 100]')
    parser.add_argument('--epochs', type=int, default=500, metavar='N',
                        help='number of epochs to train [default: 500]')
    parser.add_argument('--annealing-epochs', type=int, default=200, metavar='N',
                        help='number of epochs to anneal KL for [default: 200]')
    parser.add_argument('--lr', type=float, default=1e-3, metavar='LR',
                        help='learning rate [default: 1e-3]')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status [default: 10]')
    parser.add_argument('--lambda-image', type=float, default=1.,
                        help='multiplier for image reconstruction [default: 1]')
    parser.add_argument('--lambda-text', type=float, default=10.,
                        help='multiplier for text reconstruction [default: 10]')
    parser.add_argument('--cuda', action='store_true', default=False,
                        help='enables CUDA training [default: False]')
    args = parser.parse_args()
    args.cuda = args.cuda and torch.cuda.is_available()

    if not os.path.isdir('./trained_models'):
        os.makedirs('./trained_models')

    train_loader = torch.utils.data.DataLoader(
        FashionMNIST('./data', train=True, download=True,
                     transform=transforms.ToTensor()),
        batch_size=args.batch_size, shuffle=True)
    N_mini_batches = len(train_loader)
    test_loader = torch.utils.data.DataLoader(
        FashionMNIST('./data', train=False, download=True,
                     transform=transforms.ToTensor()),
        batch_size=args.batch_size, shuffle=False)

    model = MVAE(args.n_latents)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    if args.cuda:
        model.cuda()

    def train(epoch):
        model.train()
        train_loss_meter = AverageMeter()

        for batch_idx, (image, text) in enumerate(train_loader):
            if epoch < args.annealing_epochs:
                # compute the KL annealing factor for the current mini-batch in the current epoch
                annealing_factor = (float(batch_idx + (epoch - 1) * N_mini_batches + 1) /
                                    float(args.annealing_epochs * N_mini_batches))
            else:
                # by default the KL annealing factor is unity
                annealing_factor = 1.0

            if args.cuda:
                image = image.cuda()
                text = text.cuda()
            image = Variable(image)
            text = Variable(text)
            batch_size = len(image)

            # refresh the optimizer
            optimizer.zero_grad()

            # pass data through model
            recon_image_1, recon_text_1, mu_1, logvar_1 = model(image, text)
            recon_image_2, recon_text_2, mu_2, logvar_2 = model(image)
            recon_image_3, recon_text_3, mu_3, logvar_3 = model(text=text)

            # compute ELBO for each data combo
            joint_loss = elbo_loss(recon_image_1, image, recon_text_1, text, mu_1, logvar_1,
                                   lambda_image=args.lambda_image, lambda_text=args.lambda_text,
                                   annealing_factor=annealing_factor)
            image_loss = elbo_loss(recon_image_2, image, None, None, mu_2, logvar_2,
                                   lambda_image=args.lambda_image, lambda_text=args.lambda_text,
                                   annealing_factor=annealing_factor)
            text_loss = elbo_loss(None, None, recon_text_3, text, mu_3, logvar_3,
                                  lambda_image=args.lambda_image, lambda_text=args.lambda_text,
                                  annealing_factor=annealing_factor)
            train_loss = joint_loss + image_loss + text_loss
            train_loss_meter.update(train_loss.data[0], batch_size)

            # compute gradients and take step
            train_loss.backward()
            optimizer.step()

            if batch_idx % args.log_interval == 0:
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tAnnealing-Factor: {:.3f}'.format(
                    epoch, batch_idx * len(image), len(train_loader.dataset),
                    100. * batch_idx / len(train_loader), train_loss_meter.avg, annealing_factor))

        print('====> Epoch: {}\tLoss: {:.4f}'.format(epoch, train_loss_meter.avg))

    def test(epoch):
        model.eval()
        test_loss_meter = AverageMeter()

        for batch_idx, (image, text) in enumerate(test_loader):
            if args.cuda:
                image = image.cuda()
                text = text.cuda()
            image = Variable(image, volatile=True)
            text = Variable(text, volatile=True)
            batch_size = len(image)

            recon_image_1, recon_text_1, mu_1, logvar_1 = model(image, text)
            recon_image_2, recon_text_2, mu_2, logvar_2 = model(image)
            recon_image_3, recon_text_3, mu_3, logvar_3 = model(text=text)

            joint_loss = elbo_loss(recon_image_1, image, recon_text_1, text, mu_1, logvar_1)
            image_loss = elbo_loss(recon_image_2, image, None, None, mu_2, logvar_2)
            text_loss = elbo_loss(None, None, recon_text_3, text, mu_3, logvar_3)
            test_loss = joint_loss + image_loss + text_loss
            test_loss_meter.update(test_loss.data[0], batch_size)

        print('====> Test Loss: {:.4f}'.format(test_loss_meter.avg))
        return test_loss_meter.avg

    best_loss = sys.maxint
    for epoch in range(1, args.epochs + 1):
        train(epoch)
        test_loss = test(epoch)
        is_best = test_loss < best_loss
        best_loss = min(test_loss, best_loss)
        # save the best model and current model
        save_checkpoint({
            'state_dict': model.state_dict(),
            'best_loss': best_loss,
            'n_latents': args.n_latents,
            'optimizer': optimizer.state_dict(),
        }, is_best, folder='./trained_models')
10,820
39.226766
105
py
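The hand-rolled binary_cross_entropy_with_logits above uses the standard numerically stable form max(x, 0) - x*t + log(1 + exp(-|x|)). A quick check (not in the repo; uses the modern reduction= keyword) against PyTorch's built-in:

import torch
import torch.nn.functional as F

logits, targets = torch.randn(4, 10), torch.rand(4, 10)
manual = (torch.clamp(logits, 0) - logits * targets
          + torch.log(1 + torch.exp(-torch.abs(logits))))
builtin = F.binary_cross_entropy_with_logits(logits, targets, reduction='none')
print((manual - builtin).abs().max())  # ~1e-7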
multimodal-vae-public
multimodal-vae-public-master/multimnist/sample.py
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import

import sys
import numpy as np

import torch
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import transforms
from torchvision.utils import save_image

from datasets import MultiMNIST
from train import load_checkpoint
from utils import char_tensor, charlist_tensor
from utils import tensor_to_string

def fetch_multimnist_image(label):
    """Return a random image from the MultiMNIST dataset with label.

    @param label: string
                  a string of up to 4 digits
    @return: torch.autograd.Variable
             MultiMNIST image
    """
    dataset = MultiMNIST('./data', train=False, download=True,
                         transform=transforms.ToTensor(),
                         target_transform=charlist_tensor)
    images = dataset.test_data
    labels = dataset.test_labels
    n_rows = len(images)
    matches = []  # collect matching images here; do not shadow <images>
    for i in xrange(n_rows):
        image = images[i]
        text = labels[i]
        if tensor_to_string(text.squeeze(0)) == label:
            matches.append(image.unsqueeze(0))
    if len(matches) == 0:
        sys.exit('No images with label (%s) found.' % label)
    matches = torch.cat(matches).cpu().numpy()
    ix = np.random.choice(np.arange(matches.shape[0]))
    image = matches[ix]
    image = torch.from_numpy(image).float()
    image = image.unsqueeze(0)
    return Variable(image, volatile=True)

def fetch_multimnist_text(label):
    """Return the text modality for a given label string.

    @param label: string
                  a string of up to 4 digits
    @return: torch.autograd.Variable
             Variable wrapped around a tensor of character indexes.
    """
    text = char_tensor(label).unsqueeze(0)
    return Variable(text, volatile=True)

if __name__ == "__main__":
    import os
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('model_path', type=str, help='path to trained model file')
    parser.add_argument('--n-samples', type=int, default=64,
                        help='Number of images and texts to sample [default: 64]')
    # condition sampling on a particular image
    parser.add_argument('--condition-on-image', type=str, default=None,
                        help='digit string; if given, condition generation on an image with this label.')
    # condition sampling on a particular text
    parser.add_argument('--condition-on-text', type=str, default=None,
                        help='digit string; if given, condition generation on this text.')
    parser.add_argument('--cuda', action='store_true', default=False,
                        help='enables CUDA training')
    args = parser.parse_args()
    args.cuda = args.cuda and torch.cuda.is_available()

    model = load_checkpoint(args.model_path, use_cuda=args.cuda)
    model.eval()
    if args.cuda:
        model.cuda()

    # mode 1: unconditional generation
    if not args.condition_on_image and not args.condition_on_text:
        mu = Variable(torch.Tensor([0]))
        std = Variable(torch.Tensor([1]))
        if args.cuda:
            mu = mu.cuda()
            std = std.cuda()
    # mode 2: generate conditioned on image
    elif args.condition_on_image and not args.condition_on_text:
        image = fetch_multimnist_image(args.condition_on_image)
        if args.cuda:
            image = image.cuda()
        mu, logvar = model.infer(image=image)
        std = logvar.mul(0.5).exp_()
    # mode 3: generate conditioned on text
    elif args.condition_on_text and not args.condition_on_image:
        text = fetch_multimnist_text(args.condition_on_text)
        if args.cuda:
            text = text.cuda()
        mu, logvar = model.infer(text=text)
        std = logvar.mul(0.5).exp_()
    # mode 4: generate conditioned on image and text
    elif args.condition_on_text and args.condition_on_image:
        image = fetch_multimnist_image(args.condition_on_image)
        text = fetch_multimnist_text(args.condition_on_text)
        if args.cuda:
            image = image.cuda()
            text = text.cuda()
        mu, logvar = model.infer(image=image, text=text)
        std = logvar.mul(0.5).exp_()

    # sample from standard gaussian
    sample = Variable(torch.randn(args.n_samples, model.n_latents))
    if args.cuda:
        sample = sample.cuda()

    # sample from particular gaussian by multiplying + adding
    mu = mu.expand_as(sample)
    std = std.expand_as(sample)
    sample = sample.mul(std).add_(mu)

    # generate image and text
    img_recon = F.sigmoid(model.image_decoder(sample)).cpu().data
    txt_recon = F.log_softmax(model.text_decoder(sample), dim=2).cpu().data
    txt_recon = torch.max(txt_recon, dim=2)[1]

    # save image samples to filesystem
    save_image(img_recon.view(args.n_samples, 1, 50, 50), './sample_image.png')
    # save text samples to filesystem
    with open('./sample_text.txt', 'w') as fp:
        for i in xrange(txt_recon.size(0)):
            txt_recon_str = tensor_to_string(txt_recon[i])
            fp.write('Text (%d): %s\n' % (i, txt_recon_str))
5,196
36.121429
82
py
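The conditional sampling in the script reduces to shifting and scaling standard normal draws by the inferred mu and std. The same pattern in isolation (the shapes and values are illustrative stand-ins for model.infer output):

import torch

n_samples, n_latents = 64, 64
mu, std = torch.zeros(1, n_latents), torch.ones(1, n_latents)

sample = torch.randn(n_samples, n_latents)
sample = sample.mul(std.expand_as(sample)).add_(mu.expand_as(sample))
print(sample.size())  # (64, 64): one latent vector per requested sample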
multimodal-vae-public
multimodal-vae-public-master/multimnist/utils.py
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import

import string
import random
import time
import math

import torch
from torch.autograd import Variable

max_length = 4  # max of 4 characters in an image
all_characters = '0123456789'
n_characters = len(all_characters)

# add 2 characters; b/c we always generate a fixed number
# of characters, we do not need an EOS token
SOS = n_characters
FILL = n_characters + 1  # placeholder for nothing
n_characters += 2

def char_tensor(string):
    """Turn a string into a tensor.

    @param string: str object
    @return tensor: torch.Tensor object. Not a Variable.
    """
    tensor = torch.ones(max_length).long() * FILL
    for c in xrange(len(string)):
        tensor[c] = all_characters.index(string[c])
    return tensor

def charlist_tensor(charlist):
    """Turn a list of indexes into a tensor."""
    string = ''.join([str(i) for i in charlist])
    return char_tensor(string)

def tensor_to_string(tensor):
    """Invert char_tensor: turn a LongTensor of character
    indexes back into a string."""
    string = ''
    for i in range(tensor.size(0)):
        top_i = tensor[i]
        string += index_to_char(top_i)
    return string

def index_to_char(top_i):
    if top_i == SOS:
        return '^'
    # FILL is the default character
    elif top_i == FILL:
        return ''
    else:
        return all_characters[top_i]
1,417
23.877193
60
py
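A round trip through the helpers above; with max_length = 4 and FILL = 11, short labels are right-padded and FILL decodes to the empty string:

from utils import char_tensor, tensor_to_string

t = char_tensor('42')
print(t.tolist())           # [4, 2, 11, 11]
print(tensor_to_string(t))  # '42'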
multimodal-vae-public
multimodal-vae-public-master/multimnist/model.py
"""This model will be quite similar to mnist/model.py except we will need to be slightly fancier in the encoder/decoders for each modality. Likely, we will need convolutions/deconvolutions and RNNs. """ from __future__ import division from __future__ import print_function from __future__ import absolute_import import torch import torch.nn as nn from torch.autograd import Variable from torch.nn import functional as F from torch.nn.parameter import Parameter from utils import n_characters, max_length from utils import SOS, FILL class MVAE(nn.Module): """Multimodal Variational Autoencoder. @param n_latents: integer number of latent dimensions """ def __init__(self, n_latents): super(MVAE, self).__init__() self.image_encoder = ImageEncoder(n_latents) self.image_decoder = ImageDecoder(n_latents) self.text_encoder = TextEncoder(n_latents, n_characters, n_hiddens=200, bidirectional=True) self.text_decoder = TextDecoder(n_latents, n_characters, n_hiddens=200) self.experts = ProductOfExperts() self.n_latents = n_latents def reparametrize(self, mu, logvar): if self.training: std = logvar.mul(0.5).exp_() eps = Variable(std.data.new(std.size()).normal_()) return eps.mul(std).add_(mu) else: return mu def forward(self, image=None, text=None): mu, logvar = self.infer(image, text) # reparametrization trick to sample z = self.reparametrize(mu, logvar) # reconstruct inputs based on that gaussian img_recon = self.image_decoder(z) txt_recon = self.text_decoder(z) return img_recon, txt_recon, mu, logvar def infer(self, image=None, text=None): batch_size = image.size(0) if image is not None else text.size(0) use_cuda = next(self.parameters()).is_cuda # check if CUDA # initialize the universal prior expert mu, logvar = prior_expert((1, batch_size, self.n_latents), use_cuda=use_cuda) if image is not None: img_mu, img_logvar = self.image_encoder(image) mu = torch.cat((mu, img_mu.unsqueeze(0)), dim=0) logvar = torch.cat((logvar, img_logvar.unsqueeze(0)), dim=0) if text is not None: txt_mu, txt_logvar = self.text_encoder(text) mu = torch.cat((mu, txt_mu.unsqueeze(0)), dim=0) logvar = torch.cat((logvar, txt_logvar.unsqueeze(0)), dim=0) # product of experts to combine gaussians mu, logvar = self.experts(mu, logvar) return mu, logvar class ImageEncoder(nn.Module): """Parametrizes q(z|x). This task is quite a bit harder than MNIST so we probably need to use an CNN of some form. This will be good to get us ready for natural images. @param n_latents: integer size of latent vector """ def __init__(self, n_latents): super(ImageEncoder, self).__init__() self.features = nn.Sequential( nn.Conv2d(1, 32, 4, 2, 1, bias=False), Swish(), nn.Conv2d(32, 64, 4, 2, 1, bias=False), nn.BatchNorm2d(64), Swish(), nn.Conv2d(64, 128, 4, 2, 1, bias=False), nn.BatchNorm2d(128), Swish(), nn.Conv2d(128, 256, 4, 2, 0, bias=False), nn.BatchNorm2d(256), Swish()) self.classifier = nn.Sequential( nn.Linear(256 * 2 * 2, 512), Swish(), nn.Dropout(p=0.1), nn.Linear(512, n_latents * 2)) self.n_latents = n_latents def forward(self, x): n_latents = self.n_latents x = self.features(x) x = x.view(-1, 256 * 2 * 2) x = self.classifier(x) return x[:, :n_latents], x[:, n_latents:] class ImageDecoder(nn.Module): """Parametrizes p(x|z). 
@param n_latents: integer number of latent dimensions """ def __init__(self, n_latents): super(ImageDecoder, self).__init__() self.upsample = nn.Sequential( nn.Linear(n_latents, 256 * 2 * 2), Swish()) self.hallucinate = nn.Sequential( nn.ConvTranspose2d(256, 128, 4, 2, 0, bias=False), nn.BatchNorm2d(128), Swish(), nn.ConvTranspose2d(128, 64, 4, 2, 1, bias=False), nn.BatchNorm2d(64), Swish(), nn.ConvTranspose2d(64, 32, 5, 2, 1, bias=False), nn.BatchNorm2d(32), Swish(), nn.ConvTranspose2d(32, 1, 4, 2, 1, bias=False)) def forward(self, z): # the input will be a vector of size |n_latents| z = self.upsample(z) z = z.view(-1, 256, 2, 2) z = self.hallucinate(z) return z # NOTE: no sigmoid here. See train.py class TextEncoder(nn.Module): """Parametrizes q(z|y). We train an embedding layer from the 10 digit space to move to a continuous domain. The GRU is optionally bidirectional. @param n_latents: integer size of latent vector @param n_characters: integer number of possible characters (10 for MNIST) @param n_hiddens: integer [default: 200] number of hidden units in GRU @param bidirectional: boolean [default: True] hyperparameter for GRU. """ def __init__(self, n_latents, n_characters, n_hiddens=200, bidirectional=True): super(TextEncoder, self).__init__() self.embed = nn.Embedding(n_characters, n_hiddens) self.gru = nn.GRU(n_hiddens, n_hiddens, 1, dropout=0.1, bidirectional=bidirectional) self.h2p = nn.Linear(n_hiddens, n_latents * 2) # hiddens to parameters self.n_latents = n_latents self.n_hiddens = n_hiddens self.bidirectional = bidirectional def forward(self, x): n_hiddens = self.n_hiddens n_latents = self.n_latents x = self.embed(x) x = x.transpose(0, 1) # GRU expects (seq_len, batch, ...) x, h = self.gru(x, None) x = x[-1] # take only the last value if self.bidirectional: x = x[:, :n_hiddens] + x[:, n_hiddens:] # sum bidirectional outputs x = self.h2p(x) return x[:, :n_latents], x[:, n_latents:] class TextDecoder(nn.Module): """Parametrizes p(y|z). GRU for text decoding. Given a start token, sample a character via an RNN and repeat for a fixed length. @param n_latents: integer size of latent vector @param n_characters: integer size of characters (10 for MNIST) @param n_hiddens: integer [default: 200] number of hidden units in GRU """ def __init__(self, n_latents, n_characters, n_hiddens=200): super(TextDecoder, self).__init__() self.embed = nn.Embedding(n_characters, n_hiddens) self.z2h = nn.Linear(n_latents, n_hiddens) self.gru = nn.GRU(n_hiddens + n_latents, n_hiddens, 2, dropout=0.1) self.h2o = nn.Linear(n_hiddens + n_latents, n_characters) self.n_latents = n_latents self.n_characters = n_characters def forward(self, z): n_latents = self.n_latents n_characters = self.n_characters batch_size = z.size(0) # first input character is SOS c_in = Variable(torch.LongTensor([SOS]).repeat(batch_size)) # store output word here words = Variable(torch.zeros(batch_size, max_length, n_characters)) if z.is_cuda: c_in = c_in.cuda() words = words.cuda() # get hiddens from latents h = self.z2h(z).unsqueeze(0).repeat(2, 1, 1) # look through n_steps and generate characters for i in xrange(max_length): c_out, h = self.step(i, z, c_in, h) sample = torch.max(F.log_softmax(c_out, dim=1), dim=1)[1] words[:, i] = c_out c_in = sample return words # (batch_size, seq_len, ...) 
def step(self, ix, z, c_in, h): c_in = swish(self.embed(c_in)) c_in = torch.cat((c_in, z), dim=1) c_in = c_in.unsqueeze(0) c_out, h = self.gru(c_in, h) c_out = c_out.squeeze(0) c_out = torch.cat((c_out, z), dim=1) c_out = self.h2o(c_out) return c_out, h # NOTE: no softmax here. See train.py class ProductOfExperts(nn.Module): """Return parameters for product of independent experts. See https://arxiv.org/pdf/1410.7827.pdf for equations. @param mu: M x D for M experts @param logvar: M x D for M experts """ def forward(self, mu, logvar, eps=1e-8): var = torch.exp(logvar) + eps # precision of i-th Gaussian expert at point x T = 1. / var pd_mu = torch.sum(mu * T, dim=0) / torch.sum(T, dim=0) pd_var = 1. / torch.sum(T, dim=0) pd_logvar = torch.log(pd_var) return pd_mu, pd_logvar class Swish(nn.Module): def forward(self, x): return x * F.sigmoid(x) def swish(x): return x * F.sigmoid(x) def prior_expert(size, use_cuda=False): """Universal prior expert. Here we use a spherical Gaussian: N(0, 1). @param size: integer dimensionality of Gaussian @param use_cuda: boolean [default: False] cast CUDA on variables """ mu = Variable(torch.zeros(size)) logvar = Variable(torch.log(torch.ones(size))) if use_cuda: mu, logvar = mu.cuda(), logvar.cuda() return mu, logvar
9,790
34.219424
83
py
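TextEncoder sums the forward and backward halves of the bidirectional GRU output instead of keeping the doubled feature size. A standalone shape check (illustrative sizes, not repo code):

import torch
import torch.nn as nn

n_hiddens = 200
gru = nn.GRU(n_hiddens, n_hiddens, 1, bidirectional=True)
x = torch.randn(4, 8, n_hiddens)   # (seq_len, batch, features)
out, h = gru(x, None)
last = out[-1]                     # (8, 400): both directions concatenated
summed = last[:, :n_hiddens] + last[:, n_hiddens:]
print(summed.size())               # (8, 200)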
multimodal-vae-public
multimodal-vae-public-master/multimnist/datasets.py
""" This script generates a dataset similar to the MultiMNIST dataset described in [1]. However, we remove any translation. [1] Eslami, SM Ali, et al. "Attend, infer, repeat: Fast scene understanding with generative models." Advances in Neural Information Processing Systems. 2016. """ from __future__ import division from __future__ import print_function from __future__ import absolute_import import os import sys import random import numpy as np import numpy.random as npr from PIL import Image from random import shuffle from scipy.misc import imresize import torch import torchvision.datasets as dset from torch.utils.data.dataset import Dataset class MultiMNIST(Dataset): """Images with 0 to 4 digits of non-overlapping MNIST numbers. @param root: string path to dataset root @param train: boolean [default: True] whether to return training examples or testing examples @param transform: ?torchvision.Transforms optional function to apply to training inputs @param target_transform: ?torchvision.Transforms optional function to apply to training outputs """ processed_folder = 'multimnist' training_file = 'training.pt' test_file = 'test.pt' def __init__(self, root, train=True, transform=None, target_transform=None, download=False): self.root = os.path.expanduser(root) self.transform = transform self.target_transform = target_transform self.train = train # training set or test set if download: self.download() if not self._check_exists(): raise RuntimeError('Dataset not found.' + ' You can use download=True to download it') if self.train: self.train_data, self.train_labels = torch.load( os.path.join(self.root, self.processed_folder, self.training_file)) else: self.test_data, self.test_labels = torch.load( os.path.join(self.root, self.processed_folder, self.test_file)) def __getitem__(self, index): """ Args: index (int): Index Returns: tuple: (image, target) where target is index of the target class. """ if self.train: img, target = self.train_data[index], self.train_labels[index] else: img, target = self.test_data[index], self.test_labels[index] # doing this so that it is consistent with all other datasets # to return a PIL Image img = Image.fromarray(img.numpy(), mode='L') if self.transform is not None: img = self.transform(img) if self.target_transform is not None: target = self.target_transform(target) return img, target def __len__(self): if self.train: return len(self.train_data) else: return len(self.test_data) def _check_exists(self): return os.path.exists(os.path.join(self.root, self.processed_folder, self.training_file)) and \ os.path.exists(os.path.join(self.root, self.processed_folder, self.test_file)) def download(self): if self._check_exists(): return make_dataset(self.root, self.processed_folder, self.training_file, self.test_file) # -- code for generating MultiMNIST torch objects. -- # INSTRUCTIONS: run this file. def sample_one(canvas_size, mnist, resize=True, translate=True): i = np.random.randint(mnist['digits'].shape[0]) digit = mnist['digits'][i] label = mnist['labels'][i] if resize: # resize only if user specified scale = 0.1 * np.random.randn() + 1.3 resized = imresize(digit, 1. 
/ scale) else: resized = digit w = resized.shape[0] assert w == resized.shape[1] padding = canvas_size - w if translate: # translate only if user specified pad_l = np.random.randint(0, padding) pad_r = np.random.randint(0, padding) pad_width = ((pad_l, padding - pad_l), (pad_r, padding - pad_r)) positioned = np.pad(resized, pad_width, 'constant', constant_values=0) else: pad_l = padding // 2 pad_r = padding // 2 pad_width = ((pad_l, padding - pad_l), (pad_r, padding - pad_r)) positioned = np.pad(resized, pad_width, 'constant', constant_values=0) return positioned, label def sample_multi(num_digits, canvas_size, mnist, resize=True, translate=True): canvas = np.zeros((canvas_size, canvas_size)) labels = [] for _ in range(num_digits): positioned_digit, label = sample_one(canvas_size, mnist, resize=resize, translate=translate) canvas += positioned_digit labels.append(label) # Crude check for overlapping digits. if np.max(canvas) > 255: return sample_multi(num_digits, canvas_size, mnist, resize=resize, translate=translate) else: return canvas, labels def mk_dataset(n, mnist, min_digits, max_digits, canvas_size, resize=True, translate=True): x = [] y = [] for _ in range(n): num_digits = np.random.randint(min_digits, max_digits + 1) canvas, labels = sample_multi(num_digits, canvas_size, mnist, resize=resize, translate=translate) x.append(canvas) y.append(labels) return np.array(x, dtype=np.uint8), y def load_mnist(): train_loader = torch.utils.data.DataLoader( dset.MNIST(root='./data', train=True, download=True)) test_loader = torch.utils.data.DataLoader( dset.MNIST(root='./data', train=False, download=True)) train_data = { 'digits': train_loader.dataset.train_data.numpy(), 'labels': train_loader.dataset.train_labels } test_data = { 'digits': test_loader.dataset.test_data.numpy(), 'labels': test_loader.dataset.test_labels } return train_data, test_data def make_dataset(root, folder, training_file, test_file, min_digits=0, max_digits=2, resize=True, translate=True): if not os.path.isdir(os.path.join(root, folder)): os.makedirs(os.path.join(root, folder)) np.random.seed(681307) train_mnist, test_mnist = load_mnist() train_x, train_y = mk_dataset(60000, train_mnist, min_digits, max_digits, 50, resize=resize, translate=translate) test_x, test_y = mk_dataset(10000, test_mnist, min_digits, max_digits, 50, resize=resize, translate=translate) train_x = torch.from_numpy(train_x).byte() test_x = torch.from_numpy(test_x).byte() training_set = (train_x, train_y) test_set = (test_x, test_y) with open(os.path.join(root, folder, training_file), 'wb') as f: torch.save(training_set, f) with open(os.path.join(root, folder, test_file), 'wb') as f: torch.save(test_set, f) def sample_one_fixed(canvas_size, mnist, pad_l, pad_r, scale=1.3): i = np.random.randint(mnist['digits'].shape[0]) digit = mnist['digits'][i] label = mnist['labels'][i] resized = imresize(digit, 1. 
/ scale) w = resized.shape[0] assert w == resized.shape[1] padding = canvas_size - w pad_width = ((pad_l, padding - pad_l), (pad_r, padding - pad_r)) positioned = np.pad(resized, pad_width, 'constant', constant_values=0) return positioned, label def sample_multi_fixed(num_digits, canvas_size, mnist, reverse=False, scramble=False, no_repeat=False): canvas = np.zeros((canvas_size, canvas_size)) labels = [] pads = [(4, 4), (4, 23), (23, 4), (23, 23)] for i in range(num_digits): if no_repeat: # keep trying to generate examples that are # not already in previously generated labels while True: positioned_digit, label = sample_one_fixed( canvas_size, mnist, pads[i][0], pads[i][1]) if label not in labels: break else: positioned_digit, label = sample_one_fixed( canvas_size, mnist, pads[i][0], pads[i][1]) canvas += positioned_digit labels.append(label) if reverse and random.random() > 0.5: labels = labels[::-1] if scramble: random.shuffle(labels) # Crude check for overlapping digits. if np.max(canvas) > 255: return sample_multi_fixed(num_digits, canvas_size, mnist, reverse=reverse, scramble=scramble, no_repeat=no_repeat) else: return canvas, labels def mk_dataset_fixed(n, mnist, min_digits, max_digits, canvas_size, reverse=False, scramble=False, no_repeat=False): x = [] y = [] for _ in range(n): num_digits = np.random.randint(min_digits, max_digits + 1) canvas, labels = sample_multi_fixed(num_digits, canvas_size, mnist, reverse=reverse, scramble=scramble, no_repeat=no_repeat) x.append(canvas) y.append(labels) return np.array(x, dtype=np.uint8), y def make_dataset_fixed(root, folder, training_file, test_file, min_digits=0, max_digits=3, reverse=False, scramble=False, no_repeat=False): if not os.path.isdir(os.path.join(root, folder)): os.makedirs(os.path.join(root, folder)) np.random.seed(681307) train_mnist, test_mnist = load_mnist() train_x, train_y = mk_dataset_fixed(60000, train_mnist, min_digits, max_digits, 50, reverse=reverse, scramble=scramble, no_repeat=no_repeat) test_x, test_y = mk_dataset_fixed(10000, test_mnist, min_digits, max_digits, 50, reverse=reverse, scramble=scramble, no_repeat=no_repeat) train_x = torch.from_numpy(train_x).byte() test_x = torch.from_numpy(test_x).byte() training_set = (train_x, train_y) test_set = (test_x, test_y) with open(os.path.join(root, folder, training_file), 'wb') as f: torch.save(training_set, f) with open(os.path.join(root, folder, test_file), 'wb') as f: torch.save(test_set, f) if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument('--min-digits', type=int, default=0, help='minimum number of digits to add to an image') parser.add_argument('--max-digits', type=int, default=4, help='maximum number of digits to add to an image') parser.add_argument('--no-resize', action='store_true', default=False, help='if True, fix the image to be MNIST size') parser.add_argument('--no-translate', action='store_true', default=False, help='if True, fix the image to be in the center') parser.add_argument('--fixed', action='store_true', default=False, help='If True, ignore resize/translate options and generate') parser.add_argument('--scramble', action='store_true', default=False, help='If True, scramble labels and generate. Only does something if fixed is True.') parser.add_argument('--reverse', action='store_true', default=False, help='If True, reverse flips the labels i.e. 
4321 instead of 1234 with 0.5 probability.') parser.add_argument('--no-repeat', action='store_true', default=False, help='If True, do not generate images with multiple of the same label.') args = parser.parse_args() args.resize = not args.no_resize args.translate = not args.no_translate if args.no_repeat and not args.fixed: raise Exception('Must have --fixed if --no-repeat is supplied.') if args.scramble and not args.fixed: raise Exception('Must have --fixed if --scramble is supplied.') if args.reverse and not args.fixed: raise Exception('Must have --fixed if --reverse is supplied.') if args.reverse and args.scramble: print('Found --reversed and --scrambling. Overriding --reversed.') args.reverse = False # Generate the training set and dump it to disk. (Note, this will # always generate the same data, else error out.) if args.fixed: make_dataset_fixed('./data', 'multimnist', 'training.pt', 'test.pt', min_digits=args.min_digits, max_digits=args.max_digits, reverse=args.reverse, scramble=args.scramble, no_repeat=args.no_repeat) else: # if not fixed, then make classic MultiMNIST dataset # VAEs in general have trouble handling translation and rotation, # likely resulting in blurry reconstructions without additional # attention mechanisms. See AIR [1]. make_dataset('./data', 'multimnist', 'training.pt', 'test.pt', min_digits=args.min_digits, max_digits=args.max_digits, resize=args.resize, translate=args.translate)
13,354
37.93586
113
py
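sample_one_fixed places a digit by padding it out to the canvas; the same np.pad arithmetic with a dummy digit in place of MNIST data (one of the fixed (pad_l, pad_r) slots above; the digit width is roughly what imresize(digit, 1/1.3) yields):

import numpy as np

canvas_size, w = 50, 21
digit = np.full((w, w), 255, dtype=np.uint8)
padding = canvas_size - w
pad_l, pad_r = 4, 23
pad_width = ((pad_l, padding - pad_l), (pad_r, padding - pad_r))
positioned = np.pad(digit, pad_width, 'constant', constant_values=0)
print(positioned.shape)  # (50, 50)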
multimodal-vae-public
multimodal-vae-public-master/multimnist/train.py
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import

import os
import sys
import shutil
from tqdm import tqdm

import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import transforms

from model import MVAE
from datasets import MultiMNIST
from utils import charlist_tensor

def elbo_loss(recon_image, image, recon_text, text, mu, logvar,
              lambda_image=1.0, lambda_text=1.0, annealing_factor=1):
    """Bimodal ELBO loss function.

    @param recon_image: torch.Tensor
                        reconstructed image
    @param image: torch.Tensor
                  input image
    @param recon_text: torch.Tensor
                       reconstructed text probabilities
    @param text: torch.Tensor
                 input text (one-hot)
    @param mu: torch.Tensor
               mean of latent distribution
    @param logvar: torch.Tensor
                   log-variance of latent distribution
    @param lambda_image: float [default: 1.0]
                         weight for image BCE
    @param lambda_text: float [default: 1.0]
                        weight for text BCE
    @param annealing_factor: integer [default: 1]
                             multiplier for KL divergence term
    @return ELBO: torch.Tensor
                  evidence lower bound
    """
    image_bce, text_bce = 0, 0  # default params
    if recon_image is not None and image is not None:
        image_bce = torch.sum(binary_cross_entropy_with_logits(
            recon_image.view(-1, 1 * 50 * 50),
            image.view(-1, 1 * 50 * 50)), dim=1)
    if recon_text is not None and text is not None:
        batch_size, n_digits = recon_text.size(0), recon_text.size(1)
        recon_text = recon_text.view(-1, recon_text.size(2))
        text = text.view(-1)
        # sum over the different classes
        text_bce = torch.sum(cross_entropy(recon_text, text), dim=1)
        text_bce = text_bce.view(batch_size, n_digits)
        # sum over the number of digits
        text_bce = torch.sum(text_bce, dim=1)
    # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
    # https://arxiv.org/abs/1312.6114
    KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=1)
    ELBO = torch.mean(lambda_image * image_bce + lambda_text * text_bce
                      + annealing_factor * KLD)
    return ELBO

def binary_cross_entropy_with_logits(input, target):
    """Sigmoid Activation + Binary Cross Entropy

    @param input: torch.Tensor (size N)
    @param target: torch.Tensor (size N)
    @return loss: torch.Tensor (size N)
    """
    if not (target.size() == input.size()):
        raise ValueError("Target size ({}) must be the same as input size ({})".format(
            target.size(), input.size()))
    return (torch.clamp(input, 0) - input * target
            + torch.log(1 + torch.exp(-torch.abs(input))))

def cross_entropy(input, target, eps=1e-6):
    """k-Class Cross Entropy (Log Softmax + Log Loss)

    @param input: torch.Tensor (size N x K)
    @param target: torch.Tensor (size N x K)
    @param eps: error to add (default: 1e-6)
    @return loss: torch.Tensor (size N)
    """
    if not (target.size(0) == input.size(0)):
        raise ValueError(
            "Target size ({}) must be the same as input size ({})".format(
                target.size(0), input.size(0)))
    log_input = F.log_softmax(input + eps, dim=1)
    y_onehot = Variable(log_input.data.new(log_input.size()).zero_())
    y_onehot = y_onehot.scatter(1, target.unsqueeze(1), 1)
    loss = y_onehot * log_input
    return -loss

class AverageMeter(object):
    """Computes and stores the average and current value"""
    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

def save_checkpoint(state, is_best, folder='./', filename='checkpoint.pth.tar'):
    if not os.path.isdir(folder):
        os.mkdir(folder)
    torch.save(state, os.path.join(folder, filename))
    if is_best:
        shutil.copyfile(os.path.join(folder, filename),
                        os.path.join(folder, 'model_best.pth.tar'))

def load_checkpoint(file_path, use_cuda=False):
    checkpoint = torch.load(file_path) if use_cuda else \
        torch.load(file_path, map_location=lambda storage, location: storage)
    model = MVAE(checkpoint['n_latents'])
    model.load_state_dict(checkpoint['state_dict'])
    return model

if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--n-latents', type=int, default=64,
                        help='size of the latent embedding [default: 64]')
    parser.add_argument('--batch-size', type=int, default=100, metavar='N',
                        help='input batch size for training [default: 100]')
    parser.add_argument('--epochs', type=int, default=500, metavar='N',
                        help='number of epochs to train [default: 500]')
    parser.add_argument('--annealing-epochs', type=int, default=200, metavar='N',
                        help='number of epochs to anneal KL for [default: 200]')
    parser.add_argument('--lr', type=float, default=1e-3, metavar='LR',
                        help='learning rate [default: 1e-3]')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status [default: 10]')
    parser.add_argument('--lambda-image', type=float, default=1.,
                        help='multiplier for image reconstruction [default: 1]')
    parser.add_argument('--lambda-text', type=float, default=10.,
                        help='multiplier for text reconstruction [default: 10]')
    parser.add_argument('--cuda', action='store_true', default=False,
                        help='enables CUDA training [default: False]')
    args = parser.parse_args()
    args.cuda = args.cuda and torch.cuda.is_available()

    if not os.path.isdir('./trained_models'):
        os.makedirs('./trained_models')

    train_loader = torch.utils.data.DataLoader(
        MultiMNIST('./data', train=True, download=True,
                   transform=transforms.ToTensor(),
                   target_transform=charlist_tensor),
        batch_size=args.batch_size, shuffle=True)
    N_mini_batches = len(train_loader)
    test_loader = torch.utils.data.DataLoader(
        MultiMNIST('./data', train=False, download=True,
                   transform=transforms.ToTensor(),
                   target_transform=charlist_tensor),
        batch_size=args.batch_size, shuffle=False)

    model = MVAE(args.n_latents)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    if args.cuda:
        model.cuda()

    def train(epoch):
        model.train()
        train_loss_meter = AverageMeter()

        for batch_idx, (image, text) in enumerate(train_loader):
            if epoch < args.annealing_epochs:
                # compute the KL annealing factor for the current mini-batch in the current epoch
                annealing_factor = (float(batch_idx + (epoch - 1) * N_mini_batches + 1) /
                                    float(args.annealing_epochs * N_mini_batches))
            else:
                # by default the KL annealing factor is unity
                annealing_factor = 1.0

            if args.cuda:
                image = image.cuda()
                text = text.cuda()
            image = Variable(image)
            text = Variable(text)
            batch_size = len(image)

            # refresh the optimizer
            optimizer.zero_grad()

            # pass data through model
            recon_image_1, recon_text_1, mu_1, logvar_1 = model(image, text)
            recon_image_2, recon_text_2, mu_2, logvar_2 = model(image)
            recon_image_3, recon_text_3, mu_3, logvar_3 = model(text=text)

            # compute ELBO for each data combo
            joint_loss = elbo_loss(recon_image_1, image, recon_text_1, text, mu_1, logvar_1,
                                   lambda_image=args.lambda_image, lambda_text=args.lambda_text,
                                   annealing_factor=annealing_factor)
            image_loss = elbo_loss(recon_image_2, image, None, None, mu_2, logvar_2,
                                   lambda_image=args.lambda_image, lambda_text=args.lambda_text,
                                   annealing_factor=annealing_factor)
            text_loss = elbo_loss(None, None, recon_text_3, text, mu_3, logvar_3,
                                  lambda_image=args.lambda_image, lambda_text=args.lambda_text,
                                  annealing_factor=annealing_factor)
            train_loss = joint_loss + image_loss + text_loss
            train_loss_meter.update(train_loss.data[0], batch_size)

            train_loss.backward()
            optimizer.step()

            if batch_idx % args.log_interval == 0:
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tAnnealing-Factor: {:.3f}'.format(
                    epoch, batch_idx * len(image), len(train_loader.dataset),
                    100. * batch_idx / len(train_loader), train_loss_meter.avg, annealing_factor))

        print('====> Epoch: {}\tLoss: {:.4f}'.format(epoch, train_loss_meter.avg))

    def test(epoch):
        model.eval()
        test_loss_meter = AverageMeter()

        for batch_idx, (image, text) in enumerate(test_loader):
            if args.cuda:
                image = image.cuda()
                text = text.cuda()
            image = Variable(image, volatile=True)
            text = Variable(text, volatile=True)
            batch_size = len(image)

            recon_image_1, recon_text_1, mu_1, logvar_1 = model(image, text)
            recon_image_2, recon_text_2, mu_2, logvar_2 = model(image)
            recon_image_3, recon_text_3, mu_3, logvar_3 = model(text=text)

            joint_loss = elbo_loss(recon_image_1, image, recon_text_1, text, mu_1, logvar_1)
            image_loss = elbo_loss(recon_image_2, image, None, None, mu_2, logvar_2)
            text_loss = elbo_loss(None, None, recon_text_3, text, mu_3, logvar_3)
            test_loss = joint_loss + image_loss + text_loss
            test_loss_meter.update(test_loss.data[0], batch_size)

        print('====> Test Loss: {:.4f}'.format(test_loss_meter.avg))
        return test_loss_meter.avg

    best_loss = sys.maxint
    for epoch in range(1, args.epochs + 1):
        train(epoch)
        test_loss = test(epoch)
        is_best = test_loss < best_loss
        best_loss = min(test_loss, best_loss)
        # save the best model and current model
        save_checkpoint({
            'state_dict': model.state_dict(),
            'best_loss': best_loss,
            'n_latents': args.n_latents,
            'optimizer': optimizer.state_dict(),
        }, is_best, folder='./trained_models')
11,314
39.555556
105
py
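The inline KL annealing schedule from train(), pulled out as a standalone function (the constants are illustrative); it ramps the KL weight linearly from ~0 to 1 over the first annealing_epochs epochs:

annealing_epochs, N_mini_batches = 200, 600

def annealing_factor(epoch, batch_idx):
    if epoch >= annealing_epochs:
        return 1.0
    return (float(batch_idx + (epoch - 1) * N_mini_batches + 1)
            / float(annealing_epochs * N_mini_batches))

print(annealing_factor(1, 0))      # ~8.3e-06: KL weight starts near zero
print(annealing_factor(100, 300))  # ~0.498: roughly halfway
print(annealing_factor(250, 0))    # 1.0: annealing done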
multimodal-vae-public
multimodal-vae-public-master/celeba/sample.py
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import

import numpy as np

import torch
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import transforms
from torchvision.utils import save_image

from train import load_checkpoint
from datasets import ATTR_IX_TO_KEEP, N_ATTRS
from datasets import ATTR_TO_IX_DICT, IX_TO_ATTR_DICT
from datasets import tensor_to_attributes
from datasets import CelebAttributes

def fetch_celeba_image(attr_str):
    """Return a random image from the CelebA dataset with the attribute.

    @param attr_str: string
                     name of the attribute (see ATTR_TO_IX_DICT)
    @return: torch.autograd.Variable
             CelebA image
    """
    loader = torch.utils.data.DataLoader(
        CelebAttributes(partition='test',
                        image_transform=transforms.Compose([transforms.Resize(64),
                                                            transforms.CenterCrop(64),
                                                            transforms.ToTensor()])),
        batch_size=128, shuffle=False)
    images, attrs = [], []
    for batch_idx, (image, attr) in enumerate(loader):
        images.append(image)
        attrs.append(attr)
    images = torch.cat(images).cpu().numpy()
    attrs = torch.cat(attrs).cpu().numpy()
    attr_ix = ATTR_IX_TO_KEEP.index(ATTR_TO_IX_DICT[attr_str])
    images = images[attrs[:, attr_ix] == 1]
    image = images[np.random.choice(np.arange(images.shape[0]))]
    image = torch.from_numpy(image).float()
    image = image.unsqueeze(0)
    return Variable(image, volatile=True)

def fetch_celeba_attrs(attr_str):
    """Return an attribute vector with the given attribute switched on.

    @param attr_str: string
                     name of the attribute (see ATTR_TO_IX_DICT)
    @return: torch.autograd.Variable
             Variable wrapped around an attribute vector.
    """
    attrs = torch.zeros(N_ATTRS)
    attr_ix = ATTR_IX_TO_KEEP.index(ATTR_TO_IX_DICT[attr_str])
    attrs[attr_ix] = 1
    return Variable(attrs.unsqueeze(0), volatile=True)

if __name__ == "__main__":
    import os
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('model_path', type=str, help='path to trained model file')
    parser.add_argument('--n-samples', type=int, default=64,
                        help='Number of images and texts to sample [default: 64]')
    # condition sampling on a particular image
    parser.add_argument('--condition-on-image', type=str, default=None,
                        help='attribute name; if given, condition generation on an image with this attribute.')
    # condition sampling on particular attributes
    parser.add_argument('--condition-on-attrs', type=str, default=None,
                        help='attribute name; if given, condition generation on this attribute.')
    parser.add_argument('--cuda', action='store_true', default=False,
                        help='enables CUDA training [default: False]')
    args = parser.parse_args()
    args.cuda = args.cuda and torch.cuda.is_available()

    model = load_checkpoint(args.model_path, use_cuda=args.cuda)
    model.eval()
    if args.cuda:
        model.cuda()

    # mode 1: unconditional generation
    if not args.condition_on_image and not args.condition_on_attrs:
        mu = Variable(torch.Tensor([0]))
        std = Variable(torch.Tensor([1]))
        if args.cuda:
            mu = mu.cuda()
            std = std.cuda()
    # mode 2: generate conditioned on image
    elif args.condition_on_image and not args.condition_on_attrs:
        image = fetch_celeba_image(args.condition_on_image)
        if args.cuda:
            image = image.cuda()
        mu, logvar = model.infer(image=image)
        std = logvar.mul(0.5).exp_()
    # mode 3: generate conditioned on attrs
    elif args.condition_on_attrs and not args.condition_on_image:
        attrs = fetch_celeba_attrs(args.condition_on_attrs)
        if args.cuda:
            attrs = attrs.cuda()
        mu, logvar = model.infer(attrs=attrs)
        std = logvar.mul(0.5).exp_()
    # mode 4: generate conditioned on image and attrs
    elif args.condition_on_attrs and args.condition_on_image:
        image = fetch_celeba_image(args.condition_on_image)
        attrs = fetch_celeba_attrs(args.condition_on_attrs)
        if args.cuda:
            image = image.cuda()
            attrs = attrs.cuda()
        mu, logvar = model.infer(image=image, attrs=attrs)
        std = logvar.mul(0.5).exp_()

    # sample from standard gaussian
    sample = Variable(torch.randn(args.n_samples, model.n_latents))
    if args.cuda:
        sample = sample.cuda()

    # sample from particular gaussian by multiplying + adding
    mu = mu.expand_as(sample)
    std = std.expand_as(sample)
    sample = sample.mul(std).add_(mu)

    # generate image and attributes
    image_recon = F.sigmoid(model.image_decoder(sample)).cpu().data
    attrs_recon = F.sigmoid(model.attrs_decoder(sample)).cpu().data

    # save image samples to filesystem
    save_image(image_recon.view(args.n_samples, 3, 64, 64), './sample_image.png')
    # save attribute samples to filesystem
    sample_attrs = []
    for i in xrange(attrs_recon.size(0)):
        attrs = tensor_to_attributes(attrs_recon[i])
        sample_attrs.append(','.join(attrs))
    with open('./sample_attrs.txt', 'w') as fp:
        for attrs in sample_attrs:
            fp.write('%s\n' % attrs)
5,535
38.542857
82
py
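fetch_celeba_image filters the test split with a boolean mask over one attribute column; the same numpy idiom on toy data:

import numpy as np

attrs = np.array([[1, 0], [0, 1], [1, 1]])  # toy (n_images, n_attrs) matrix
images = np.arange(3)                       # stand-ins for image tensors
attr_ix = 0
print(images[attrs[:, attr_ix] == 1])       # [0 2]: rows with the attribute on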
multimodal-vae-public
multimodal-vae-public-master/celeba/model.py
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import

import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F

from datasets import N_ATTRS

class MVAE(nn.Module):
    """Multimodal Variational Autoencoder.

    @param n_latents: integer
                      number of latent dimensions
    """
    def __init__(self, n_latents):
        super(MVAE, self).__init__()
        self.image_encoder = ImageEncoder(n_latents)
        self.image_decoder = ImageDecoder(n_latents)
        self.attrs_encoder = AttributeEncoder(n_latents)
        self.attrs_decoder = AttributeDecoder(n_latents)
        self.experts = ProductOfExperts()
        self.n_latents = n_latents

    def reparametrize(self, mu, logvar):
        if self.training:
            std = logvar.mul(0.5).exp_()
            eps = Variable(std.data.new(std.size()).normal_())
            return eps.mul(std).add_(mu)
        else:  # return mean during inference
            return mu

    def forward(self, image=None, attrs=None):
        mu, logvar = self.infer(image, attrs)
        # reparametrization trick to sample
        z = self.reparametrize(mu, logvar)
        # reconstruct inputs based on that gaussian
        image_recon = self.image_decoder(z)
        attrs_recon = self.attrs_decoder(z)
        return image_recon, attrs_recon, mu, logvar

    def infer(self, image=None, attrs=None):
        batch_size = image.size(0) if image is not None else attrs.size(0)
        use_cuda = next(self.parameters()).is_cuda  # check if CUDA
        # initialize the universal prior expert
        mu, logvar = prior_expert((1, batch_size, self.n_latents),
                                  use_cuda=use_cuda)
        if image is not None:
            image_mu, image_logvar = self.image_encoder(image)
            mu = torch.cat((mu, image_mu.unsqueeze(0)), dim=0)
            logvar = torch.cat((logvar, image_logvar.unsqueeze(0)), dim=0)
        if attrs is not None:
            attrs_mu, attrs_logvar = self.attrs_encoder(attrs)
            mu = torch.cat((mu, attrs_mu.unsqueeze(0)), dim=0)
            logvar = torch.cat((logvar, attrs_logvar.unsqueeze(0)), dim=0)
        # product of experts to combine gaussians
        mu, logvar = self.experts(mu, logvar)
        return mu, logvar

class ImageEncoder(nn.Module):
    """Parametrizes q(z|x). This is the standard DCGAN architecture.

    @param n_latents: integer
                      number of latent variable dimensions.
    """
    def __init__(self, n_latents):
        super(ImageEncoder, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 32, 4, 2, 1, bias=False),
            Swish(),
            nn.Conv2d(32, 64, 4, 2, 1, bias=False),
            nn.BatchNorm2d(64),
            Swish(),
            nn.Conv2d(64, 128, 4, 2, 1, bias=False),
            nn.BatchNorm2d(128),
            Swish(),
            nn.Conv2d(128, 256, 4, 1, 0, bias=False),
            nn.BatchNorm2d(256),
            Swish())
        self.classifier = nn.Sequential(
            nn.Linear(256 * 5 * 5, 512),
            Swish(),
            nn.Dropout(p=0.1),
            nn.Linear(512, n_latents * 2))
        self.n_latents = n_latents

    def forward(self, x):
        n_latents = self.n_latents
        x = self.features(x)
        x = x.view(-1, 256 * 5 * 5)
        x = self.classifier(x)
        return x[:, :n_latents], x[:, n_latents:]

class ImageDecoder(nn.Module):
    """Parametrizes p(x|z). This is the standard DCGAN architecture.

    @param n_latents: integer
                      number of latent variable dimensions.
    """
    def __init__(self, n_latents):
        super(ImageDecoder, self).__init__()
        self.upsample = nn.Sequential(
            nn.Linear(n_latents, 256 * 5 * 5),
            Swish())
        self.hallucinate = nn.Sequential(
            nn.ConvTranspose2d(256, 128, 4, 1, 0, bias=False),
            nn.BatchNorm2d(128),
            Swish(),
            nn.ConvTranspose2d(128, 64, 4, 2, 1, bias=False),
            nn.BatchNorm2d(64),
            Swish(),
            nn.ConvTranspose2d(64, 32, 4, 2, 1, bias=False),
            nn.BatchNorm2d(32),
            Swish(),
            nn.ConvTranspose2d(32, 3, 4, 2, 1, bias=False))

    def forward(self, z):
        # the input will be a vector of size |n_latents|
        z = self.upsample(z)
        z = z.view(-1, 256, 5, 5)
        z = self.hallucinate(z)
        return z  # NOTE: no sigmoid here. See train.py

class AttributeEncoder(nn.Module):
    """Parametrizes q(z|y). We use a single inference network
    that encodes all 18 features.

    @param n_latents: integer
                      number of latent variable dimensions.
    """
    def __init__(self, n_latents):
        super(AttributeEncoder, self).__init__()
        self.net = nn.Sequential(
            nn.Linear(N_ATTRS, 512),
            nn.BatchNorm1d(512),
            Swish(),
            nn.Linear(512, 512),
            nn.BatchNorm1d(512),
            Swish(),
            nn.Linear(512, n_latents * 2))
        self.n_latents = n_latents

    def forward(self, x):
        n_latents = self.n_latents
        x = self.net(x)
        return x[:, :n_latents], x[:, n_latents:]

class AttributeDecoder(nn.Module):
    """Parametrizes p(y|z). We use a single generative network
    that decodes all 18 features.

    @param n_latents: integer
                      number of latent variable dimensions.
    """
    def __init__(self, n_latents):
        super(AttributeDecoder, self).__init__()
        self.net = nn.Sequential(
            nn.Linear(n_latents, 512),
            nn.BatchNorm1d(512),
            Swish(),
            nn.Linear(512, 512),
            nn.BatchNorm1d(512),
            Swish(),
            nn.Linear(512, 512),
            nn.BatchNorm1d(512),
            Swish(),
            nn.Linear(512, N_ATTRS))

    def forward(self, z):
        z = self.net(z)
        # not a one-hotted prediction: this returns a value
        # for every single index
        return z  # NOTE: no sigmoid here. See train.py

class ProductOfExperts(nn.Module):
    """Return parameters for product of independent experts.
    See https://arxiv.org/pdf/1410.7827.pdf for equations.

    @param mu: M x D for M experts
    @param logvar: M x D for M experts
    """
    def forward(self, mu, logvar, eps=1e-8):
        var = torch.exp(logvar) + eps
        # precision of i-th Gaussian expert at point x
        T = 1. / var
        pd_mu = torch.sum(mu * T, dim=0) / torch.sum(T, dim=0)
        pd_var = 1. / torch.sum(T, dim=0)
        pd_logvar = torch.log(pd_var)
        return pd_mu, pd_logvar

class Swish(nn.Module):
    """https://arxiv.org/abs/1710.05941"""
    def forward(self, x):
        return x * F.sigmoid(x)

def prior_expert(size, use_cuda=False):
    """Universal prior expert. Here we use a spherical
    Gaussian: N(0, 1).

    @param size: integer
                 dimensionality of Gaussian
    @param use_cuda: boolean [default: False]
                     cast CUDA on variables
    """
    mu = Variable(torch.zeros(size))
    logvar = Variable(torch.log(torch.ones(size)))
    if use_cuda:
        mu, logvar = mu.cuda(), logvar.cuda()
    return mu, logvar
7,415
31.243478
74
py
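The 256 * 5 * 5 flatten in ImageEncoder follows from the conv output-size formula floor((n + 2p - k)/s) + 1 applied to the 64x64 inputs; a quick check (not repo code):

def conv_out(n, k, s, p):
    return (n + 2 * p - k) // s + 1

n = 64
for (k, s, p) in [(4, 2, 1), (4, 2, 1), (4, 2, 1), (4, 1, 0)]:
    n = conv_out(n, k, s, p)
    print(n)  # 32, 16, 8, 5 -> 256 channels x 5 x 5 going into the classifier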
multimodal-vae-public
multimodal-vae-public-master/celeba/datasets.py
from __future__ import division from __future__ import print_function from __future__ import absolute_import import os import sys import copy import random import numpy as np import numpy.random as npr from PIL import Image from random import shuffle from scipy.misc import imresize import torch from torch.utils.data.dataset import Dataset VALID_PARTITIONS = {'train': 0, 'val': 1, 'test': 2} # go from label index to interpretable index ATTR_TO_IX_DICT = {'Sideburns': 30, 'Black_Hair': 8, 'Wavy_Hair': 33, 'Young': 39, 'Heavy_Makeup': 18, 'Blond_Hair': 9, 'Attractive': 2, '5_o_Clock_Shadow': 0, 'Wearing_Necktie': 38, 'Blurry': 10, 'Double_Chin': 14, 'Brown_Hair': 11, 'Mouth_Slightly_Open': 21, 'Goatee': 16, 'Bald': 4, 'Pointy_Nose': 27, 'Gray_Hair': 17, 'Pale_Skin': 26, 'Arched_Eyebrows': 1, 'Wearing_Hat': 35, 'Receding_Hairline': 28, 'Straight_Hair': 32, 'Big_Nose': 7, 'Rosy_Cheeks': 29, 'Oval_Face': 25, 'Bangs': 5, 'Male': 20, 'Mustache': 22, 'High_Cheekbones': 19, 'No_Beard': 24, 'Eyeglasses': 15, 'Bags_Under_Eyes': 3, 'Wearing_Necklace': 37, 'Wearing_Lipstick': 36, 'Big_Lips': 6, 'Narrow_Eyes': 23, 'Chubby': 13, 'Smiling': 31, 'Bushy_Eyebrows': 12, 'Wearing_Earrings': 34} # we only keep 18 of the more visually distinctive features # See [1] Perarnau, Guim, et al. "Invertible conditional gans for # image editing." arXiv preprint arXiv:1611.06355 (2016). ATTR_IX_TO_KEEP = [4, 5, 8, 9, 11, 12, 15, 17, 18, 20, 21, 22, 26, 28, 31, 32, 33, 35] IX_TO_ATTR_DICT = {v:k for k, v in ATTR_TO_IX_DICT.iteritems()} N_ATTRS = len(ATTR_IX_TO_KEEP) ATTR_TO_PLOT = ['Heavy_Makeup', 'Male', 'Mouth_Slightly_Open', 'Smiling', 'Wavy_Hair'] class CelebAttributes(Dataset): """Define dataset of images of celebrities and attributes. The user needs to have pre-defined the Anno and Eval folder from http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html @param partition: string train|val|test [default: train] See VALID_PARTITIONS global variable. @param data_dir: string path to root of dataset images [default: ./data] @param image_transform: ?torchvision.Transforms optional function to apply to training inputs @param attr_transform: ?torchvision.Transforms optional function to apply to training outputs """ def __init__(self, partition='train', data_dir='./data', image_transform=None, attr_transform=None): self.partition = partition self.image_transform = image_transform self.attr_transform = attr_transform self.data_dir = data_dir assert partition in VALID_PARTITIONS.keys() self.image_paths = load_eval_partition(partition, data_dir=data_dir) self.attr_data = load_attributes(self.image_paths, partition, data_dir=data_dir) self.size = int(len(self.image_paths)) def __getitem__(self, index): """ Args: index (int): Index Returns: tuple: (image, target) where target is index of the target class. """ image_path = os.path.join(self.data_dir, 'img_align_celeba', self.image_paths[index]) attr = self.attr_data[index] image = Image.open(image_path).convert('RGB') if self.image_transform is not None: image = self.image_transform(image) if self.attr_transform is not None: attr = self.attr_transform(attr) return image, attr def __len__(self): return self.size def load_eval_partition(partition, data_dir='./data'): """After downloading the dataset, we can load a subset for training or testing. 
@param partition: string
                      which subset to use (train|val|test)
    @param data_dir: string [default: ./data]
                     where the images are saved
    """
    eval_data = []
    with open(os.path.join(data_dir, 'Eval/list_eval_partition.txt')) as fp:
        rows = fp.readlines()
        for row in rows:
            path, label = row.strip().split(' ')
            label = int(label)
            if label == VALID_PARTITIONS[partition]:
                eval_data.append(path)
    return eval_data


def load_attributes(paths, partition, data_dir='./data'):
    """Load the attributes into a torch tensor.

    @param paths: list of strings
                  image filenames belonging to this partition
    @param partition: string
                      which subset to use (train|val|test)
    @param data_dir: string [default: ./data]
                     where the images are saved
    @return attr_data: torch.Tensor
                       binary (1 or 0) attribute matrix, kept columns only
    """
    if os.path.isfile(os.path.join(data_dir, 'Anno/attr_%s.npy' % partition)):
        attr_data = np.load(os.path.join(data_dir, 'Anno/attr_%s.npy' % partition))
    else:
        attr_data = []
        with open(os.path.join(data_dir, 'Anno/list_attr_celeba.txt')) as fp:
            rows = fp.readlines()
            for ix, row in enumerate(rows[2:]):
                row = row.strip().split()
                path, attrs = row[0], row[1:]
                if path in paths:
                    attrs = np.array(attrs).astype(int)
                    attrs[attrs < 0] = 0
                    attr_data.append(attrs)
        attr_data = np.vstack(attr_data).astype(np.int64)
        # cache to disk so the isfile() branch above can be taken on later runs
        # (assumption: caching is what the .npy check was meant to enable)
        np.save(os.path.join(data_dir, 'Anno/attr_%s.npy' % partition), attr_data)
    attr_data = torch.from_numpy(attr_data).float()
    return attr_data[:, ATTR_IX_TO_KEEP]


def tensor_to_attributes(tensor):
    """Convert a vector of attribute scores into the names of the attributes
    it switches on. Use this to interpret predictions over the kept attributes.

    @param tensor: PyTorch Tensor
                   D dimensional tensor
    @return attributes: list of strings
    """
    attrs = []
    n = tensor.size(0)
    tensor = torch.round(tensor)
    for i in xrange(n):
        if tensor[i] > 0.5:
            attr = IX_TO_ATTR_DICT[ATTR_IX_TO_KEEP[i]]
            attrs.append(attr)
    return attrs
6,170
39.333333
111
py
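ATTR_IX_TO_KEEP selects 18 of the 40 annotated attributes, and tensor_to_attributes inverts the encoding via IX_TO_ATTR_DICT. A small Python 3 sketch of that mapping (the dictionaries here are abbreviated copies of the ones defined above; the original file targets Python 2):

# abbreviated copies of the datasets.py mappings, for illustration only
ATTR_TO_IX_DICT = {'Bald': 4, 'Bangs': 5, 'Black_Hair': 8, 'Blond_Hair': 9}
IX_TO_ATTR_DICT = {v: k for k, v in ATTR_TO_IX_DICT.items()}
ATTR_IX_TO_KEEP = [4, 5, 8, 9]   # first four of the 18 kept indices

attrs = [0, 1, 1, 0]             # hypothetical binary vector over kept attributes
names = [IX_TO_ATTR_DICT[ATTR_IX_TO_KEEP[i]]
         for i, a in enumerate(attrs) if a == 1]
print(names)                     # ['Bangs', 'Black_Hair']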
multimodal-vae-public
multimodal-vae-public-master/celeba/train.py
from __future__ import division from __future__ import print_function from __future__ import absolute_import import os import sys import shutil from tqdm import tqdm import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F from torch.autograd import Variable from torchvision import transforms from model import MVAE from datasets import CelebAttributes from datasets import N_ATTRS def elbo_loss(recon_image, image, recon_attrs, attrs, mu, logvar, lambda_image=1.0, lambda_attrs=1.0, annealing_factor=1): """Bimodal ELBO loss function. @param recon_image: torch.Tensor reconstructed image @param image: torch.Tensor input image @param recon_attrs: torch.Tensor reconstructed attribute probabilities @param attrs: torch.Tensor input attributes @param mu: torch.Tensor mean of latent distribution @param logvar: torch.Tensor log-variance of latent distribution @param lambda_image: float [default: 1.0] weight for image BCE @param lambda_attrs: float [default: 1.0] weight for attribute BCE @param annealing_factor: integer [default: 1] multiplier for KL divergence term @return ELBO: torch.Tensor evidence lower bound """ image_bce, attrs_bce = 0, 0 # default params if recon_image is not None and image is not None: image_bce = torch.sum(binary_cross_entropy_with_logits( recon_image.view(-1, 3 * 64 * 64), image.view(-1, 3 * 64 * 64)), dim=1) if recon_attrs is not None and attrs is not None: for i in xrange(N_ATTRS): attr_bce = binary_cross_entropy_with_logits( recon_attrs[:, i], attrs[:, i]) attrs_bce += attr_bce # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014 # https://arxiv.org/abs/1312.6114 KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=1) ELBO = torch.mean(lambda_image * image_bce + lambda_attrs * attrs_bce + annealing_factor * KLD) return ELBO def binary_cross_entropy_with_logits(input, target): """Sigmoid Activation + Binary Cross Entropy @param input: torch.Tensor (size N) @param target: torch.Tensor (size N) @return loss: torch.Tensor (size N) """ if not (target.size() == input.size()): raise ValueError("Target size ({}) must be the same as input size ({})".format( target.size(), input.size())) return (torch.clamp(input, 0) - input * target + torch.log(1 + torch.exp(-torch.abs(input)))) class AverageMeter(object): """Computes and stores the average and current value""" def __init__(self): self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def save_checkpoint(state, is_best, folder='./', filename='checkpoint.pth.tar'): if not os.path.isdir(folder): os.mkdir(folder) torch.save(state, os.path.join(folder, filename)) if is_best: shutil.copyfile(os.path.join(folder, filename), os.path.join(folder, 'model_best.pth.tar')) def load_checkpoint(file_path, use_cuda=False): checkpoint = torch.load(file_path) if use_cuda else \ torch.load(file_path, map_location=lambda storage, location: storage) model = MVAE(checkpoint['n_latents']) model.load_state_dict(checkpoint['state_dict']) return model if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument('--n-latents', type=int, default=100, help='size of the latent embedding [default: 100]') parser.add_argument('--batch-size', type=int, default=100, metavar='N', help='input batch size for training [default: 100]') parser.add_argument('--epochs', type=int, default=100, metavar='N', help='number of epochs to 
train [default: 100]') parser.add_argument('--annealing-epochs', type=int, default=20, metavar='N', help='number of epochs to anneal KL for [default: 20]') parser.add_argument('--lr', type=float, default=1e-4, metavar='LR', help='learning rate [default: 1e-4]') parser.add_argument('--log-interval', type=int, default=10, metavar='N', help='how many batches to wait before logging training status [default: 10]') parser.add_argument('--lambda-image', type=float, default=1., help='multipler for image reconstruction [default: 1]') parser.add_argument('--lambda-attrs', type=float, default=10., help='multipler for attributes reconstruction [default: 10]') parser.add_argument('--cuda', action='store_true', default=False, help='enables CUDA training [default: False]') args = parser.parse_args() args.cuda = args.cuda and torch.cuda.is_available() if not os.path.isdir('./trained_models'): os.makedirs('./trained_models') # crop the input image to 64 x 64 preprocess_data = transforms.Compose([transforms.Resize(64), transforms.CenterCrop(64), transforms.ToTensor()]) train_loader = torch.utils.data.DataLoader( CelebAttributes(partition='train', data_dir='./data', image_transform=preprocess_data), batch_size=args.batch_size, shuffle=True) N_mini_batches = len(train_loader) test_loader = torch.utils.data.DataLoader( CelebAttributes(partition='val', data_dir='./data', image_transform=preprocess_data), batch_size=args.batch_size, shuffle=False) model = MVAE(args.n_latents) optimizer = optim.Adam(model.parameters(), lr=args.lr) if args.cuda: model.cuda() def train(epoch): model.train() train_loss_meter = AverageMeter() # NOTE: is_paired is 1 if the example is paired for batch_idx, (image, attrs) in enumerate(train_loader): if epoch < args.annealing_epochs: # compute the KL annealing factor for the current mini-batch in the current epoch annealing_factor = (float(batch_idx + (epoch - 1) * N_mini_batches + 1) / float(args.annealing_epochs * N_mini_batches)) else: # by default the KL annealing factor is unity annealing_factor = 1.0 if args.cuda: image = image.cuda() attrs = attrs.cuda() image = Variable(image) attrs = Variable(attrs) batch_size = len(image) # refresh the optimizer optimizer.zero_grad() # pass data through model recon_image_1, recon_attrs_1, mu_1, logvar_1 = model(image, attrs) recon_image_2, recon_attrs_2, mu_2, logvar_2 = model(image) recon_image_3, recon_attrs_3, mu_3, logvar_3 = model(attrs=attrs) # compute ELBO for each data combo joint_loss = elbo_loss(recon_image_1, image, recon_attrs_1, attrs, mu_1, logvar_1, lambda_image=args.lambda_image, lambda_attrs=args.lambda_attrs, annealing_factor=annealing_factor) image_loss = elbo_loss(recon_image_2, image, None, None, mu_2, logvar_2, lambda_image=args.lambda_image, lambda_attrs=args.lambda_attrs, annealing_factor=annealing_factor) attrs_loss = elbo_loss(None, None, recon_attrs_3, attrs, mu_3, logvar_3, lambda_image=args.lambda_image, lambda_attrs=args.lambda_attrs, annealing_factor=annealing_factor) train_loss = joint_loss + image_loss + attrs_loss train_loss_meter.update(train_loss.data[0], batch_size) # compute and take gradient step train_loss.backward() optimizer.step() if batch_idx % args.log_interval == 0: print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tAnnealing-Factor: {:.3f}'.format( epoch, batch_idx * len(image), len(train_loader.dataset), 100. 
* batch_idx / len(train_loader), train_loss_meter.avg, annealing_factor)) print('====> Epoch: {}\tLoss: {:.4f}'.format(epoch, train_loss_meter.avg)) def test(epoch): model.eval() test_loss_meter = AverageMeter() pbar = tqdm(total=len(test_loader)) for batch_idx, (image, attrs) in enumerate(test_loader): if args.cuda: image = image.cuda() attrs = attrs.cuda() image = Variable(image, volatile=True) attrs = Variable(attrs, volatile=True) batch_size = len(image) recon_image_1, recon_attrs_1, mu_1, logvar_1 = model(image, attrs) recon_image_2, recon_attrs_2, mu_2, logvar_2 = model(image) recon_image_3, recon_attrs_3, mu_3, logvar_3 = model(attrs=attrs) joint_loss = elbo_loss(recon_image_1, image, recon_attrs_1, attrs, mu_1, logvar_1, lambda_image=args.lambda_image, lambda_attrs=args.lambda_attrs) image_loss = elbo_loss(recon_image_2, image, None, None, mu_2, logvar_2, lambda_image=args.lambda_image, lambda_attrs=args.lambda_attrs) attrs_loss = elbo_loss(None, None, recon_attrs_3, attrs, mu_3, logvar_3, lambda_image=args.lambda_image, lambda_attrs=args.lambda_attrs) test_loss = joint_loss + image_loss + attrs_loss test_loss_meter.update(test_loss.data[0], batch_size) pbar.update() pbar.close() print('====> Test Loss: {:.4f}'.format(test_loss_meter.avg)) return test_loss_meter.avg best_loss = sys.maxint for epoch in range(1, args.epochs + 1): train(epoch) loss = test(epoch) is_best = loss < best_loss best_loss = min(loss, best_loss) # save the best model and current model save_checkpoint({ 'state_dict': model.state_dict(), 'best_loss': best_loss, 'n_latents': args.n_latents, 'optimizer' : optimizer.state_dict(), }, is_best, folder='./trained_models')
11,037
40.340824
105
py
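binary_cross_entropy_with_logits above is the numerically stable max(x, 0) - x*t + log(1 + exp(-|x|)) form of sigmoid followed by binary cross entropy, which avoids overflow for large |x|. A quick sketch checking it against the PyTorch built-in (function name and test values are mine):

import torch
import torch.nn.functional as F

def bce_with_logits(x, t):
    # max(x, 0) - x*t + log(1 + exp(-|x|)), as in train.py
    return torch.clamp(x, 0) - x * t + torch.log(1 + torch.exp(-torch.abs(x)))

x = torch.randn(8) * 10                    # large logits to stress stability
t = torch.randint(0, 2, (8,)).float()
ref = F.binary_cross_entropy_with_logits(x, t, reduction='none')
print(torch.allclose(bce_with_logits(x, t), ref, atol=1e-6))  # True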
multimodal-vae-public
multimodal-vae-public-master/celeba19/model.py
from __future__ import division from __future__ import print_function from __future__ import absolute_import import sys import torch import torch.nn as nn from torch.autograd import Variable from torch.nn import functional as F sys.path.append('../celeba') from datasets import N_ATTRS class MVAE(nn.Module): """Multimodal Variational Autoencoder. @param n_latents: integer number of latent dimensions """ def __init__(self, n_latents): super(MVAE, self).__init__() self.image_encoder = ImageEncoder(n_latents) self.image_decoder = ImageDecoder(n_latents) # have an inference network and decoder for each attribute (18 total) self.attr_encoders = nn.ModuleList([AttributeEncoder(n_latents) for _ in xrange(N_ATTRS)]) self.attr_decoders = nn.ModuleList([AttributeDecoder(n_latents) for _ in xrange(N_ATTRS)]) self.experts = ProductOfExperts() self.n_latents = n_latents def reparametrize(self, mu, logvar): if self.training: std = logvar.mul(0.5).exp_() eps = Variable(std.data.new(std.size()).normal_()) return eps.mul(std).add_(mu) else: # return mean during inference return mu def forward(self, image=None, attrs=[None for _ in xrange(N_ATTRS)]): """Forward pass through the MVAE. @param image: ?PyTorch.Tensor @param attrs: list of ?PyTorch.Tensors If a single attribute is missing, pass None instead of a Tensor. Regardless if all attributes are missing, still pass a list of <N_ATTR> None's. @return image_recon: PyTorch.Tensor @return attr_recons: list of PyTorch.Tensors (N_ATTRS length) """ mu, logvar = self.infer(image, attrs) # reparametrization trick to sample z = self.reparametrize(mu, logvar) # reconstruct inputs based on that gaussian image_recon = self.image_decoder(z) attr_recons = [] for i in xrange(N_ATTRS): attr_recon = self.attr_decoders[i](z) attr_recons.append(attr_recon.squeeze(1)) return image_recon, attr_recons, mu, logvar def infer(self, image=None, attrs=[None for _ in xrange(N_ATTRS)]): # get the batch size if image is not None: batch_size = len(image) else: for i in xrange(N_ATTRS): if attrs[i] is not None: batch_size = len(attrs[i]) break use_cuda = next(self.parameters()).is_cuda # check if CUDA mu, logvar = prior_expert((1, batch_size, self.n_latents), use_cuda=use_cuda) if image is not None: image_mu, image_logvar = self.image_encoder(image) mu = torch.cat((mu, image_mu.unsqueeze(0)), dim=0) logvar = torch.cat((logvar, image_logvar.unsqueeze(0)), dim=0) for i in xrange(N_ATTRS): if attrs[i] is not None: attr_mu, attr_logvar = self.attr_encoders[i](attrs[i].long()) mu = torch.cat((mu, attr_mu.unsqueeze(0)), dim=0) logvar = torch.cat((logvar, attr_logvar.unsqueeze(0)), dim=0) # product of experts to combine gaussians mu, logvar = self.experts(mu, logvar) return mu, logvar class ImageEncoder(nn.Module): """Parametrizes q(z|x). This is the standard DCGAN architecture. @param n_latents: integer number of latent variable dimensions. 
""" def __init__(self, n_latents): super(ImageEncoder, self).__init__() self.features = nn.Sequential( nn.Conv2d(3, 32, 4, 2, 1, bias=False), Swish(), nn.Conv2d(32, 64, 4, 2, 1, bias=False), nn.BatchNorm2d(64), Swish(), nn.Conv2d(64, 128, 4, 2, 1, bias=False), nn.BatchNorm2d(128), Swish(), nn.Conv2d(128, 256, 4, 1, 0, bias=False), nn.BatchNorm2d(256), Swish()) self.classifier = nn.Sequential( nn.Linear(256 * 5 * 5, 512), Swish(), nn.Dropout(p=0.1), nn.Linear(512, n_latents * 2)) self.n_latents = n_latents def forward(self, x): n_latents = self.n_latents x = self.features(x) x = x.view(-1, 256 * 5 * 5) x = self.classifier(x) return x[:, :n_latents], x[:, n_latents:] class ImageDecoder(nn.Module): """Parametrizes p(x|z). This is the standard DCGAN architecture. @param n_latents: integer number of latent variable dimensions. """ def __init__(self, n_latents): super(ImageDecoder, self).__init__() self.upsample = nn.Sequential( nn.Linear(n_latents, 256 * 5 * 5), Swish()) self.hallucinate = nn.Sequential( nn.ConvTranspose2d(256, 128, 4, 1, 0, bias=False), nn.BatchNorm2d(128), Swish(), nn.ConvTranspose2d(128, 64, 4, 2, 1, bias=False), nn.BatchNorm2d(64), Swish(), nn.ConvTranspose2d(64, 32, 4, 2, 1, bias=False), nn.BatchNorm2d(32), Swish(), nn.ConvTranspose2d(32, 3, 4, 2, 1, bias=False)) def forward(self, z): # the input will be a vector of size |n_latents| z = self.upsample(z) z = z.view(-1, 256, 5, 5) z = self.hallucinate(z) return z # NOTE: no sigmoid here. See train.py class AttributeEncoder(nn.Module): """Parametrizes q(z|y). We use a single inference network that encodes a single attribute. @param n_latents: integer number of latent variable dimensions. """ def __init__(self, n_latents): super(AttributeEncoder, self).__init__() self.net = nn.Sequential( nn.Embedding(2, 512), Swish(), nn.Linear(512, 512), Swish(), nn.Linear(512, n_latents * 2)) self.n_latents = n_latents def forward(self, x): n_latents = self.n_latents x = self.net(x.long()) return x[:, :n_latents], x[:, n_latents:] class AttributeDecoder(nn.Module): """Parametrizes p(y|z). We use a single generative network that decodes a single attribute. @param n_latents: integer number of latent variable dimensions. """ def __init__(self, n_latents): super(AttributeDecoder, self).__init__() self.net = nn.Sequential( nn.Linear(n_latents, 512), Swish(), nn.Linear(512, 512), Swish(), nn.Linear(512, 512), Swish(), nn.Linear(512, 1)) def forward(self, z): z = self.net(z) return z # NOTE: no sigmoid here. See train.py class ProductOfExperts(nn.Module): """Return parameters for product of independent experts. See https://arxiv.org/pdf/1410.7827.pdf for equations. @param mu: M x D for M experts @param logvar: M x D for M experts """ def forward(self, mu, logvar, eps=1e-8): var = torch.exp(logvar) + eps # precision of i-th Gaussian expert at point x T = 1. / var pd_mu = torch.sum(mu * T, dim=0) / torch.sum(T, dim=0) pd_var = 1. / torch.sum(T, dim=0) pd_logvar = torch.log(pd_var) return pd_mu, pd_logvar class Swish(nn.Module): """https://arxiv.org/abs/1710.05941""" def forward(self, x): return x * F.sigmoid(x) def prior_expert(size, use_cuda=False): """Universal prior expert. Here we use a spherical Gaussian: N(0, 1). @param size: integer dimensionality of Gaussian @param use_cuda: boolean [default: False] cast CUDA on variables """ mu = Variable(torch.zeros(size)) logvar = Variable(torch.log(torch.ones(size))) if use_cuda: mu, logvar = mu.cuda(), logvar.cuda() return mu, logvar
8,328
32.316
91
py
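reparametrize above implements the standard reparameterization trick: during training it draws z = mu + sigma * eps with eps ~ N(0, I), so gradients flow through mu and logvar while the noise stays outside the graph. A standalone sketch with toy shapes:

import torch

mu = torch.zeros(4, 8, requires_grad=True)      # hypothetical latent means
logvar = torch.zeros(4, 8, requires_grad=True)  # log-variance 0 -> std 1

std = logvar.mul(0.5).exp()     # sigma = exp(logvar / 2), as in the module
eps = torch.randn_like(std)     # noise sampled outside the graph
z = eps * std + mu              # differentiable w.r.t. mu and logvar
z.sum().backward()
print(mu.grad.shape)            # torch.Size([4, 8])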
multimodal-vae-public
multimodal-vae-public-master/celeba19/train.py
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import

import os
import sys
import shutil
import numpy as np
from tqdm import tqdm
from itertools import combinations

import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import transforms

from model import MVAE
sys.path.append('../celeba')
from datasets import N_ATTRS
from datasets import CelebAttributes


def elbo_loss(recon, data, mu, logvar, lambda_image=1.0, lambda_attrs=1.0,
              annealing_factor=1.):
    """Compute the ELBO for an arbitrary number of data modalities.

    @param recon: list of torch.Tensors/Variables
                  Contains one for each modality.
    @param data: list of torch.Tensors/Variables
                 Size must agree with recon.
    @param mu: Torch.Tensor
               Mean of the variational distribution.
    @param logvar: Torch.Tensor
                   Log variance for variational distribution.
    @param lambda_image: float [default: 1.0]
                         weight for image BCE
    @param lambda_attrs: float [default: 1.0]
                         weight for attribute BCE
    @param annealing_factor: float [default: 1]
                             Beta - how much to weight the KL regularizer.
    """
    assert len(recon) == len(data), "must supply ground truth for every modality."
    n_modalities = len(recon)
    batch_size = mu.size(0)

    BCE = 0  # reconstruction cost
    for ix in xrange(n_modalities):
        # dimensionality > 1 implies an image
        if len(recon[ix].size()) > 1:
            recon_ix = recon[ix].view(batch_size, -1)
            data_ix = data[ix].view(batch_size, -1)
            BCE += lambda_image * torch.sum(binary_cross_entropy_with_logits(recon_ix, data_ix), dim=1)
        else:  # this is for an attribute
            BCE += lambda_attrs * binary_cross_entropy_with_logits(recon[ix], data[ix])
    KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=1)
    ELBO = torch.mean(BCE + annealing_factor * KLD)
    return ELBO


def binary_cross_entropy_with_logits(input, target):
    """Sigmoid Activation + Binary Cross Entropy

    @param input: torch.Tensor (size N)
    @param target: torch.Tensor (size N)
    @return loss: torch.Tensor (size N)
    """
    if not (target.size() == input.size()):
        raise ValueError("Target size ({}) must be the same as input size ({})".format(
            target.size(), input.size()))

    return (torch.clamp(input, 0) - input * target
            + torch.log(1 + torch.exp(-torch.abs(input))))


def tensor_2d_to_list(x):
    # convert a 2D tensor to a list of 1D tensors.
    n_dims = x.size(1)
    list_of_tensors = []
    for i in xrange(n_dims):
        list_of_tensors.append(x[:, i])
    return list_of_tensors


def enumerate_combinations(n):
    """Enumerate entire pool of combinations. We use this to define
    the domain of ELBO terms (the pool of ~2^19 ELBO terms).

    @param n: integer
              number of features (19 for Celeb19)
    @return: a boolean array of ALL combinations
    """
    combos = []
    for i in xrange(2, n):  # subset sizes 2 to n - 1
        _combos = list(combinations(range(n), i))
        combos += _combos
    combos_np = np.zeros((len(combos), n))
    for i in xrange(len(combos)):
        for idx in combos[i]:
            combos_np[i][idx] = 1
    combos_np = combos_np.astype(np.bool)
    return combos_np


def sample_combinations(pool, size=1):
    """Return boolean list of which data points to use to compute a modality.
    Ignore combinations that are all True or only contain a single True.

    @param pool: np.array
                 enumerating all possible combinations.
    @param size: integer (default: 1)
                 number of combinations to sample.
""" n_modalities = pool.shape[1] pool_size = len(pool) pool_sums = np.sum(pool, axis=1) pool_dist = np.bincount(pool_sums) pool_space = np.where(pool_dist > 0)[0] sample_pool = np.random.choice(pool_space, size, replace=True) sample_dist = np.bincount(sample_pool) if sample_dist.size < n_modalities: zeros_pad = np.zeros(n_modalities - sample_dist.size).astype(np.int) sample_dist = np.concatenate((sample_dist, zeros_pad)) sample_combo = [] for ix in xrange(n_modalities): if sample_dist[ix] > 0: pool_i = pool[pool_sums == ix] combo_i = np.random.choice(range(pool_i.shape[0]), size=sample_dist[ix], replace=False) sample_combo.append(pool_i[combo_i]) sample_combo = np.concatenate(sample_combo) return sample_combo class AverageMeter(object): """Computes and stores the average and current value""" def __init__(self): self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def save_checkpoint(state, is_best, folder='./', filename='checkpoint.pth.tar'): if not os.path.isdir(folder): os.mkdir(folder) torch.save(state, os.path.join(folder, filename)) if is_best: shutil.copyfile(os.path.join(folder, filename), os.path.join(folder, 'model_best.pth.tar')) def load_checkpoint(file_path, use_cuda=False): checkpoint = torch.load(file_path) if use_cuda else \ torch.load(file_path, map_location=lambda storage, location: storage) model = MVAE(checkpoint['n_latents']) model.load_state_dict(checkpoint['state_dict']) return model if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument('--n-latents', type=int, default=100, help='size of the latent embedding [default: 100]') parser.add_argument('--batch-size', type=int, default=100, metavar='N', help='input batch size for training [default: 100]') parser.add_argument('--epochs', type=int, default=100, metavar='N', help='number of epochs to train [default: 100]') parser.add_argument('--annealing-epochs', type=int, default=20, metavar='N', help='number of epochs to anneal KL for [default: 20]') parser.add_argument('--lr', type=float, default=1e-4, metavar='LR', help='learning rate [default: 1e-4]') parser.add_argument('--log-interval', type=int, default=10, metavar='N', help='how many batches to wait before logging training status [default: 10]') parser.add_argument('--approx-m', type=int, default=1, help='number of ELBO terms to approx. 
the full MVAE objective [default: 1]') parser.add_argument('--lambda-image', type=float, default=1., help='multipler for image reconstruction [default: 1]') parser.add_argument('--lambda-attrs', type=float, default=10., help='multipler for attributes reconstruction [default: 10]') parser.add_argument('--cuda', action='store_true', default=False, help='enables CUDA training [default: False]') args = parser.parse_args() args.cuda = args.cuda and torch.cuda.is_available() if not os.path.isdir('./trained_models'): os.makedirs('./trained_models') # crop the input image to 64 x 64 preprocess_data = transforms.Compose([transforms.Resize(64), transforms.CenterCrop(64), transforms.ToTensor()]) train_loader = torch.utils.data.DataLoader( CelebAttributes(partition='train', data_dir='./data', image_transform=preprocess_data), batch_size=args.batch_size, shuffle=True) N_mini_batches = len(train_loader) test_loader = torch.utils.data.DataLoader( CelebAttributes(partition='val', data_dir='./data', image_transform=preprocess_data), batch_size=args.batch_size, shuffle=False) model = MVAE(args.n_latents) optimizer = optim.Adam(model.parameters(), lr=args.lr) if args.cuda: model.cuda() # enumerate all combinations so we can sample from this # every gradient step. NOTE: probably not the most efficient # way to do this but oh well. combination_pool = enumerate_combinations(19) def train(epoch): model.train() train_loss_meter = AverageMeter() for batch_idx, (image, attrs) in enumerate(train_loader): if epoch < args.annealing_epochs: # compute the KL annealing factor for the current mini-batch in the current epoch annealing_factor = (float(batch_idx + (epoch - 1) * N_mini_batches + 1) / float(args.annealing_epochs * N_mini_batches)) else: # by default the KL annealing factor is unity annealing_factor = 1.0 if args.cuda: image = image.cuda() attrs = attrs.cuda() image = Variable(image) attrs = Variable(attrs) attrs = tensor_2d_to_list(attrs) # convert tensor to list batch_size = len(image) # refresh the optimizer optimizer.zero_grad() train_loss = 0 # accumulate train loss here so we don't store a lot of things. 
n_elbo_terms = 0  # track number of ELBO terms

            # compute ELBO using all data (``complete")
            recon_image, recon_attrs, mu, logvar = model(image, attrs)
            train_loss += elbo_loss([recon_image] + recon_attrs, [image] + attrs, mu, logvar,
                                    lambda_image=args.lambda_image, lambda_attrs=args.lambda_attrs,
                                    annealing_factor=annealing_factor)
            n_elbo_terms += 1  # keep track of how many terms there are

            # compute ELBO using only image data
            recon_image, _, mu, logvar = model(image=image)
            train_loss += elbo_loss([recon_image], [image], mu, logvar,
                                    lambda_image=args.lambda_image, lambda_attrs=args.lambda_attrs,
                                    annealing_factor=annealing_factor)
            n_elbo_terms += 1  # keep track of how many terms there are

            # compute ELBO using only text data
            for ix in xrange(len(attrs)):
                _, recon_attrs, mu, logvar = model(attrs=[attrs[k] if k == ix else None
                                                          for k in xrange(len(attrs))])
                train_loss += elbo_loss([recon_attrs[ix]], [attrs[ix]], mu, logvar,
                                        annealing_factor=annealing_factor)
                n_elbo_terms += 1

            # sample some number of terms
            if args.approx_m > 0:
                sample_combos = sample_combinations(combination_pool, size=args.approx_m)
                for sample_combo in sample_combos:
                    attrs_combo = sample_combo[1:]
                    recon_image, recon_attrs, mu, logvar = model(
                        image=image if sample_combo[0] else None,
                        attrs=[attrs[ix] if attrs_combo[ix] else None
                               for ix in xrange(attrs_combo.size)])
                    if sample_combo[0]:  # check if image is present
                        elbo = elbo_loss([recon_image] + [recon_attrs[ix] for ix in xrange(attrs_combo.size)
                                                          if attrs_combo[ix]],
                                         [image] + [attrs[ix] for ix in xrange(attrs_combo.size)
                                                    if attrs_combo[ix]],
                                         mu, logvar, annealing_factor=annealing_factor)
                    else:
                        elbo = elbo_loss([recon_attrs[ix] for ix in xrange(attrs_combo.size)
                                          if attrs_combo[ix]],
                                         [attrs[ix] for ix in xrange(attrs_combo.size)
                                          if attrs_combo[ix]],
                                         mu, logvar, annealing_factor=annealing_factor)
                    train_loss += elbo
                    n_elbo_terms += 1

            assert n_elbo_terms == (len(attrs) + 1) + 1 + args.approx_m  # N + 1 + M
            train_loss_meter.update(train_loss.data[0], len(image))

            # compute and take gradient step
            train_loss.backward()
            optimizer.step()

            if batch_idx % args.log_interval == 0:
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tAnnealing-Factor: {:.3f}'.format(
                    epoch, batch_idx * batch_size, len(train_loader.dataset),
                    100. * batch_idx / len(train_loader), train_loss_meter.avg, annealing_factor))

        print('====> Epoch: {}\tLoss: {:.4f}'.format(epoch, train_loss_meter.avg))


    def test(epoch):
        model.eval()
        test_loss = 0  # for simplicity, here i'm only going to track the joint loss.
        pbar = tqdm(total=len(test_loader))

        for batch_idx, (image, attrs) in enumerate(test_loader):
            if args.cuda:
                image, attrs = image.cuda(), attrs.cuda()
            image = Variable(image, volatile=True)
            attrs = Variable(attrs, volatile=True)
            batch_size = image.size(0)
            attrs = tensor_2d_to_list(attrs)

            # compute the elbo using all data.
            recon_image, recon_attrs, mu, logvar = model(image, attrs)
            test_loss += elbo_loss([recon_image] + recon_attrs, [image] + attrs,
                                   mu, logvar).data[0]
            pbar.update()

        pbar.close()
        test_loss /= len(test_loader)
        print('====> Test Loss: {:.4f}'.format(test_loss))
        return test_loss


    best_loss = sys.maxint
    for epoch in range(1, args.epochs + 1):
        train(epoch)
        loss = test(epoch)
        is_best = loss < best_loss
        best_loss = min(loss, best_loss)
        # save the best model and current model
        save_checkpoint({
            'state_dict': model.state_dict(),
            'best_loss': best_loss,
            'n_latents': args.n_latents,
            'optimizer' : optimizer.state_dict(),
        }, is_best, folder='./trained_models')
14,718
40.345506
129
py
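enumerate_combinations materializes every modality subset of size 2..n-1 as a boolean mask, and sample_combinations then draws a few masks per gradient step to approximate the full objective. A self-contained Python 3 sketch of the pool for small n:

import numpy as np
from itertools import combinations

def enumerate_combinations(n):
    # all subsets of size 2..n-1, as boolean masks (mirrors train.py)
    combos = []
    for i in range(2, n):
        combos += list(combinations(range(n), i))
    out = np.zeros((len(combos), n), dtype=bool)
    for row, combo in enumerate(combos):
        out[row, list(combo)] = True
    return out

pool = enumerate_combinations(4)
print(pool.shape)    # (10, 4): C(4,2) + C(4,3) = 6 + 4 subsets
print(pool[:3].astype(int))
# [[1 1 0 0]
#  [1 0 1 0]
#  [1 0 0 1]]

For n = 19 this pool holds 2^19 - 21 = 524267 masks, which is why only approx_m of them are sampled per step.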
Fine-tuning-NOs
Fine-tuning-NOs-master/main.py
from pytorch_lightning.callbacks import ModelCheckpoint import pytorch_lightning as pl import yaml import argparse import utilities import os import torch import shutil def datasetFactory(config, do, args=None): c_data =config["data"] if args is None: gl = utilities.GettingLists(data_for_training=c_data["n_sample"], wave_eq = c_data["PDE_type"], data_base = c_data["process"], PATH = c_data["PATH"]) return utilities.MyLoader(GL=gl, do = do, config=config, args = None) elif args is not None: gl = utilities.GettingLists(data_for_training=c_data["n_sample"], wave_eq = c_data["PDE_type"], data_base = args.data_base, PATH = args.PATH) return utilities.MyLoader(GL=gl, do = do, config=config, args=args) def main(args, config = None): if config is None: with open(args.config_file, 'r') as stream: config = yaml.load(stream, yaml.FullLoader) print(config) print(args) model = utilities.choosing_model(config) print(model) if args.all_ckp == False: save_file = os.path.join(config["ckpt"]["PATH"], config["ckpt"]["save_dir"] ) checkpoint_callback = ModelCheckpoint( dirpath=save_file, every_n_epochs = 1, save_last = True, monitor = 'val_loss', mode = 'min', save_top_k = args.save_top_k, filename="model-{epoch:03d}-{val_loss:.4f}", ) elif args.all_ckp == True: save_file = os.path.join(config["ckpt"]["PATH"], "all_epochs", f'{config["train"]["epochs"]}_{config["model"]["activ"]}', config["ckpt"]["save_dir"] ) checkpoint_callback = ModelCheckpoint( dirpath=save_file, every_n_epochs = 1, save_top_k = -1, filename="model-{epoch:03d}-{val_loss:.4f}", ) if os.path.exists(save_file): print(f"The model directory exists. Overwrite? {args.erase}") if args.erase == True: shutil.rmtree(save_file) if args.checkpoint is None: #left the default values provided by the config file train_dataloader, val_dataloader = datasetFactory(config=config, do=args.do, args=None) max_epochs = config["train"]["epochs"] elif args.checkpoint is not None: print(f"Load from checkpoint {args.checkpoint}") #model=model.load_from_checkpoint(args.checkpoint) checkpoint = torch.load(args.checkpoint, map_location=lambda storage, loc: storage) print(checkpoint.keys()) model.load_state_dict(checkpoint["state_dict"]) print(model.learning_rate) #change optimizer if needed if args.lr is None: args.lr = config["train"]["lr"] if args.weight_decay is None: args.weight_decay = config["train"]["weight_decay"] if args.optimizer is not None: if args.optimizer == "SGD": optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9) elif args.optimizer == "Adam": optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay) else: optimizer = torch.optim.AdamW(model.parameters(), lr=args.lr, weight_decay=args.weight_decay) #change the scheduler if needed if args.scheduler is not None: if args.scheduler == "ReduceLROnPlateau": scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=10, verbose=True, eps=1e-08, min_lr=0) elif args.scheduler == "CosineAnnealingLR": scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=10, eta_min=0, last_epoch=-1) else: if args.step_size is None: args.step_size = config["train"]["step_size"] if args.gamma is None: args.gamma = config["train"]["gamma"] scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma) model.configure_optimizers(optimizer, scheduler) #change the number of epochs if needed if args.epochs is not None: print(f"Change the number of epochs to {args.epochs}") max_epochs = 
args.epochs else: max_epochs = config["train"]["epochs"] print(checkpoint["epoch"]) #change the filename if needed if args.filename is not None: print(f"Change the filename to {args.filename}") checkpoint_callback.filename = args.filename+"-{epoch:03d}-{val_loss:.4f}" if args.data_base is None: args.data_base = config["data"]["process"] args.PATH = config["data"]["PATH"] else: args.PATH = os.path.join('save_files', 'acoustic', args.data_base) #change the config file if needed through the command line train_dataloader, val_dataloader = datasetFactory(config=config, do = args.do, args=args) if args.usual_ckpt == True: trainer = pl.Trainer(max_epochs=max_epochs, accelerator=args.accelerator, devices=args.devices, default_root_dir=save_file) elif args.usual_ckpt == False: trainer = pl.Trainer(max_epochs=max_epochs, accelerator=args.accelerator, devices=args.devices, callbacks=[checkpoint_callback]) if args.resume == True: trainer.fit(model, train_dataloader, val_dataloader, ckpt_path=args.checkpoint) else: trainer.fit(model, train_dataloader, val_dataloader) if __name__ == '__main__': parser = argparse.ArgumentParser('Training of the Architectures', add_help=True) parser.add_argument('-c','--config_file', type=str, help='Path to the configuration file', default='config/acoustic/GRF_7Hz/FNO25k.yaml') parser.add_argument('-a', '--all_ckp', type = bool, help='Allow to save all the ckpt', default=False) parser.add_argument('-u_ckpt', '--usual_ckpt', type = bool, help='Allow to save the usual ckpt as in pytorch-lightning\ use it only if you want to save the ckpt in a different directory\ or multiple analysis of networks', default=False) parser.add_argument('-e', '--erase', type = bool, help='erase_save_dir', default=False) parser.add_argument( '-ckpt', '--checkpoint', type = str, help='checkpoint file to load', default=None) parser.add_argument('-r', '--resume', type = bool, help='resume training', default=False) parser.add_argument('-savetop', '--save_top_k', type = int, help='save top k ckpt', default=3) parser.add_argument('-lr', '--lr', type = float, help='learning rate', default=None) parser.add_argument('-o', '--optimizer', type = str, help='optimizer', default=None) parser.add_argument('-s', '--scheduler', type = str, help='scheduler', default=None) parser.add_argument('-ep', '--epochs', type = int, help='number of epochs', default=None) parser.add_argument('-f', '--filename', type = str, help='filename', default=None) parser.add_argument('-b', '--batch_size', type = int, help='batch_size', default=None) parser.add_argument('-lw', '--load_workers', type = int, help='load_workers', default=None) parser.add_argument('-db', '--data_base', type = str, help='database', default=None) parser.add_argument('-weight_decay', '--weight_decay', type = float, help='weight_decay', default=None) parser.add_argument('-step_size', '--step_size', type = int, help='step_size', default=None) parser.add_argument('-gamma', '--gamma', type = float, help='gamma', default=None) parser.add_argument('-P', '--PATH', type = str, help='PATH', default=None) parser.add_argument('-d', '--devices', type = int, help='devices', default=1) parser.add_argument('-acc', '--accelerator', type = str, help='accelerator', default='gpu') parser.add_argument('-do', '--do', type=str, help='do', default="train") args=parser.parse_args() config_file = args.config_file main(args)
10,447
45.435556
153
py
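When resuming from a checkpoint, main.py rebuilds the optimizer and scheduler from command-line flags before handing them to configure_optimizers. A condensed standalone sketch of that fallback chain (toy model and flag values of my own; note the sketch always constructs an optimizer before the scheduler, which the original only does when --optimizer is passed):

import torch
from torch import nn, optim

model = nn.Linear(4, 1)                          # stand-in for the Lightning module
opt_name, sched_name = None, None                # e.g. parsed from argparse
lr, weight_decay, step_size, gamma = 1e-4, 1e-5, 100, 0.5

if opt_name == "SGD":
    optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)
elif opt_name == "Adam":
    optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
else:                                            # AdamW fallback, as in main.py
    optimizer = optim.AdamW(model.parameters(), lr=lr, weight_decay=weight_decay)

if sched_name == "ReduceLROnPlateau":
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
                                                     factor=0.1, patience=10)
elif sched_name == "CosineAnnealingLR":
    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=10)
else:
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma)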
Fine-tuning-NOs
Fine-tuning-NOs-master/reconstruction_data.py
from utilities import choosing_model
import yaml
import argparse
import utilities
import os
import torch
import pytorch_lightning as pl
import numpy as np
import matplotlib.pyplot as plt
from utilities import to_numpy

def saving_files(x, y, out, database, name):
    PATH = "make_graph/data" + '/' + database + '/' + name
    x = to_numpy(x)
    y = to_numpy(y)
    out = to_numpy(out)
    if not os.path.exists(PATH):
        os.makedirs(PATH)
    os.chdir(PATH)
    np.save('wavespeed.npy', x)
    np.save('data.npy', y)
    np.save(f'data_{name}.npy', out)

def datasetFactoryTest(config):
    c_data = config["data"]
    gl = utilities.GettingLists(data_for_training=c_data["n_sample"],
                                wave_eq=c_data["PDE_type"],
                                data_base=c_data["process"],
                                PATH=c_data["PATH"])
    return utilities.MyLoader(GL=gl, do="test", config=config)

if __name__ == '__main__':
    parser = argparse.ArgumentParser('Getting data from the test set', add_help=False)
    parser.add_argument('-c', '--config_file', type=str, help='Path to the configuration file',
                        default='config/acoustic/GRF_7Hz/FNO25k.yaml')
    args = parser.parse_args()
    config_file = args.config_file

    with open(config_file, 'r') as stream:
        config = yaml.load(stream, yaml.FullLoader)
    print(config)

    if config["Project"]["name"] == "sFNO+epsilon_v2":
        file_ckpt = "epoch=199-step=166800.ckpt"
    else:
        file_ckpt = "epoch=99-step=50000.ckpt"

    c_save = config["ckpt"]
    model = choosing_model(config)
    test_dataloader = datasetFactoryTest(config)
    myloss = utilities.LpLoss(size_average=False)

    PATH = os.path.join(c_save["PATH"], c_save["save_dir"], "lightning_logs", f"version_{0}",
                        "checkpoints", file_ckpt)
    checkpoint = torch.load(PATH, map_location=lambda storage, loc: storage)
    model.load_state_dict(checkpoint["state_dict"])
    model.cuda()
    model.eval()

    k_list = [k for k in range(10)]
    save = True
    batch_size = 20
    with torch.no_grad():
        for x, y in test_dataloader:
            s = x.shape[2]
            x, y = (x[:batch_size, ...]).cuda(), (y[:batch_size, ...]).cuda()
            out = model(x).reshape(batch_size, s, s, -1)
            break

    saving_files(x, y, out, database=config["data"]["process"], name=config["ckpt"]["alias"])
2,493
32.253333
94
py
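reconstruction_data.py writes three arrays per run with np.save after chdir-ing into the target folder. Reading them back for later plotting is symmetric; a small sketch (database and name are placeholder values matching the saving_files layout):

import os
import numpy as np

database, name = "GRF_7Hz", "FNO25k"   # hypothetical config values
PATH = os.path.join("make_graph", "data", database, name)

x = np.load(os.path.join(PATH, "wavespeed.npy"))
y = np.load(os.path.join(PATH, "data.npy"))
out = np.load(os.path.join(PATH, f"data_{name}.npy"))
print(x.shape, y.shape, out.shape)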
Fine-tuning-NOs
Fine-tuning-NOs-master/reconstruction_plot.py
from utilities import choosing_model
import yaml
import argparse
import utilities
import os
import torch
import pytorch_lightning as pl
import numpy as np
import matplotlib.pyplot as plt
from utilities import to_numpy

def plotting(in_, NN_out, out, name, database, k_list=[1, 2, 3, 4], save=False,
             vmin=-0.5, vmax=0.5, shrink=0.8):
    in_ = to_numpy(in_)[k_list, ...]
    NN_out = to_numpy(NN_out)[k_list, ...]
    out = to_numpy(out)[k_list, ...]
    for k in k_list:
        if database == 'GRF_7Hz':
            s = 128
            in_k = in_[k, ...].reshape(s, s)
            out_k = out[k, ...].reshape(s, s)
            NN_k = NN_out[k, ...].reshape(s, s)
            plt.figure(figsize=(10, 10))
            plt.subplot(131)
            plt.imshow(in_k, vmin=1., vmax=3., cmap='jet')
            plt.colorbar(shrink=shrink)
            plt.title(f'wavespeed: {k}')
            plt.subplot(132)
            plt.imshow(out_k, vmin=vmin, vmax=vmax, cmap='seismic')
            plt.colorbar(shrink=shrink)
            plt.title(f'HDG sample: {k}')
            plt.subplot(133)
            plt.imshow(NN_k, vmin=vmin, vmax=vmax, cmap='seismic')
            plt.colorbar(shrink=shrink)
            plt.title(f'{name} sample: {k}')
        elif database in ('GRF_12Hz', 'GRF_15Hz'):
            s = 64
            in_k = in_[k, ...].reshape(s, s)
            out_k = out[k, ...].reshape(s, s, -1)
            NN_k = NN_out[k, ...].reshape(s, s, -1)
            plt.figure(figsize=(20, 10))
            plt.subplot(231)
            plt.imshow(in_k, vmin=1., vmax=5., cmap='jet')
            plt.colorbar(shrink=shrink)
            plt.title(f'wavespeed: {k}')
            plt.subplot(232)
            plt.imshow(out_k[:, :, 0].reshape(s, s), vmin=vmin, vmax=vmax, cmap='seismic')
            plt.colorbar(shrink=shrink)
            plt.title(f'HDG (real) sample: {k}')
            plt.subplot(233)
            plt.imshow(NN_k[:, :, 0].reshape(s, s), vmin=vmin, vmax=vmax, cmap='seismic')
            plt.colorbar(shrink=shrink)
            plt.title(f'{name} (real) sample: {k}')
            plt.subplot(235)
            plt.imshow(out_k[:, :, 1].reshape(s, s), vmin=vmin, vmax=vmax, cmap='seismic')
            plt.colorbar(shrink=shrink)
            plt.title(f'HDG (imaginary) sample: {k}')
            plt.subplot(236)
            plt.imshow(NN_k[:, :, 1].reshape(s, s), vmin=vmin, vmax=vmax, cmap='seismic')
            plt.colorbar(shrink=shrink)
            plt.title(f'{name} (imaginary) sample: {k}')
        if save == True:
            saving_dir = f'make_graph/figures/{database}/' + f'{name}'
            if not os.path.exists(saving_dir):
                os.makedirs(saving_dir)
            plt.savefig(f"{saving_dir}/ex_{k}.png")

def datasetFactoryTest(config):
    c_data = config["data"]
    gl = utilities.GettingLists(data_for_training=c_data["n_sample"],
                                wave_eq=c_data["PDE_type"],
                                data_base=c_data["process"],
                                PATH=c_data["PATH"])
    return utilities.MyLoader(GL=gl, do="test", config=config)

if __name__ == '__main__':
    parser = argparse.ArgumentParser('Plotting from test data', add_help=False)
    parser.add_argument('-c', '--config_file', type=str, help='Path to the configuration file',
                        default='config/acoustic/GRF_7Hz/FNO25k.yaml')
    parser.add_argument('-s', '--shrink', type=float, help='shrink bar value', default=0.8)
    args = parser.parse_args()
    config_file = args.config_file

    with open(config_file, 'r') as stream:
        config = yaml.load(stream, yaml.FullLoader)
    print(config)

    c_save = config["ckpt"]
    model = choosing_model(config)
    if config["Project"]["name"] == "sFNO+epsilon_v2":
        file_ckpt = "epoch=199-step=166800.ckpt"
    else:
        file_ckpt = "epoch=99-step=50000.ckpt"

    test_dataloader = datasetFactoryTest(config)
    myloss = utilities.LpLoss(size_average=False)

    PATH = os.path.join(c_save["PATH"], c_save["save_dir"], "lightning_logs", f"version_{0}",
                        "checkpoints", file_ckpt)
    checkpoint = torch.load(PATH, map_location=lambda storage, loc: storage)
    model.load_state_dict(checkpoint["state_dict"])
    model.cuda()
    model.eval()

    k_list = [k for k in range(10)]
    save = True
    with torch.no_grad():
        for x, y in test_dataloader:
            batch_size, s = 
x.shape[0:2] x, y = x.cuda(), y.cuda() out = model(x).reshape(batch_size, s, s,-1) break plotting( in_ = x, NN_out =out, out= y, name=config["ckpt"]["alias"], database = config["data"]["process"], k_list= k_list, save = save, shrink= args.shrink, vmin=-0.2, vmax =0.2)
4,950
35.138686
94
py
Fine-tuning-NOs
Fine-tuning-NOs-master/OOD.py
import yaml
from evaluation import saving_files
import argparse
import utilities
from utilities import to_numpy
import os
import torch
import pytorch_lightning as pl
import numpy as np
import matplotlib.pyplot as plt

def load_ood(args, size=64, dir_skeleton=None):
    if dir_skeleton is None:
        dir_skeleton = 'set_{:02d}'.format(args.ood_sample)
    dir_ood = os.path.join('OOD', "OOD_files", dir_skeleton)
    data = np.load(os.path.join(dir_ood, f'data_set{args.ood_sample}_freq{args.freq}.npy'))
    model = np.load(os.path.join(dir_ood, f'model_set{args.ood_sample}.npy'))
    model = torch.tensor(model * 1e-3, dtype=torch.float).view(-1, size, size, 1)
    data = torch.tensor(data, dtype=torch.float).view(-1, size, size, 2)
    print(f'vp= {model.shape}, data={data.shape}')
    return model, data

def test_ood(config, args, name=None, dir_skeleton=None, realization_k=0, x=None, y=None):
    if dir_skeleton is None:
        dir_skeleton = 'set_{:02d}'.format(args.ood_sample) + f'_freq{args.freq}'
    if name is None:
        name = config["ckpt"]["alias"]
    model = utilities.choosing_model(config)
    if x is None or y is None:
        x, y = load_ood(args)
    myloss = utilities.LpLoss(size_average=False)
    checkpoint = torch.load(args.checkpoint, map_location=lambda storage, loc: storage)
    model.load_state_dict(checkpoint["state_dict"])
    model.cuda()
    model.eval()
    loss_dict = {
        'test_loss_ood': 0.0
    }
    x, y = x.cuda(), y.cuda()
    batch_size, s, s, _ = x.shape
    out = model(x).reshape(batch_size, s, s, -1)
    loss_test = myloss(out.view(batch_size, -1), y.view(batch_size, -1))
    loss_dict['test_loss_ood'] += loss_test.item() / batch_size
    print(f"test test_loss_ood: {loss_dict['test_loss_ood']}")
    if args.save_graph:
        print("generating and saving graph")
        utilities.plotting(in_=x, NN_out=out, out=y, name=name,
                           database=dir_skeleton, PATH='OOD', ksample=realization_k)
    if args.save_npy:
        print("saving npy files")
        utilities.saving_files(in_files=to_numpy(x), out_files=to_numpy(y),
                               NN_out_files=to_numpy(out), NN_name=name,
                               database=dir_skeleton, PATH='OOD',
                               realization_k=realization_k)
    return loss_dict['test_loss_ood']

if __name__ == '__main__':
    parser = argparse.ArgumentParser('out of distribution check', add_help=False)
    parser.add_argument('-c', '--config_file', type=str, help='Path to the configuration file',
                        default='config/acoustic/GRF_7Hz/FNO25k.yaml')
    parser.add_argument('-ckpt', '--checkpoint', type=str, help='Path to the checkpoint file', default=None)
    parser.add_argument('-ood', '--ood_sample', type=int, help='out of distribution set', default=0)
    parser.add_argument('-sg', '--save-graph', type=bool, help='Saving Image', default=True)
    parser.add_argument('-snpy', '--save-npy', type=bool, help='Saving NPY', default=True)
    parser.add_argument('-f', '--freq', type=int, help='frequency of the OOD', default=None)
    parser.add_argument('-vmax', '--vmax', type=float, help='vmax of the OOD', default=0.5)
    parser.add_argument('-vmin', '--vmin', type=float, help='vmin of the OOD', default=-0.5)
    parser.add_argument('-s', '--shrink', type=float, help='shrink bar value', default=0.8)
    args = parser.parse_args()
    config_file = args.config_file

    assert args.ood_sample in [0, 1, 2, 3, 4, 5], "out of distribution sample should be in [0,1,2,3,4,5]"
    assert args.freq in [None, 12, 15], "frequency should be in [12,15] Hz"

    with open(config_file, 'r') as stream:
        config = yaml.load(stream, yaml.FullLoader)
    if args.freq is None:
        args.freq = config['data']['frequency']

    # getting the name of the dataset
    dir_skeleton = 'set_{:02d}'.format(args.ood_sample) + f'_freq{args.freq}'

    if args.checkpoint is None:
        c_save = 
config["ckpt"] if config["ckpt"]["alias"]== "sFNO+epsilon_v2": ckpt = "epoch=199-step=166800.ckpt" else: ckpt = "epoch=99-step=50000.ckpt" x, y =load_ood(args) list_test = [] for k in range(0,3): args.checkpoint = os.path.join(c_save["PATH"], c_save["save_dir"], "lightning_logs", f"version_{k}",\ "checkpoints", ckpt) list_test.append(test_ood(config,args=args, realization_k = k, x=x, y=y)) print(list_test) saving_files(list_test, database=dir_skeleton, name =config["ckpt"]["alias"], dir_= "OOD") print(f"Load from checkpoint {args.checkpoint}")
5,428
42.087302
100
py
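test_ood sums myloss over the batch (size_average=False) and divides by batch_size, i.e. it reports a mean per-sample relative error. A stand-in sketch of that bookkeeping, assuming LpLoss is the usual relative L2 error (the real implementation lives in utilities, which is not shown here):

import torch

def rel_l2_sum(out, y):
    # per-sample ||out - y||_2 / ||y||_2, summed over the batch
    diff = torch.norm(out.view(out.size(0), -1) - y.view(y.size(0), -1), dim=1)
    ref = torch.norm(y.view(y.size(0), -1), dim=1)
    return (diff / ref).sum()

out = torch.randn(5, 64, 64, 2)   # toy predictions
y = torch.randn(5, 64, 64, 2)     # toy targets
print((rel_l2_sum(out, y) / 5).item())  # divide by batch size, as in test_ood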
Fine-tuning-NOs
Fine-tuning-NOs-master/evaluation.py
import yaml
import argparse
import utilities
import os
import torch
import numpy as np
from main import datasetFactory
import pytorch_lightning as pl

def saving_files(data, database, name, dir_="make_graph"):
    if len(data) != 1:
        PATH = os.path.join(dir_, "test_loss", database)
        if not os.path.exists(PATH):
            os.makedirs(PATH)
        np.savetxt(os.path.join(PATH, f'{name}.csv'), data, delimiter=",")
    else:
        PATH = os.path.join(dir_, "test_loss", database, f"{name}.csv")
        # make sure the parent directory (not the csv path itself) exists
        if not os.path.exists(os.path.dirname(PATH)):
            os.makedirs(os.path.dirname(PATH))
        # add a new row in the csv file
        with open(PATH, "a") as f:
            f.write(str(data[0]) + "\n")

def test(config, args):
    model = utilities.choosing_model(config)
    test_dataloader = datasetFactory(config, do=args.do, args=None)
    myloss = utilities.LpLoss(size_average=False)

    print(f"Load from checkpoint {args.checkpoint}")
    checkpoint = torch.load(args.checkpoint, map_location=lambda storage, loc: storage)
    model.load_state_dict(checkpoint["state_dict"])
    model.cuda()
    model.eval()

    loss_dict = {
        'test_loss': 0.0
    }
    with torch.no_grad():
        for x, y in test_dataloader:
            batch_size, s = x.shape[0:2]
            x, y = x.cuda(), y.cuda()
            out = model(x).reshape(batch_size, s, s, -1)
            loss_test = myloss(out.view(batch_size, -1), y.view(batch_size, -1))
            loss_dict['test_loss'] += loss_test.item()
    return loss_dict['test_loss'] / len(test_dataloader.dataset)

if __name__ == '__main__':
    parser = argparse.ArgumentParser('Testing losses', add_help=False)
    parser.add_argument('-c', '--config_file', type=str, help='Path to the configuration file',
                        default='config/acoustic/GRF_7Hz/FNO25k.yaml')
    parser.add_argument('-do', '--do', type=str, help='do', default="test")
    parser.add_argument('-n', '--numb_samples', type=int, default=3)
    parser.add_argument('-ckpt', '--checkpoint', type=str, help='Path to the checkpoint file', default=None)
    args = parser.parse_args()
    config_file = args.config_file

    with open(config_file, 'r') as stream:
        config = yaml.load(stream, yaml.FullLoader)

    if config["model"]["activ"] is None:
        activ = "Identity"
    else:
        activ = config["model"]["activ"]
    database = activ + "_" + config["data"]["process"]
    name = config["ckpt"]["alias"]

    if args.checkpoint is None:
        c_save = config["ckpt"]
        if config["ckpt"]["alias"] == "sFNO+epsilon_v2":
            ckpt = "epoch=199-step=166800.ckpt"
        else:
            ckpt = "epoch=99-step=50000.ckpt"
        list_test = []
        for k in range(0, args.numb_samples):
            args.checkpoint = os.path.join(c_save["PATH"], c_save["save_dir"], "lightning_logs",
                                           f"version_{k}", "checkpoints", ckpt)
            list_test.append(test(config, args=args))
        print(list_test)
        saving_files(list_test, database=database, name=name)
    elif args.checkpoint is not None:
        list_test = test(config, args=args)
        print(list_test)
        saving_files([list_test], database=database, name=name)
3,492
36.159574
98
py
Fine-tuning-NOs
Fine-tuning-NOs-master/models/sFNO_epsilon_v2.py
import pytorch_lightning as pl import torch from torch import optim, nn from .FNO import fourier_conv_2d from .basics_model import LayerNorm, get_grid2D, FC_nn from timm.models.layers import DropPath, trunc_normal_ import torch.nn.functional as F from utilities import LpLoss from .sFNO import IO_layer ################################################### # Integral Operator Layer Block with skip connection ################################################### class IO_ResNetblock(nn.Module): def __init__(self, features_, wavenumber, drop_path = 0., drop = 0.): super().__init__() self.IO = IO_layer(features_, wavenumber, drop) self.pwconv1 = nn.Conv2d(features_, 4* features_, 1) # pointwise/1x1 convs, implemented with linear layers self.act = nn.GELU() self.pwconv2 =nn.Conv2d(4 * features_, features_,1) self.norm1 = LayerNorm(features_, eps=1e-5, data_format = "channels_first") self.norm2 = LayerNorm(features_, eps=1e-5, data_format = "channels_first") self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() def forward(self, x): skip = x x = self.norm1(x) x =self.IO(x) x =skip+self.drop_path(x) #NonLocal Layers skip = x x = self.norm2(x) #local x = self.pwconv1(x) x = self.act(x) x = self.pwconv2(x) x = skip + self.drop_path(x) return x ####################################### #sFNO_epsilon_v2 ####################################### class sFNO_epsilon_v2(pl.LightningModule): def __init__(self, in_chans = 3, out_chans = 1, modes = [12, 12, 12, 12], depths = [3,3,9,3], dims = [36, 36, 32, 34], drop_path_rate = 0., drop = 0., head_init_scale=1., padding=9, with_grid = True, loss = "rel_l2", learning_rate = 1e-3, step_size= 100, gamma= 0.5, weight_decay= 1e-5, ): super().__init__() if loss == 'l1': self.criterion = nn.L1Loss() elif loss == 'l2': self.criterion = nn.MSELoss() elif loss == 'smooth_l1': self.criterion = nn.SmoothL1Loss() elif loss == "rel_l2": self.criterion =LpLoss() self.with_grid = with_grid self.padding = padding self.learning_rate = learning_rate self.step_size = step_size self.gamma = gamma self.weight_decay = weight_decay self.lifting_layers = nn.ModuleList() steam = nn.Conv2d(in_chans, dims[0], 1,1) self.lifting_layers.append(steam) for i in range(3): lifting_layers = nn.Sequential( LayerNorm(dims[i], eps=1e-6, data_format= "channels_first"), nn.Conv2d(dims[i], dims[i+1], kernel_size = 1, stride = 1) ) self.lifting_layers.append(lifting_layers) self.stages = nn.ModuleList() dp_rates = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] cur = 0 for i in range(4): stage = nn.Sequential( *[IO_ResNetblock(features_=dims[i], wavenumber=[modes[i]]*2, drop_path=dp_rates[cur + j], drop =drop) for j in range(depths[i])] ) self.stages.append(stage) cur += depths[i] self.head = nn.Conv2d(dims[-1], out_chans,1,1) def forward_features(self, x): x=x.permute(0,3,1,2).contiguous() x = self.lifting_layers[0](x) x = F.pad(x, [0,self.padding, 0, self.padding]) for i in range(1,4): x = self.lifting_layers[i](x) x = self.stages[i](x) x = x[..., :-self.padding, :-self.padding] return x def forward(self, x): if self.with_grid: grid = get_grid2D(x.shape, x.device) x = torch.cat((x, grid), dim=-1) del grid x = self.forward_features(x) x = self.head(x) return x def training_step(self, batch: torch.Tensor, batch_idx): # training_step defines the train loop. 
# it is independent of forward
        x, y = batch
        batch_size = x.shape[0]
        out = self(x)
        loss = self.criterion(out.view(batch_size, -1), y.view(batch_size, -1))
        self.log("loss", loss, on_epoch=True, prog_bar=True, logger=True)
        return loss

    def validation_step(self, val_batch: torch.Tensor, batch_idx):
        x, y = val_batch
        batch_size = x.shape[0]
        out = self(x)
        val_loss = self.criterion(out.view(batch_size, -1), y.view(batch_size, -1))
        self.log('val_loss', val_loss, on_epoch=True, prog_bar=True, logger=True)
        return val_loss

    def configure_optimizers(self, optimizer=None, scheduler=None):
        # optionally accept an externally built optimizer/scheduler (see main.py)
        if optimizer is None:
            optimizer = optim.AdamW(self.parameters(), lr=self.learning_rate,
                                    weight_decay=self.weight_decay)
        if scheduler is None:
            scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=self.step_size,
                                                  gamma=self.gamma)
        return {
            "optimizer": optimizer,
            "lr_scheduler": {
                "scheduler": scheduler
            },
        }

#######################################
# Ensemble of the sFNO_epsilon_v2_proj
# the only diff. is in allowing the projection
# to be taken as an input
#######################################
class sFNO_epsilon_v2_proj(pl.LightningModule):
    def __init__(self,
                 in_chans=3,
                 proj=None,
                 modes=[12, 12, 12, 12],
                 depths=[3, 3, 9, 3],
                 dims=[36, 36, 32, 34],
                 drop_path_rate=0.,
                 drop=0.,
                 head_init_scale=1.,
                 padding=9,
                 with_grid=True,
                 loss="rel_l2",
                 learning_rate=1e-3,
                 step_size=100,
                 gamma=0.5,
                 weight_decay=1e-5,
                 ):
        super().__init__()
        if loss == 'l1':
            self.criterion = nn.L1Loss()
        elif loss == 'l2':
            self.criterion = nn.MSELoss()
        elif loss == 'smooth_l1':
            self.criterion = nn.SmoothL1Loss()
        elif loss == "rel_l2":
            self.criterion = LpLoss()

        self.with_grid = with_grid
        self.padding = padding
        self.learning_rate = learning_rate
        self.step_size = step_size
        self.gamma = gamma
        self.weight_decay = weight_decay

        self.lifting_layers = nn.ModuleList()
        steam = nn.Conv2d(in_chans, dims[0], 1, 1)
        self.lifting_layers.append(steam)
        for i in range(3):
            lifting_layers = nn.Sequential(
                LayerNorm(dims[i], eps=1e-6, data_format="channels_first"),
                nn.Conv2d(dims[i], dims[i + 1], kernel_size=1, stride=1)
            )
            self.lifting_layers.append(lifting_layers)

        self.stages = nn.ModuleList()
        dp_rates = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
        cur = 0
        for i in range(4):
            stage = nn.Sequential(
                *[IO_ResNetblock(features_=dims[i], wavenumber=[modes[i]] * 2,
                                 drop_path=dp_rates[cur + j], drop=drop) for j in range(depths[i])]
            )
            self.stages.append(stage)
            cur += depths[i]

        if proj is None:
            # project the final stage width (dims[-1]) down to one output channel
            self.proj = FC_nn([dims[-1], dims[-1] // 2, 1],
                              activation="relu",
                              outermost_norm=False)
        else:
            self.proj = proj

    def forward_features(self, x):
        x = x.permute(0, 3, 1, 2).contiguous()
        x = self.lifting_layers[0](x)
        x = F.pad(x, [0, self.padding, 0, self.padding])
        for i in range(1, 4):
            x = self.lifting_layers[i](x)
            x = self.stages[i](x)
        x = x[..., :-self.padding, :-self.padding]
        return x

    def forward(self, x):
        if self.with_grid:
            grid = get_grid2D(x.shape, x.device)
            x = torch.cat((x, grid), dim=-1)
            del grid
        x = self.forward_features(x)
        x = x.permute(0, 2, 3, 1)
        x = self.proj(x)
        return x

    def training_step(self, batch: torch.Tensor, batch_idx):
        # training_step defines the train loop. 
# it is independent of forward x, y = batch batch_size = x.shape[0] out= self(x) loss = self.criterion(out.view(batch_size,-1), y.view(batch_size,-1)) self.log("loss", loss, on_epoch=True, prog_bar=True, logger=True) return loss def validation_step(self, val_batch: torch.Tensor, batch_idx): x, y = val_batch batch_size = x.shape[0] out= self(x) val_loss = self.criterion(out.view(batch_size,-1), y.view(batch_size,-1)) self.log('val_loss', val_loss, on_epoch=True, prog_bar=True, logger=True) return val_loss def configure_optimizers(self, optimizer=None, scheduler=None): if optimizer is None: optimizer = optim.AdamW(self.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay) if scheduler is None: scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=self.step_size, gamma=self.gamma) return { "optimizer": optimizer, "lr_scheduler": { "scheduler": scheduler }, }
10,276
35.967626
114
py
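The dp_rates line in both classes implements linearly increasing stochastic depth: the drop-path probability ramps from 0 in the first block to drop_path_rate in the last, counted across all stages. A sketch of the schedule with the default depths (the drop_path_rate value is mine):

import torch

depths = [3, 3, 9, 3]                 # blocks per stage, as in sFNO_epsilon_v2
drop_path_rate = 0.1                  # hypothetical maximum rate
dp_rates = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]

cur = 0
for i, d in enumerate(depths):
    print(f"stage {i}:", [round(r, 3) for r in dp_rates[cur:cur + d]])
    cur += d
# rates grow linearly from 0.0 to 0.1 over the 18 blocks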
Fine-tuning-NOs
Fine-tuning-NOs-master/models/FNO_residual.py
import pytorch_lightning as pl import torch from torch import optim, nn from .FNO import fourier_conv_2d from .basics_model import LayerNorm, get_grid2D, FC_nn, set_activ import torch.nn.functional as F from utilities import LpLoss from timm.models.layers import DropPath ####################################### # Integral Operator Layer ####################################### class IO_layer(nn.Module): def __init__(self, features_, wavenumber, drop = 0., activation = "relu"): super().__init__() self.W = nn.Conv2d(features_, features_, 1) self.IO = fourier_conv_2d(features_, features_,*wavenumber) self.act = set_activ(activation) self.dropout = nn.Dropout(drop) def forward(self, x): x = self.IO(x)+self.W(x) x = self.dropout(x) x = self.act(x) return x class FNO_residual_Block(nn.Module): def __init__(self, features_, wavenumber, drop = 0., drop_path= 0., activation = "relu"): super().__init__() self.IO = IO_layer(features_=features_, wavenumber=wavenumber, drop= drop, activation= activation) self.act = set_activ(activation) self.norm1 = LayerNorm(features_, eps=1e-5, data_format = "channels_first") self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() def forward(self, x): input = x x = self.norm1(x) x = self.IO(x) x =input+self.drop_path(x) #NonLocal Layers return x ####################################### # Ensemble of the sFNO_epsilon_v1 ####################################### class FNO_residual(pl.LightningModule): def __init__(self, wavenumber, features_, padding = 9, lifting = None, proj = None, dim_input = 1, with_grid= True, loss = "rel_l2", learning_rate = 1e-2, step_size= 100, gamma= 0.5, weight_decay= 1e-5, drop = 0., drop_path= 0., activation = "relu" ): super().__init__() self.with_grid = with_grid self.padding = padding self.layers = len(wavenumber) self.learning_rate = learning_rate self.step_size = step_size self.gamma = gamma self.weight_decay = weight_decay if loss == 'l1': self.criterion = nn.L1Loss() elif loss == 'l2': self.criterion = nn.MSELoss() elif loss == 'smooth_l1': self.criterion = nn.SmoothL1Loss() elif loss == "rel_l2": self.criterion =LpLoss() if with_grid == True: dim_input+=2 if lifting is None: self.lifting = FC_nn([dim_input, features_//2, features_], activation = "relu", outermost_norm=False ) else: self.lifting = lifting if proj is None: self.proj = FC_nn([features_, features_//2, 1], activation = "relu", drop = drop, outermost_norm=False ) else: self.proj = proj self.no = [] self.dp_rates = [x.item() for x in torch.linspace(0, drop_path, self.layers )] for l in range(self.layers): self.no.append(FNO_residual_Block(features_ = features_, wavenumber=[wavenumber[l]]*2, drop= drop, drop_path= self.dp_rates[l], activation=activation)) self.no =nn.Sequential(*self.no) def forward(self, x: torch.Tensor): if self.with_grid == True: grid = get_grid2D(x.shape, x.device) x = torch.cat((x, grid), dim=-1) x = self.lifting(x) x = x.permute(0, 3, 1, 2) x = F.pad(x, [0,self.padding, 0,self.padding]) x = self.no(x) x = x[..., :-self.padding, :-self.padding] x = x.permute(0, 2, 3, 1 ) x =self.proj(x) return x def training_step(self, batch: torch.Tensor, batch_idx): # training_step defines the train loop. 
# it is independent of forward x, y = batch batch_size = x.shape[0] out= self(x) loss = self.criterion(out.view(batch_size,-1), y.view(batch_size,-1)) self.log("loss", loss, on_epoch=True, prog_bar=True, logger=True) return loss def validation_step(self, val_batch: torch.Tensor, batch_idx): x, y = val_batch batch_size = x.shape[0] out= self(x) val_loss = self.criterion(out.view(batch_size,-1), y.view(batch_size,-1)) self.log('val_loss', val_loss, on_epoch=True, prog_bar=True, logger=True) return val_loss def configure_optimizers(self, optimizer=None, scheduler=None): if optimizer is None: optimizer = optim.AdamW(self.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay) if scheduler is None: scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=self.step_size, gamma=self.gamma) return { "optimizer": optimizer, "lr_scheduler": { "scheduler": scheduler }, }
5,887
34.46988
109
py
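FNO_residual draws its per-block stochastic-depth rates from a linear ramp, so early blocks are kept more often than late ones. The ramp in isolation, with illustrative values for drop_path and the block count:

import torch

drop_path, layers = 0.3, 4
dp_rates = [x.item() for x in torch.linspace(0, drop_path, layers)]
print(dp_rates)  # ~[0.0, 0.1, 0.2, 0.3]: the first block is never dropped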
Fine-tuning-NOs
Fine-tuning-NOs-master/models/basics_model.py
import torch import torch.nn as nn import torch.nn.functional as F import numpy as np ########################################## # Fully connected Layer ########################################## class FCLayer(nn.Module): """Fully connected layer """ def __init__(self, in_feature, out_feature, activation = "gelu", is_normalized = True): super().__init__() if is_normalized: self.LinearBlock = nn.Sequential( nn.Linear(in_feature,out_feature), LayerNorm(out_feature), ) else: self.LinearBlock = nn.Linear(in_feature,out_feature) self.act = set_activ(activation) def forward(self, x): return self.act(self.LinearBlock(x)) ########################################## # Fully connected Block ########################################## class FC_nn(nn.Module): r"""Simple MLP to code lifting and projection""" def __init__(self, sizes = [2, 128, 128, 1], activation = 'relu', outermost_linear = True, outermost_norm = True, drop = 0.): super().__init__() self.dropout = nn.Dropout(drop) self.net = nn.ModuleList([FCLayer(in_feature= m, out_feature= n, activation=activation, is_normalized = False) for m, n in zip(sizes[:-2], sizes[1:-1]) ]) if outermost_linear == True: self.net.append(FCLayer(sizes[-2],sizes[-1], activation = None, is_normalized = outermost_norm)) else: self.net.append(FCLayer(in_feature= sizes[-2], out_feature= sizes[-1], activation=activation, is_normalized = outermost_norm)) def forward(self,x): for module in self.net: x = module(x) x = self.dropout(x) return x ###### Inverse Bottleneck ######## class MLP_inv_bottleneck(nn.Module): """Inverse Bottleneck MLP""" def __init__(self, dim, activation = 'gelu'): super().__init__() self.nonlinear = set_activ(activation) self.L1 = nn.Linear(dim, 4*dim) self.L2 = nn.Linear(4*dim, dim) def forward(self,x): x = self.L1(x) x = self.nonlinear(x) x = self.L2(x) return x ########## Simple MLP ############## class MLP_join(nn.Module): """Simple MLP to code lifting and projection""" def __init__(self, sizes = [1, 128, 128, 1], activation = 'gelu', drop = 0.): super(MLP_join, self).__init__() self.hidden_layer = sizes self.nonlinear = set_activ(activation) self.dropout = nn.Dropout(drop) self.net = nn.ModuleList([nn.Linear(m, n) for m, n in zip(sizes[:-1], sizes[1:]) ]) def forward(self,x): for module in self.net[:-1]: x = module(x) x = self.nonlinear(x) x = self.dropout(x) return self.net[-1](x) ########## Layer Normalization ############## class LayerNorm(nn.Module): r""" LayerNorm that supports two data formats: channels_last (default) or channels_first. The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch_size, height, width, channels) while channels_first corresponds to inputs with shape (batch_size, channels, height, width). 
""" def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"): super().__init__() self.weight = nn.Parameter(torch.ones(normalized_shape)) self.bias = nn.Parameter(torch.zeros(normalized_shape)) self.eps = eps self.data_format = data_format if self.data_format not in ["channels_last", "channels_first"]: raise NotImplementedError self.normalized_shape = (normalized_shape, ) def forward(self, x): if self.data_format == "channels_last": return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) elif self.data_format == "channels_first": u = x.mean(1, keepdim=True) s = (x - u).pow(2).mean(1, keepdim=True) x = (x - u) / torch.sqrt(s + self.eps) x = self.weight[:, None, None] * x + self.bias[:, None, None] return x ###################################################################################### # new additions over the main code class GroupNorm(nn.GroupNorm): """ Group Normalization with 1 group. Input: tensor in shape [B, C, H, W] """ def __init__(self, num_channels, **kwargs): super().__init__(1, num_channels, **kwargs) ###################################################################################### # Miscellaneous functions ###################################################################################### ########## Getting the 2D grid using the batch def get_grid2D(shape, device): batchsize, size_x, size_y = shape[0], shape[1], shape[2] gridx = torch.tensor(np.linspace(0, 1, size_x), dtype=torch.float) gridx = gridx.reshape(1, size_x, 1, 1).repeat([batchsize, 1, size_y, 1]) gridy = torch.tensor(np.linspace(0, 1, size_y), dtype=torch.float) gridy = gridy.reshape(1, 1, size_y, 1).repeat([batchsize, size_x, 1, 1]) return torch.cat((gridx, gridy), dim=-1).to(device) ########## Set automatically the activation function for the NN def set_activ(activation): if activation is not None: activation = activation.lower() if activation == 'relu': nonlinear = F.relu elif activation == "leaky_relu": nonlinear = F.leaky_relu elif activation == 'tanh': nonlinear = F.tanh elif activation == 'sine': nonlinear= torch.sin elif activation == 'gelu': nonlinear= F.gelu elif activation == 'elu': nonlinear = F.elu_ elif activation == None: nonlinear = nn.Identity() else: raise Exception('The activation is not recognized from the list') return nonlinear
6,354
38.228395
94
py
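The channels_first branch of the custom LayerNorm above reproduces F.layer_norm applied over the channel axis of a (B, C, H, W) tensor. A quick equivalence check against the class as defined in basics_model.py, assuming the repo's dependencies are installed:

import torch
import torch.nn.functional as F
from models.basics_model import LayerNorm

x = torch.randn(2, 8, 5, 5)  # (B, C, H, W)
ln = LayerNorm(8, eps=1e-6, data_format="channels_first")
ref = F.layer_norm(x.permute(0, 2, 3, 1), (8,), ln.weight, ln.bias, ln.eps)
assert torch.allclose(ln(x), ref.permute(0, 3, 1, 2), atol=1e-5)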
Fine-tuning-NOs
Fine-tuning-NOs-master/models/FNO.py
import pytorch_lightning as pl import torch from torch import optim, nn from .basics_model import get_grid2D, set_activ, FC_nn from utilities import LpLoss ####################################### # Fourier Convolution, # \int_D k(x-y) v(y) dy # = \mathcal{F}^{-1}(P \mathcal{F}(v)) ####################################### class fourier_conv_2d(nn.Module): def __init__(self, in_, out_, wavenumber1, wavenumber2): super(fourier_conv_2d, self).__init__() self.out_ = out_ self.wavenumber1 = wavenumber1 self.wavenumber2 = wavenumber2 scale = (1 / (in_ * out_)) self.weights1 = nn.Parameter(scale * torch.rand(in_, out_, wavenumber1, wavenumber2, dtype=torch.cfloat)) self.weights2 = nn.Parameter(scale * torch.rand(in_, out_, wavenumber1, wavenumber2, dtype=torch.cfloat)) # Complex multiplication def compl_mul2d(self, input, weights): # (batch, in_channel, x,y ), (in_channel, out_channel, x,y) -> (batch, out_channel, x,y) return torch.einsum("bixy,ioxy->boxy", input, weights) def forward(self, x): batchsize = x.shape[0] #Compute Fourier coeffcients up to factor of e^(- something constant) x_ft = torch.fft.rfft2(x) # Multiply relevant Fourier modes out_ft = torch.zeros(batchsize, self.out_, x.size(-2), x.size(-1)//2 + 1, dtype=torch.cfloat, device=x.device) out_ft[:, :, :self.wavenumber1, :self.wavenumber2] = \ self.compl_mul2d(x_ft[:, :, :self.wavenumber1, :self.wavenumber2], self.weights1) out_ft[:, :, -self.wavenumber1:, :self.wavenumber2] = \ self.compl_mul2d(x_ft[:, :, -self.wavenumber1:, :self.wavenumber2], self.weights2) #Return to physical space x = torch.fft.irfft2(out_ft, s=(x.size(-2), x.size(-1))) return x ####################################### # Fourier Layer: # \sigma( Wx + FourierConv(x)) ####################################### class Fourier_layer(nn.Module): def __init__(self, features_, wavenumber, activation = 'relu', is_last = False): super(Fourier_layer, self).__init__() self.W = nn.Conv2d(features_, features_, 1) self.fourier_conv = fourier_conv_2d(features_, features_ , *wavenumber) if is_last== False: self.act = set_activ(activation) else: self.act = set_activ(None) def forward(self, x): x1 = self.fourier_conv(x) x2 = self.W(x) return self.act(x1 + x2) ####################################### # FNO: Ensemble of the FNO ####################################### class FNO(pl.LightningModule): def __init__(self, wavenumber, features_, padding = 9, activation= 'relu', lifting = None, proj = None, dim_input = 1, with_grid= True, loss = "rel_l2", learning_rate = 1e-2, step_size= 100, gamma= 0.5, weight_decay= 1e-5, ): super(FNO, self).__init__() self.with_grid = with_grid self.padding = padding self.layers = len(wavenumber) self.learning_rate = learning_rate self.step_size = step_size self.gamma = gamma self.weight_decay = weight_decay if loss == 'l1': self.criterion = nn.L1Loss() elif loss == 'l2': self.criterion = nn.MSELoss() elif loss == 'smooth_l1': self.criterion = nn.SmoothL1Loss() elif loss == "rel_l2": self.criterion =LpLoss() if with_grid == True: dim_input+=2 if lifting is None: self.lifting = FC_nn([dim_input, features_//2, features_], activation = "relu", outermost_norm=False ) else: self.lifting = lifting if proj is None: self.proj = FC_nn([features_, features_//2, 1], activation = "relu", outermost_norm=False ) else: self.proj = proj self.fno = [] for l in range(self.layers-1): self.fno.append(Fourier_layer(features_ = features_, wavenumber=[wavenumber[l]]*2, activation = activation)) self.fno.append(Fourier_layer(features_=features_, wavenumber=[wavenumber[-1]]*2, 
activation = activation, is_last= True)) self.fno =nn.Sequential(*self.fno) def forward(self, x: torch.Tensor): if self.with_grid == True: grid = get_grid2D(x.shape, x.device) x = torch.cat((x, grid), dim=-1) x = self.lifting(x) x = x.permute(0, 3, 1, 2) x = nn.functional.pad(x, [0,self.padding, 0,self.padding]) x = self.fno(x) x = x[..., :-self.padding, :-self.padding] x = x.permute(0, 2, 3, 1 ) x =self.proj(x) return x def training_step(self, batch: torch.Tensor, batch_idx): # training_step defines the train loop. # it is independent of forward x, y = batch batch_size = x.shape[0] out= self(x) loss = self.criterion(out.view(batch_size,-1), y.view(batch_size,-1)) self.log("loss", loss, on_epoch=True, prog_bar=True, logger=True) return loss def validation_step(self, val_batch: torch.Tensor, batch_idx): x, y = val_batch batch_size = x.shape[0] out= self(x) val_loss = self.criterion(out.view(batch_size,-1), y.view(batch_size,-1)) self.log('val_loss', val_loss, on_epoch=True, prog_bar=True, logger=True) return val_loss def configure_optimizers(self, optimizer=None, scheduler=None): if optimizer is None: optimizer = optim.AdamW(self.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay) if scheduler is None: scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=self.step_size, gamma=self.gamma) return { "optimizer": optimizer, "lr_scheduler": { "scheduler": scheduler }, }
6,612
39.078788
119
py
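fourier_conv_2d only learns weights for the lowest wavenumber1 x wavenumber2 modes, placed in the two corners of the one-sided spectrum returned by rfft2; every other entry of out_ft stays zero, and irfft2 restores the input resolution. The shape bookkeeping, using the class as defined above:

import torch
from models.FNO import fourier_conv_2d

conv = fourier_conv_2d(3, 5, wavenumber1=6, wavenumber2=6)
x = torch.randn(2, 3, 32, 32)
print(torch.fft.rfft2(x).shape)  # torch.Size([2, 3, 32, 17]): one-sided last axis
print(conv(x).shape)             # torch.Size([2, 5, 32, 32]): resolution preserved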
Fine-tuning-NOs
Fine-tuning-NOs-master/models/sFNO_epsilon_v1.py
import pytorch_lightning as pl import torch from torch import optim, nn from .FNO import fourier_conv_2d from .basics_model import LayerNorm, get_grid2D, FC_nn, set_activ import torch.nn.functional as F from utilities import LpLoss from timm.models.layers import DropPath ####################################### # Integral Operator Layer ####################################### class IO_layer(nn.Module): def __init__(self, features_, wavenumber, drop = 0., activation = "relu"): super().__init__() self.W = nn.Conv2d(features_, features_, 1) self.IO = fourier_conv_2d(features_, features_,*wavenumber) self.act = set_activ(activation) self.dropout = nn.Dropout(drop) def forward(self, x): x = self.IO(x)+self.W(x) x = self.dropout(x) x = self.act(x) return x class MetaFormerNO_Block(nn.Module): def __init__(self, features_, wavenumber, drop = 0., drop_path= 0., activation = "relu"): super().__init__() self.IO = IO_layer(features_=features_, wavenumber=wavenumber, drop= drop, activation = activation) self.norm1 = LayerNorm(features_, eps=1e-5, data_format = "channels_first") self.norm2 = LayerNorm(features_, eps=1e-5, data_format = "channels_last") self.pwconv1 = nn.Linear(features_, 4*features_) # pointwise/1x1 convs, implemented with linear layers self.act = set_activ(activation) if activation is not None else set_activ("gelu") self.pwconv2 = nn.Linear(4*features_, features_) self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() def forward(self, x): input = x x = self.norm1(x) x = self.IO(x) x =input+self.drop_path(x) #NonLocal Layers input = x x = x.permute(0, 2, 3, 1)# (N, C, H, W)-> (N, H, W, C) x = self.norm2(x) x = self.pwconv1(x) x = self.act(x) x = self.pwconv2(x) x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W) x = input + self.drop_path(x) return x ####################################### # Ensemble of the sFNO_epsilon_v1 ####################################### class sFNO_epsilon_v1(pl.LightningModule): def __init__(self, wavenumber, features_, padding = 9, lifting = None, proj = None, dim_input = 1, with_grid= True, loss = "rel_l2", learning_rate = 1e-2, step_size= 100, gamma= 0.5, weight_decay= 1e-5, drop = 0., drop_path= 0., activation = "relu" ): super().__init__() self.with_grid = with_grid self.padding = padding self.layers = len(wavenumber) self.learning_rate = learning_rate self.step_size = step_size self.gamma = gamma self.weight_decay = weight_decay if loss == 'l1': self.criterion = nn.L1Loss() elif loss == 'l2': self.criterion = nn.MSELoss() elif loss == 'smooth_l1': self.criterion = nn.SmoothL1Loss() elif loss == "rel_l2": self.criterion =LpLoss() if with_grid == True: dim_input+=2 if lifting is None: self.lifting = FC_nn([dim_input, features_//2, features_], activation = "relu", outermost_norm=False ) else: self.lifting = lifting if proj is None: self.proj = FC_nn([features_, features_//2, 1], activation = "relu", drop = drop, outermost_norm=False ) else: self.proj = proj self.no = [] self.dp_rates = [x.item() for x in torch.linspace(0, drop_path, self.layers )] for l in range(self.layers): self.no.append(MetaFormerNO_Block(features_ = features_, wavenumber=[wavenumber[l]]*2, drop= drop, drop_path= self.dp_rates[l], activation=activation)) self.no =nn.Sequential(*self.no) def forward(self, x: torch.Tensor): if self.with_grid == True: grid = get_grid2D(x.shape, x.device) x = torch.cat((x, grid), dim=-1) x = self.lifting(x) x = x.permute(0, 3, 1, 2) x = F.pad(x, [0,self.padding, 0,self.padding]) x = self.no(x) x = x[..., :-self.padding, 
:-self.padding] x = x.permute(0, 2, 3, 1 ) x =self.proj(x) return x def training_step(self, batch: torch.Tensor, batch_idx): # training_step defines the train loop. # it is independent of forward x, y = batch batch_size = x.shape[0] out= self(x) loss = self.criterion(out.view(batch_size,-1), y.view(batch_size,-1)) self.log("loss", loss, on_epoch=True, prog_bar=True, logger=True) return loss def validation_step(self, val_batch: torch.Tensor, batch_idx): x, y = val_batch batch_size = x.shape[0] out= self(x) val_loss = self.criterion(out.view(batch_size,-1), y.view(batch_size,-1)) self.log('val_loss', val_loss, on_epoch=True, prog_bar=True, logger=True) return val_loss def configure_optimizers(self, optimizer=None, scheduler=None): if optimizer is None: optimizer = optim.AdamW(self.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay) if scheduler is None: scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=self.step_size, gamma=self.gamma) return { "optimizer": optimizer, "lr_scheduler": { "scheduler": scheduler }, }
6,482
35.627119
110
py
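DropPath, imported here from timm, zeroes whole residual branches per sample during training (rescaling survivors by 1/keep_prob) and is the identity at evaluation. A quick check of that behavior, assuming timm is installed:

import torch
from timm.models.layers import DropPath

dp = DropPath(0.5)
x = torch.ones(4, 3, 2, 2)
dp.eval()
assert torch.equal(dp(x), x)  # identity at inference
dp.train()
out = dp(x)                   # per sample: either 0 or x / (1 - 0.5)
print(out[:, 0, 0, 0])        # entries are 0.0 or 2.0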
Fine-tuning-NOs
Fine-tuning-NOs-master/models/sFNO.py
import pytorch_lightning as pl import torch from torch import optim, nn from .FNO import fourier_conv_2d from .basics_model import LayerNorm, get_grid2D, FC_nn, set_activ import torch.nn.functional as F from utilities import LpLoss ####################################### # Integral Operator Layer ####################################### class IO_layer(nn.Module): def __init__(self, features_, wavenumber, drop = 0., activation = "relu"): super().__init__() self.W = nn.Conv2d(features_, features_, 1) self.IO = fourier_conv_2d(features_, features_,*wavenumber) self.act = set_activ(activation) self.dropout = nn.Dropout(drop) def forward(self, x): x = self.IO(x)+self.W(x) x = self.dropout(x) x = self.act(x) return x ####################################### # Integral Operator Block ####################################### class IO_Block(nn.Module): def __init__(self, features_, wavenumber, drop = 0., activation = "relu"): super().__init__() self.IO = IO_layer(features_=features_, wavenumber=wavenumber, drop= drop, activation = activation) self.pwconv1 = nn.Linear(features_, 4*features_) # pointwise/1x1 convs, implemented with linear layers self.act = set_activ(activation) if activation is not None else set_activ("gelu") self.norm = nn.LayerNorm(features_, eps=1e-5) self.pwconv2 = nn.Linear(4*features_, features_) # def forward(self, x): x =(self.IO(x)).permute(0,2,3,1) #B C W H -> B W H C x = self.pwconv1(x) x = self.act(x) x = self.pwconv2(x) x = self.norm(x) x = x.permute(0, 3, 1, 2) return x ####################################### # sFNO: Ensemble of the sFNO ####################################### class sFNO(pl.LightningModule): def __init__(self, wavenumber, features_, padding = 9, lifting = None, proj = None, dim_input = 1, with_grid= True, loss = "rel_l2", learning_rate = 1e-2, step_size= 100, gamma= 0.5, weight_decay= 1e-5, drop = 0., activation = "relu" ): super().__init__() self.with_grid = with_grid self.padding = padding self.layers = len(wavenumber) self.learning_rate = learning_rate self.step_size = step_size self.gamma = gamma self.weight_decay = weight_decay if loss == 'l1': self.criterion = nn.L1Loss() elif loss == 'l2': self.criterion = nn.MSELoss() elif loss == 'smooth_l1': self.criterion = nn.SmoothL1Loss() elif loss == "rel_l2": self.criterion =LpLoss() if with_grid == True: dim_input+=2 if lifting is None: self.lifting = FC_nn([dim_input, features_//2, features_], activation = "relu", outermost_norm=False ) else: self.lifting = lifting if proj is None: self.proj = FC_nn([features_, features_//2, 1], activation = "relu", drop = drop, outermost_norm=False ) else: self.proj = proj self.fno = [] for l in range(self.layers): self.fno.append(IO_Block(features_ = features_, wavenumber=[wavenumber[l]]*2, drop= drop, activation= activation)) self.fno =nn.Sequential(*self.fno) def forward(self, x: torch.Tensor): if self.with_grid == True: grid = get_grid2D(x.shape, x.device) x = torch.cat((x, grid), dim=-1) x = self.lifting(x) x = x.permute(0, 3, 1, 2) x = F.pad(x, [0,self.padding, 0,self.padding]) x = self.fno(x) x = x[..., :-self.padding, :-self.padding] x = x.permute(0, 2, 3, 1 ) x =self.proj(x) return x def training_step(self, batch: torch.Tensor, batch_idx): # training_step defines the train loop. 
# it is independent of forward x, y = batch batch_size = x.shape[0] out= self(x) loss = self.criterion(out.view(batch_size,-1), y.view(batch_size,-1)) self.log("loss", loss, on_epoch=True, prog_bar=True, logger=True) return loss def validation_step(self, val_batch: torch.Tensor, batch_idx): x, y = val_batch batch_size = x.shape[0] out= self(x) val_loss = self.criterion(out.view(batch_size,-1), y.view(batch_size,-1)) self.log('val_loss', val_loss, on_epoch=True, prog_bar=True, logger=True) return val_loss def configure_optimizers(self, optimizer=None, scheduler=None): if optimizer is None: optimizer = optim.AdamW(self.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay) if scheduler is None: scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=self.step_size, gamma=self.gamma) return { "optimizer": optimizer, "lr_scheduler": { "scheduler": scheduler }, }
5,862
35.64375
110
py
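IO_Block applies its pointwise convolutions as nn.Linear layers on a channels-last tensor, which is numerically the same operation as a 1x1 nn.Conv2d once the weights are shared. A small verification sketch:

import torch
import torch.nn as nn

lin = nn.Linear(8, 32)
conv = nn.Conv2d(8, 32, kernel_size=1)
conv.weight.data = lin.weight.data[:, :, None, None].clone()
conv.bias.data = lin.bias.data.clone()

x = torch.randn(2, 8, 5, 5)  # (B, C, H, W)
a = lin(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
assert torch.allclose(a, conv(x), atol=1e-5)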
Fine-tuning-NOs
Fine-tuning-NOs-master/models/sFNO_epsilon_v2_updated.py
import pytorch_lightning as pl import torch from torch import optim, nn from .FNO import fourier_conv_2d from .basics_model import LayerNorm, get_grid2D, set_activ, GroupNorm import torch.nn.functional as F from utilities import LpLoss from timm.models.layers import DropPath, trunc_normal_ import os from .sFNO_epsilon_v1 import IO_layer class Mlp(nn.Module): """ Implementation of MLP with 1*1 convolutions. Input: tensor with shape [B, C, H, W] """ def __init__(self, in_features, hidden_features=None, out_features=None, activation = "leaky_relu"): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Conv2d(in_features, hidden_features, 1) self.act = set_activ(activation) self.fc2 = nn.Conv2d(hidden_features, out_features, 1) self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Conv2d): trunc_normal_(m.weight, std=.02) if m.bias is not None: nn.init.constant_(m.bias, 0) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.fc2(x) return x ####################################### # Transformer look-alike block with Neural Operators ####################################### class NOFormerBlock(nn.Module): def __init__(self, features_, wavenumber, drop = 0., drop_path= 0., activation = "leaky_relu", use_layer_scale=True, layer_scale_init_value=1e-5, norm_layer=GroupNorm, mlp_ratio=4): super().__init__() self.IO = IO_layer(features_=features_, wavenumber=wavenumber, drop= drop, activation = activation) self.norm1 = norm_layer(features_) self.norm2 = norm_layer(features_) self.act = set_activ(activation) if activation is not None else set_activ("gelu") mlp_hidden_features = int(features_ * mlp_ratio) self.mlp = Mlp(in_features=features_, hidden_features=mlp_hidden_features, activation=activation) self.drop_path= DropPath(drop_path) if drop_path > 0. 
else nn.Identity() self.use_layer_scale = use_layer_scale if use_layer_scale: self.layer_scale_1= nn.Parameter(torch.ones((features_))*layer_scale_init_value, requires_grad=True) self.layer_scale_2= nn.Parameter(torch.ones((features_))*layer_scale_init_value, requires_grad=True) def forward(self, x): if self.use_layer_scale: x = x+ self.drop_path(self.layer_scale_1.unsqueeze(-1).unsqueeze(-1)*self.IO(self.norm1(x))) x = x+ self.drop_path(self.layer_scale_2.unsqueeze(-1).unsqueeze(-1)*self.mlp(self.norm2(x))) else: x = x+ self.drop_path(self.IO(self.norm1(x))) x = x+ self.drop_path(self.mlp(self.norm2(x))) return x ####################################### class sFNO_epsilon_v2_updated(pl.LightningModule): def __init__(self, stage_list, features_stage_list, wavenumber_stage_list, dim_input = None, dim_output = None, proj= None, lifting=None, activation="leaky_relu", norm_layer=GroupNorm, drop_rate= 0., drop_path_rate= 0., use_layer_scale=True, layer_scale_init_value=1e-5, with_grid=True, padding=9, loss = "rel_l2", learning_rate = 1e-3, step_size= 70, gamma= 0.5, weight_decay= 1e-5, mlp_ratio=4): super().__init__() self.save_hyperparameters() if loss == 'l1': self.criterion = nn.L1Loss() elif loss == 'l2': self.criterion = nn.MSELoss() elif loss == 'smooth_l1': self.criterion = nn.SmoothL1Loss() elif loss == "rel_l2": self.criterion =LpLoss() self.padding = padding self.with_grid = with_grid self.padding = padding self.learning_rate = learning_rate self.step_size = step_size self.gamma = gamma self.weight_decay = weight_decay if with_grid == True: dim_input+=2 if lifting is None: self.lifting = Mlp(in_features=dim_input, out_features=features_stage_list[0], hidden_features=features_stage_list[0], activation=activation) else: self.lifting = lifting if proj is None: self.proj = Mlp(in_features=features_stage_list[-1], out_features=dim_output, hidden_features=features_stage_list[-1], activation=activation) else: self.proj = proj assert len(features_stage_list) == len(wavenumber_stage_list) == len(stage_list) network = [] dp_rates=[x.item() for x in torch.linspace(0, drop_path_rate, sum(stage_list))] cur = 0 for i in range(len(stage_list)): stage = self.Ensemble_stage(features_=features_stage_list[i], index= i, layers=stage_list, wavenumber_stage=wavenumber_stage_list[i], mlp_ratio=mlp_ratio, activation=activation, norm_layer=norm_layer, drop_rate=drop_rate, drop_path_rate=dp_rates[cur:cur+stage_list[i]], use_layer_scale=use_layer_scale, layer_scale_init_value=layer_scale_init_value) network.append(stage) cur += stage_list[i] self.network = nn.ModuleList(network) ####################################### def Ensemble_stage(self, features_, index, layers, wavenumber_stage, mlp_ratio, activation, norm_layer, drop_rate, drop_path_rate, use_layer_scale, layer_scale_init_value, ): """ generate the ensemble of blocks return: NOFormerBlock """ blocks = [] for j in range(layers[index]): blocks.append(NOFormerBlock(features_= features_, wavenumber= [wavenumber_stage]*2, norm_layer=norm_layer, drop= drop_rate, drop_path= drop_path_rate[j], use_layer_scale=use_layer_scale, layer_scale_init_value=layer_scale_init_value, mlp_ratio=mlp_ratio, activation=activation)) blocks = nn.Sequential(*blocks) return blocks def forward_NOFormer(self, x): """ forward the NOFormer """ for stage in self.network: x = stage(x) return x def add_grid(self, x): """ add grid to the input """ grid = get_grid2D(x.shape, x.device) x = torch.cat((x, grid), dim=-1) del grid return x def forward(self, x): if self.with_grid == True: x = 
self.add_grid(x) x = self.lifting(x.permute(0, 3, 1, 2)) x = F.pad(x, [0,self.padding, 0,self.padding]) x = self.forward_NOFormer(x) x =x[..., :-self.padding, :-self.padding] x = self.proj(x) return x.permute(0, 2, 3, 1) def training_step(self, batch: torch.Tensor, batch_idx): # training_step defines the train loop. # it is independent of forward x, y = batch batch_size = x.shape[0] out= self(x) loss = self.criterion(out.view(batch_size,-1), y.view(batch_size,-1)) self.log("loss", loss, on_epoch=True, prog_bar=True, logger=True) return loss def validation_step(self, val_batch: torch.Tensor, batch_idx): x, y = val_batch batch_size = x.shape[0] out= self(x) val_loss = self.criterion(out.view(batch_size,-1), y.view(batch_size,-1)) self.log('val_loss', val_loss, on_epoch=True, prog_bar=True, logger=True) return val_loss def test_step(self, test_batch: torch.Tensor, batch_idx): x, y = test_batch batch_size = x.shape[0] out= self(x) test_loss = self.criterion(out.view(batch_size,-1), y.view(batch_size,-1)) self.log('test_loss', test_loss, on_epoch=True, prog_bar=True, logger=True) return test_loss def configure_optimizers(self, optimizer=None, scheduler=None): if optimizer is None: optimizer = optim.AdamW(self.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay) if scheduler is None: scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=self.step_size, gamma=self.gamma) return { "optimizer": optimizer, "lr_scheduler": { "scheduler": scheduler }, }
10,270
38.35249
112
py
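The layer-scale parameters in NOFormerBlock are per-channel scalars initialized near zero, so each block starts close to the identity map; the double unsqueeze aligns the (C,) vector with a (B, C, H, W) branch output. In isolation:

import torch

features_ = 8
layer_scale = torch.ones(features_) * 1e-5  # as in NOFormerBlock
branch = torch.randn(2, features_, 4, 4)
scaled = layer_scale.unsqueeze(-1).unsqueeze(-1) * branch
print(scaled.shape)        # torch.Size([2, 8, 4, 4])
print(scaled.abs().max())  # ~1e-5 of the branch: the residual update starts tiny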
Fine-tuning-NOs
Fine-tuning-NOs-master/models/__init__.py
from .basics_model import *
from .FNO import FNO
from .FNO_residual import FNO_residual
from .sFNO_epsilon_v2 import sFNO_epsilon_v2, sFNO_epsilon_v2_proj
from .sFNO import sFNO
from .sFNO_epsilon_v1 import sFNO_epsilon_v1
from .sFNO_epsilon_v2_updated import sFNO_epsilon_v2_updated
283
39.571429
66
py
Fine-tuning-NOs
Fine-tuning-NOs-master/make_graph/make_box_plots.py
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser('Freq', add_help=False)
    parser.add_argument('-f', '--freq', type=int, default=7)
    parser.add_argument('-min', '--min', type=float, default=0.)
    parser.add_argument('-max', '--max', type=float, default=0.5)
    args = parser.parse_args()

    df = pd.read_csv(f"make_graph/test_GRF_{args.freq}Hz.csv")
    print(df)
    fig, ax = plt.subplots(figsize=(10, 6))
    # ax.set_yscale("log")
    '''
    box = sns.boxplot(
        data=df, notch=False, showcaps=True,
        flierprops={"marker": "x"},
        boxprops={"facecolor": (.4, .6, .8, .5)},
        medianprops={"color": "coral"},
        ax=ax, linewidth=2, orient="h"
    )
    '''
    box = sns.violinplot(
        data=df,
        flierprops={"marker": "x"},
        inner="stick",
        palette="pastel6",
        width=0.8,
        ax=ax,
        linewidth=2.5,
        orient="h",
        scale="width",
    )
    # box.set(xscale="log")
    ax.set_xlim([args.min, args.max])
    # Tweak the visual presentation
    ax.xaxis.grid(False)
    ax.yaxis.grid(True)
    sns.despine(trim=True, left=True)
    box.set_title(f"Test Loss (rel. L2): Helmholtz {args.freq} Hz")
    plt.savefig(f'make_graph/box_plot_test_loss_{args.freq}.png')
1,441
31.772727
73
py
Fine-tuning-NOs
Fine-tuning-NOs-master/utilities/model_factory.py
import torch

from models import *


def choosing_model(config):
    c_nn = config["model"]
    c_train = config["train"]
    # 7 Hz data only contains the real part of the field
    if config["Project"]["database"] == 'GRF_7Hz':
        if config["Project"]["name"] == "FNO":
            model = FNO(wavenumber=c_nn["modes_list"],
                        features_=c_nn["features"],
                        learning_rate=c_train["lr"],
                        step_size=c_train["step_size"],
                        gamma=c_train["gamma"],
                        weight_decay=c_train["weight_decay"],
                        activation=c_nn["activ"])
        elif config["Project"]["name"] == "sFNO":
            model = sFNO(wavenumber=c_nn["modes_list"],
                         drop=c_nn["drop"],
                         features_=c_nn["features"],
                         learning_rate=c_train["lr"],
                         step_size=c_train["step_size"],
                         gamma=c_train["gamma"],
                         weight_decay=c_train["weight_decay"],
                         activation=c_nn["activ"])
        elif config["Project"]["name"] == "FNO_residual":
            model = FNO_residual(wavenumber=c_nn["modes_list"],
                                 drop=c_nn["drop"],
                                 drop_path=c_nn["drop_path"],
                                 features_=c_nn["features"],
                                 learning_rate=c_train["lr"],
                                 step_size=c_train["step_size"],
                                 gamma=c_train["gamma"],
                                 weight_decay=c_train["weight_decay"],
                                 activation=c_nn["activ"])
        elif config["Project"]["name"] == "sFNO+epsilon_v1":
            model = sFNO_epsilon_v1(wavenumber=c_nn["modes_list"],
                                    drop=c_nn["drop"],
                                    drop_path=c_nn["drop_path"],
                                    features_=c_nn["features"],
                                    learning_rate=c_train["lr"],
                                    step_size=c_train["step_size"],
                                    gamma=c_train["gamma"],
                                    weight_decay=c_train["weight_decay"],
                                    activation=c_nn["activ"])
        elif config["Project"]["name"] == "sFNO+epsilon_v2":
            model = sFNO_epsilon_v2(modes=c_nn["modes_list"],
                                    drop_path_rate=c_nn["drop_path"],
                                    drop=c_nn["drop"],
                                    depths=c_nn["depths"],
                                    dims=c_nn["dims"],
                                    learning_rate=c_train["lr"],
                                    step_size=c_train["step_size"],
                                    gamma=c_train["gamma"],
                                    weight_decay=c_train["weight_decay"],
                                    activation=c_nn["activ"])
    # 12/15 Hz data contains both the real and imaginary parts of the field.
    # Note: the original test read `== ('GRF_12Hz') or ('GRF_15Hz')`, which is
    # always truthy; a membership test is what was intended.
    elif config["Project"]["database"] in ('GRF_12Hz', 'GRF_15Hz'):
        if config["Project"]["name"] == "FNO":
            Proj = torch.nn.Linear(c_nn["features"], 2, dtype=torch.float)
            model = FNO(wavenumber=c_nn["modes_list"],
                        features_=c_nn["features"],
                        learning_rate=c_train["lr"],
                        step_size=c_train["step_size"],
                        gamma=c_train["gamma"],
                        weight_decay=c_train["weight_decay"],
                        proj=Proj,
                        activation=c_nn["activ"])
        elif config["Project"]["name"] == "sFNO":
            Proj = torch.nn.Linear(c_nn["features"], 2, dtype=torch.float)
            model = sFNO(wavenumber=c_nn["modes_list"],
                         drop=c_nn["drop"],
                         features_=c_nn["features"],
                         learning_rate=c_train["lr"],
                         step_size=c_train["step_size"],
                         gamma=c_train["gamma"],
                         weight_decay=c_train["weight_decay"],
                         proj=Proj,
                         activation=c_nn["activ"])
        elif config["Project"]["name"] == "FNO_residual":
            Proj = torch.nn.Linear(c_nn["features"], 2, dtype=torch.float)
            model = FNO_residual(wavenumber=c_nn["modes_list"],
                                 drop=c_nn["drop"],
                                 drop_path=c_nn["drop_path"],
                                 features_=c_nn["features"],
                                 learning_rate=c_train["lr"],
                                 step_size=c_train["step_size"],
                                 gamma=c_train["gamma"],
                                 weight_decay=c_train["weight_decay"],
                                 proj=Proj,
                                 activation=c_nn["activ"])
        elif config["Project"]["name"] == "sFNO+epsilon_v1":
            Proj = torch.nn.Linear(c_nn["features"], 2, dtype=torch.float)
            model = sFNO_epsilon_v1(wavenumber=c_nn["modes_list"],
                                    drop=c_nn["drop"],
                                    drop_path=c_nn["drop_path"],
                                    features_=c_nn["features"],
                                    learning_rate=c_train["lr"],
                                    step_size=c_train["step_size"],
                                    gamma=c_train["gamma"],
                                    weight_decay=c_train["weight_decay"],
                                    proj=Proj,
                                    activation=c_nn["activ"])
        elif config["Project"]["name"] == "sFNO+epsilon_v2":
            # sFNO_epsilon_v2_proj is the same arch as sFNO_epsilon_v2
            Proj = torch.nn.Linear(c_nn["dims"][-1], 2, dtype=torch.float)
            model = sFNO_epsilon_v2_proj(modes=c_nn["modes_list"],
                                         drop_path_rate=c_nn["drop_path"],
                                         drop=c_nn["drop"],
                                         depths=c_nn["depths"],
                                         dims=c_nn["dims"],
                                         learning_rate=c_train["lr"],
                                         step_size=c_train["step_size"],
                                         gamma=c_train["gamma"],
                                         weight_decay=c_train["weight_decay"],
                                         proj=Proj,
                                         activation=c_nn["activ"])
        elif config["Project"]["name"] == "sFNO+epsilon_v2_updated":
            # same arch as sFNO_epsilon_v2; we just allow an independent projection layer
            # Proj = torch.nn.Linear(c_nn["dims"][-1], 2, dtype=torch.float)
            model = sFNO_epsilon_v2_updated(stage_list=c_nn["depths"],
                                            features_stage_list=c_nn["dims"],
                                            wavenumber_stage_list=c_nn["modes_list"],
                                            dim_input=1,
                                            dim_output=2,
                                            # proj=Proj,
                                            activation=c_nn["activ"],
                                            drop_rate=c_nn["drop"],
                                            drop_path_rate=c_nn["drop_path"],
                                            learning_rate=c_train["lr"],
                                            step_size=c_train["step_size"],
                                            gamma=c_train["gamma"],
                                            weight_decay=c_train["weight_decay"])
    return model
7,309
42.254438
84
py
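choosing_model only inspects a few nested keys. A minimal illustrative config for the 7 Hz FNO branch (the numeric values are placeholders, not the repo's tuned settings; the other branches additionally read drop, drop_path, depths, and dims):

from utilities.model_factory import choosing_model

config = {
    "Project": {"database": "GRF_7Hz", "name": "FNO"},
    "model": {"modes_list": [12, 12, 12, 12], "features": 32, "activ": "relu"},
    "train": {"lr": 1e-3, "step_size": 100, "gamma": 0.5, "weight_decay": 1e-5},
}
model = choosing_model(config)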
Fine-tuning-NOs
Fine-tuning-NOs-master/utilities/loss.py
import torch


# Loss function with relative/absolute Lp loss
class LpLoss(object):
    def __init__(self, d=2, p=2, size_average=True, reduction=True):
        super(LpLoss, self).__init__()
        # Dimension and Lp-norm type must be positive
        assert d > 0 and p > 0
        self.d = d
        self.p = p
        self.reduction = reduction
        self.size_average = size_average

    def abs(self, x, y):
        num_examples = x.size()[0]
        # Assume uniform mesh
        h = 1.0 / (x.size()[1] - 1.0)
        all_norms = (h**(self.d/self.p)) * torch.norm(x.view(num_examples, -1) - y.view(num_examples, -1), self.p, 1)
        if self.reduction:
            if self.size_average:
                return torch.mean(all_norms)
            else:
                return torch.sum(all_norms)
        return all_norms

    def rel(self, x, y):
        num_examples = x.size()[0]
        diff_norms = torch.norm(x.reshape(num_examples, -1) - y.reshape(num_examples, -1), self.p, 1)
        y_norms = torch.norm(y.reshape(num_examples, -1), self.p, 1)
        if self.reduction:
            if self.size_average:
                return torch.mean(diff_norms / y_norms)
            else:
                return torch.sum(diff_norms / y_norms)
        return diff_norms / y_norms

    def __call__(self, x, y):
        return self.rel(x, y)
1,326
27.234043
113
py
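LpLoss defaults to the batch-averaged relative L2 norm, ||x - y||_2 / ||y||_2 per sample. A hand-checkable example:

import torch
from utilities.loss import LpLoss

x = torch.tensor([[3.0, 4.0]])
y = torch.tensor([[0.0, 4.0]])
print(LpLoss()(x, y))  # tensor(0.7500): ||x - y|| = 3 and ||y|| = 4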
Fine-tuning-NOs
Fine-tuning-NOs-master/utilities/loading_data.py
import numpy as np
import torch
from bisect import bisect
import os
from torch.utils.data import Dataset, DataLoader


def to_numpy(x):
    return x.detach().cpu().numpy()


# Files loader
def MyLoader(GL, do="train", config=None, args=None):
    if config is not None:
        batch_size = config['train']['batchsize']
        workers = config['data']['load_workers']
        database = config['Project']['database']
        if database == 'GRF_7Hz':
            size = 128
        elif database in {'GRF_12Hz', 'GRF_15Hz'}:
            size = 64
    elif args is not None:
        batch_size = args.batchsize
        workers = args.load_workers
        database = args.database
        if database == 'GRF_7Hz':
            size = 128
        elif database in {'GRF_12Hz', 'GRF_15Hz'}:
            size = 64
    else:
        # note: `size` and `database` are only defined when config or args is given
        batch_size = 50
        workers = 4

    if do == 'train':
        list_x_train, list_y_train = GL('train')
        list_x_valid, list_y_valid = GL('validation')
        Train_Data_set = File_Loader(list_x_train, list_y_train, size=size, data=database)
        Valid_Data_set = File_Loader(list_x_valid, list_y_valid, size=size, data=database)
        # Setting the data loaders
        train_loader = DataLoader(dataset=Train_Data_set, shuffle=True, batch_size=batch_size, num_workers=workers)
        valid_loader = DataLoader(dataset=Valid_Data_set, shuffle=False, batch_size=batch_size, num_workers=workers)
        return train_loader, valid_loader
    elif do == 'test':
        list_x_test, list_y_test = GL('test')
        Test_Data_set = File_Loader(list_x_test, list_y_test, size=size, data=database)
        # Setting the data loader
        test_loader = DataLoader(dataset=Test_Data_set, shuffle=False, batch_size=batch_size, num_workers=workers)
        return test_loader


class GettingLists(object):
    # Generates the file lists for train/valid/test --> each file holds 5000 velocity/data pairs
    def __init__(self, data_for_training, wave_eq="acoustic", data_base="GRF_7Hz",
                 PATH='databases', batch_data_size=int(5000)):
        super(GettingLists, self).__init__()
        self.PATH = os.path.join(PATH, wave_eq, data_base)
        self.batch_data = batch_data_size
        valid_limit = data_for_training // self.batch_data
        self.valid_limit = valid_limit
        if data_base == 'GRF_7Hz':
            self.end = int(6)
        elif data_base in {'GRF_12Hz', 'GRF_15Hz'}:
            self.end = int(10)

    def get_list(self, do):
        if do == 'train':
            in_limit_train = np.array([os.path.join(self.PATH, 'model', f'velocity{k}.npy')
                                       for k in range(1, self.valid_limit + 1)])
            out_limit_train = np.array([os.path.join(self.PATH, 'data', f'pressure{k}.npy')
                                        for k in range(1, self.valid_limit + 1)])
            return in_limit_train, out_limit_train
        elif do == 'validation':
            in_limit_valid = np.array([os.path.join(self.PATH, 'model', f'velocity{k}.npy')
                                       for k in range(self.end, self.end + 1)])
            out_limit_valid = np.array([os.path.join(self.PATH, 'data', f'pressure{k}.npy')
                                        for k in range(self.end, self.end + 1)])
            return in_limit_valid, out_limit_valid
        elif do == 'test':
            in_limit_test = np.array([os.path.join(self.PATH, 'model', f'velocity{k}.npy')
                                      for k in range(self.valid_limit + 1, self.end + 1)])
            out_limit_test = np.array([os.path.join(self.PATH, 'data', f'pressure{k}.npy')
                                       for k in range(self.valid_limit + 1, self.end + 1)])
            return in_limit_test, out_limit_test

    def __call__(self, do='train'):
        return self.get_list(do)


class File_Loader(Dataset):
    # Memory-mapped file loader.
    # Note: the original branch tests read `self.data == ("GRF_12Hz") or ("GRF_15Hz")`,
    # which is always truthy (and made the _vz branches unreachable); membership
    # tests are what was intended.
    def __init__(self, data_paths, target_paths, size=128, data="GRF"):
        self.size = size
        self.data = data
        if self.data == "GRF_7Hz":
            self.data_memmaps = [np.load(path, mmap_mode='r') for path in data_paths]
            self.target_memmaps = [np.load(path, mmap_mode='r') for path in target_paths]
        elif self.data in ("GRF_12Hz", "GRF_15Hz"):
            self.data_memmaps = [np.load(path, mmap_mode='r').view(float) for path in data_paths]
            self.target_memmaps = [np.load(path, mmap_mode='r').view(float) for path in target_paths]
        elif self.data in ("GRF_12Hz_vz", "GRF_15Hz_vz"):
            self.data_memmaps = [np.load(path, mmap_mode='r').view(float) for path in data_paths]
            self.target_memmaps = [np.load(path, mmap_mode='r').view(float).reshape(2, self.size, self.size, 2)
                                   for path in target_paths]
        self.start_indices = [0] * len(data_paths)
        self.data_count = 0
        for index, memmap in enumerate(self.data_memmaps):
            self.start_indices[index] = self.data_count
            self.data_count += memmap.shape[0]

    def __len__(self):
        return self.data_count

    def __getitem__(self, index):
        memmap_index = bisect(self.start_indices, index) - 1
        index_in_memmap = index - self.start_indices[memmap_index]
        data = np.copy(self.data_memmaps[memmap_index][index_in_memmap])
        target = np.copy(self.target_memmaps[memmap_index][index_in_memmap])
        if self.data == "GRF_7Hz":
            return (torch.tensor(data * 1e-3, dtype=torch.float).view(self.size, self.size, 1),
                    torch.tensor(target, dtype=torch.float).view(self.size, self.size, 1))
        elif self.data in ("GRF_12Hz", "GRF_15Hz"):
            return (torch.tensor(data * 1e-3, dtype=torch.float).view(self.size, self.size, 1),
                    torch.tensor(target, dtype=torch.float).view(self.size, self.size, 2))
        elif self.data in ("GRF_12Hz_vz", "GRF_15Hz_vz"):
            return (torch.tensor(data * 1e-3, dtype=torch.float).view(self.size, self.size, 1),
                    torch.tensor(target, dtype=torch.float).view(2, self.size, self.size, 2))
6,671
43.18543
158
py
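File_Loader stitches several memmapped .npy files into one flat index space and resolves a global index with bisect over cumulative start offsets. The lookup logic in isolation:

from bisect import bisect

start_indices = [0, 5000, 10000]      # three files of 5000 samples each
index = 7421
memmap_index = bisect(start_indices, index) - 1
index_in_memmap = index - start_indices[memmap_index]
print(memmap_index, index_in_memmap)  # 1 2421, i.e. sample 2421 of the second file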
Fine-tuning-NOs
Fine-tuning-NOs-master/utilities/__init__.py
from .loading_data import *
from .loss import LpLoss
from .model_factory import choosing_model
from .plotting_data import *
from .saving_npy_output import *
156
30.4
41
py
Fine-tuning-NOs
Fine-tuning-NOs-master/utilities/plotting_data.py
import matplotlib.pyplot as plt
from .loading_data import to_numpy
import os


def plotting(in_, NN_out, out, name, database, PATH, list_to_plot=None,
             vmin=-0.5, vmax=0.5, shrink=0.8, ksample=0):
    if list_to_plot is None:
        list_to_plot = [0, 1, 2, 3, 4, 5]
        print("list_to_plot is None, so we plot the first 6 samples")
    # the original used a stray semicolon, which turned the message into a
    # standalone no-op expression; the comma attaches it to the assert
    assert in_.shape[0] >= len(list_to_plot), "list of samples to plot is bigger than the input size"
    in_ = to_numpy(in_)[:list_to_plot[-1] + 1, ...]
    NN_out = to_numpy(NN_out)[:list_to_plot[-1] + 1, ...]
    out = to_numpy(out)[:list_to_plot[-1] + 1, ...]
    s = in_.shape[1]
    for k in list_to_plot:
        print(f"plotting sample {k}")
        in_k = in_[k, ...].reshape(s, s)
        out_k = out[k, ...].reshape(s, s, -1)
        NN_k = NN_out[k, ...].reshape(s, s, -1)
        plt.figure(figsize=(20, 10))
        plt.subplot(231)
        plt.imshow(in_k, vmin=1., vmax=5., cmap='jet')
        plt.colorbar(shrink=shrink)
        plt.title(f'wavespeed: {k}')
        plt.subplot(232)
        plt.imshow(out_k[:, :, 0].reshape(s, s), vmin=vmin, vmax=vmax, cmap='seismic')
        plt.colorbar(shrink=shrink)
        plt.title(f'HDG (real) sample: {k}')
        plt.subplot(233)
        plt.imshow(NN_k[:, :, 0].reshape(s, s), vmin=vmin, vmax=vmax, cmap='seismic')
        plt.colorbar(shrink=shrink)
        plt.title(f'{name} (real) sample: {k}')
        plt.subplot(235)
        plt.imshow(out_k[:, :, 1].reshape(s, s), vmin=vmin, vmax=vmax, cmap='seismic')
        plt.colorbar(shrink=shrink)
        plt.title(f'HDG (imaginary) sample: {k}')
        plt.subplot(236)
        plt.imshow(NN_k[:, :, 1].reshape(s, s), vmin=vmin, vmax=vmax, cmap='seismic')
        plt.colorbar(shrink=shrink)
        plt.title(f'{name} (imaginary) sample: {k}')
        saving_dir = os.path.join(PATH, "figures", database, name, f"realization_{ksample}")
        if not os.path.exists(saving_dir):
            os.makedirs(saving_dir)
        plt.savefig(f"{saving_dir}/ex_{k}.png")
2,069
39.588235
104
py
Fine-tuning-NOs
Fine-tuning-NOs-master/utilities/saving_npy_output.py
import numpy as np
import os
from .loading_data import to_numpy


def saving_files(in_files, out_files, NN_out_files, NN_name, database, PATH, realization_k):
    """Save the files in the directory OOD/database/realization_k."""
    saving_dir = f'{PATH}/{database}/realization_{realization_k}'
    if not os.path.exists(saving_dir):
        os.makedirs(saving_dir)
    if not isinstance(in_files, np.ndarray):
        in_files = to_numpy(in_files)
    if not isinstance(out_files, np.ndarray):
        out_files = to_numpy(out_files)
    if not isinstance(NN_out_files, np.ndarray):
        NN_out_files = to_numpy(NN_out_files)
    np.save(os.path.join(saving_dir, f"wavespeed_{database}.npy"), in_files)
    np.save(os.path.join(saving_dir, f"pressure_{database}.npy"), out_files)
    np.save(os.path.join(saving_dir, f"pressure_{NN_name}_{database}.npy"), NN_out_files)
896
39.772727
94
py
Fine-tuning-NOs
Fine-tuning-NOs-master/visualization_code/combine_files.py
import csv import numpy as np import argparse import pandas as pd import json import vtk import os import h5py import vtk_colors as colors import vtk_io_helper as io_helper import sys ''' Collect all surface samples spread across multiple csv files (with arbitrary structure, or total lack thereof) and form a surface by triangulation. The surface and its underlying mesh can both be visually inspected afterwards. A better visualization of the loss landscape surface can then be created with 'view_surface.py' ''' from vtk.util.numpy_support import numpy_to_vtk def do_warp(input, factor=10): warp = vtk.vtkWarpScalar() if isinstance(input, vtk.vtkAlgorithm): warp.SetInputConnection(input.GetOutputPort()) else: warp.SetInputData(input) warp.SetScaleFactor(factor) return warp def do_wrap(input, radius): tubes = vtk.vtkTubeFilter() if isinstance(input, vtk.vtkAlgorithm): tubes.SetInputConnection(input.GetOutputPort()) else: tubes.SetInputData(input) tubes.SetNumberOfSides(12) tubes.SetRadius(radius) return tubes # convert an array of 2D positions into a polydata that can be warped # using vtkWarpScalar def interpolate_trajectory(trajectory, triangulation, factor): append = vtk.vtkAppendFilter() append.AddInputData(triangulation) append.Update() tris = vtk.vtkUnstructuredGrid() tris.ShallowCopy(append.GetOutput()) losses = tris.GetPointData().GetScalars() values = [] coords = [] xs = [] ys = [] for p in trajectory: xs.append(p[0]) ys.append(p[1]) subid = 0 pcoord = [0,0,0] weights = [0 for i in range(8)] subid = 0 cellid = tris.FindCell([p[0], p[1], 0], None, 0, 1.0e-6, vtk.reference(subid), pcoord, weights) print(f'cellid = {cellid}') if cellid < 0: print(f'position {[p[0], p[1], 0]} is outside surface domain') continue cell = tris.GetCell(cellid) ids = [ cell.GetPointId(i) for i in range(0, cell.GetNumberOfPoints())] v = 0 print(f'ids={ids}') for i, id in enumerate(ids): print(f'loss = {losses.GetTuple(id)}') v += weights[i]*losses.GetTuple(id)[0] values.append(args.factor*v) coords.append([p[0], p[1], values[-1]]) print(f'x bounds of trajectory: {np.min(xs)} - {np.max(xs)}') print(f'y bounds of trajectory: {np.min(ys)} - {np.max(ys)}') values = np.array(values) scalars = numpy_to_vtk(values) coords = np.array(coords) coords = numpy_to_vtk(coords) pts = vtk.vtkPoints() pts.SetData(coords) poly = vtk.vtkPolyData() poly.SetPoints(pts) poly.GetPointData().SetScalars(scalars) lines = vtk.vtkCellArray() lines.InsertNextCell(len(values)) for i in range(len(values)): lines.InsertCellPoint(i) poly.SetLines(lines) return poly def compute(args): df = pd.DataFrame() if args.skip and os.path.exists(args.output): print(f'{args.output} found. 
Nothing to be done') sys.exit(0) total_size = 0 if args.input is None and args.path is not None: args.input = [] filenames = os.listdir(args.path) for name in filenames: if os.path.splitext(name)[1].lower() == '.csv': args.input.append(name) for fname in args.input: if args.path: fname = os.path.join(args.path, fname) print(f'importing {fname}') df1 = pd.read_csv(fname) total_size += df1.shape[0] if df.shape[0] == 0: df = df1 else: df = pd.concat([df, df1], ignore_index=True) if args.restrict and not args.trim: args.trim = True if args.trim and args.x is not None and args.y is not None: xmin, xmax, xnum = args.x ymin, ymax, ynum = args.y xnum = int(xnum) ynum = int(ynum) n = df.shape[0] if args.restrict: xs = np.linspace(xmin, xmax, xnum) ys = np.linspace(ymin, ymax, ynum) df = df[(df['x'].isin(xs)) & (df['y'].isin(ys))] else: df = df[(df['x']>=xmin) & (df['x']<=xmax) & (df['y']>=ymin) & (df['y']<=ymax) ] n1 = df.shape[0] print(f'after trimming, number of samples went from {n} to {n1}') if len(args.input) == 0: print('No data file available. Done') sys.exit(0) points = np.array([[x, y, 0] for x, y in zip(df['x'], df['y'])]) print(points) coords = numpy_to_vtk(points) dataset = vtk.vtkPolyData() pts = vtk.vtkPoints() pts.SetData(coords) dataset.SetPoints(pts) loss = numpy_to_vtk(np.array(df['loss'])) minloss = np.min(loss) maxloss = np.max(loss) meanloss = np.mean(loss) print(f'loss stats: min: {minloss}, mean: {meanloss}, max: {maxloss}') dataset.GetPointData().SetScalars(loss) tri = vtk.vtkDelaunay2D() tri.SetInputData(dataset) tri.Update() triangulation = tri.GetOutput() bounds = triangulation.GetBounds() if args.trajectory: f = h5py.File(args.trajectory, 'r') xs = list(f['proj_00coord']) ys = list(f['proj_01coord']) curve = [ (x,y) for x, y in zip(xs, ys)] f.close() curve_dataset = interpolate_trajectory(curve, triangulation, args.factor) if args.output: base, ext = os.path.splitext(args.output) io_helper.saveVTK_XML(curve_dataset, base + '_trajectory' + ext) wrapped_curve = do_wrap(curve_dataset, args.radius) mapper = vtk.vtkDataSetMapper() mapper.SetInputConnection(wrapped_curve.GetOutputPort()) curve_actor = vtk.vtkActor() curve_actor.SetMapper(mapper) curve_actor.GetProperty().SetColor(args.color) warp = vtk.vtkWarpScalar() warp.SetInputConnection(tri.GetOutputPort()) warp.SetScaleFactor(args.factor) warp.Update() surface = warp.GetOutput() if args.output: print(f'saving surface in {args.output}') io_helper.saveVTK_XML(surface, args.output) plane = vtk.vtkPlaneSource() plane.SetXResolution(10) plane.SetYResolution(10) plane.SetOrigin(bounds[0], bounds[2], 0) plane.SetPoint1(bounds[1], bounds[2], 0) plane.SetPoint2(bounds[0], bounds[3], 0) pmapper = vtk.vtkDataSetMapper() pmapper.SetInputConnection(plane.GetOutputPort()) pactor = vtk.vtkActor() pactor.SetMapper(pmapper) pactor.GetProperty().SetColor(1,1,1) tactor = None if args.show_edges: edges = vtk.vtkExtractEdges() edges.SetInputConnection(warp.GetOutputPort()) tubes = vtk.vtkTubeFilter() tubes.SetInputConnection(edges.GetOutputPort()) tubes.SetNumberOfSides(10) tubes.SetRadius(0.1) tmapper = vtk.vtkPolyDataMapper() tmapper.SetInputConnection(tubes.GetOutputPort()) tactor = vtk.vtkActor() tactor.SetMapper(tmapper) tactor.GetProperty().SetColor(1,0,0) cmap = colors.make_colormap('viridis', [minloss, maxloss]) # cmap = vtk.vtkColorTransferFunction() # cmap.AddRGBPoint(minloss, 1, 1, 0) # cmap.AddRGBPoint(maxloss, 0, 0, 1) # cmap.AddRGBPoint(meanloss, 0.5, 0.5, 0.5) mapper = vtk.vtkPolyDataMapper() 
mapper.SetInputConnection(warp.GetOutputPort()) mapper.ScalarVisibilityOn() mapper.SetLookupTable(cmap) actor = vtk.vtkActor() actor.SetMapper(mapper) actor.GetProperty().SetColor(1,1,0) renderer = vtk.vtkRenderer() renderer.AddActor(actor) if tactor is not None: renderer.AddActor(tactor) renderer.AddActor(pactor) if args.trajectory: renderer.AddActor(curve_actor) window = vtk.vtkRenderWindow() window.AddRenderer(renderer) window.SetSize(1024, 1024) interactor = vtk.vtkRenderWindowInteractor() interactor.SetRenderWindow(window) df.sort_values('x', ascending=True, inplace=True) xs = df['x'].unique() print(f'xs={xs}') df.sort_values('y', ascending=True, inplace=True) ys = df['y'].unique() print(f'ys={ys}') if args.x is not None and args.y is not None: goal_xs = np.linspace(args.x[0], args.x[1], int(args.x[2])) goal_ys = np.linspace(args.y[0], args.y[1], int(args.y[2])) missing = [] for x in goal_xs: if not x in xs: # print(f'column {x} is missing ({len(goal_ys)} missing values)') missing.extend([[x,y] for y in goal_ys]) else: subdf = df[df['x']==x] ys = subdf['y'].unique() for y in goal_ys: if not y in ys: # print(f'value at ({x}, {y}) is missing') missing.append([x,y]) n = len(missing) total = len(goal_xs) * len(goal_ys) print(f'There are a total of {n} missing values ({float(n)/float(total)*100.}%)') done = total-n print(f'total unique computed values: {done}') print(f'total computed values: {total_size}') print(f'redundant values: {total_size-done}') print('missing values are:\n{}'.format(missing)) if args.missing: with open(args.missing, 'w') as fp: json.dump(missing, fp) with open(args.missing, 'r') as fp: all_c = json.load(fp) print('\n\n\n\n{}'.format(all_c)) interactor.Initialize() window.Render() interactor.Start() if __name__ == '__main__': parser = argparse.ArgumentParser(description='Combine information contained in csv file to reconstruct a data lattice') parser.add_argument('-p', '--path', type=str, default='', help='Path to append to filenames') parser.add_argument('-i', '--input', type=str, action='append', help='CSV filename contening a fraction of the data') parser.add_argument('--x', type=float, nargs=3, required=False, help='X sampling: xmin, xmax, xnum') parser.add_argument('--y', type=float, nargs=3, required=False, help='Y sampling: ymin, ymax, ynum') parser.add_argument('--fieldnames', type=str, default='', help='Column names') parser.add_argument('-o', '--output', type=str, default='', help='Filename to export reconstucted surface') parser.add_argument('--missing', type=str, default='', help='Filename to use to export coordinates of missing samples') parser.add_argument('--trajectory', type=str, default='', help='Filename of projected training trajectory') parser.add_argument('--radius', type=float, default=1, help='Radius of tubes depicting trajectory') parser.add_argument('--factor', type=float, default=10, help='Loss magnification') parser.add_argument('--color', type=float, nargs=3, default=[1,1,1], help='Color for trajectory representation') parser.add_argument('--show-edges', action='store_true', help='Display surface edges') parser.add_argument('--trim', action='store_true', help='Restrict surface to prescribed x,y domain') parser.add_argument('--restrict', action='store_true', help='Restrict the mesh to the prescribed samples') parser.add_argument('--skip', action='store_true', help='Indicate whether to do anything if output file exists already') args = parser.parse_args() compute(args)
11,372
35.219745
124
py
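Stripped of the csv handling, trajectory overlay, and rendering, the surface construction in combine_files.py is a Delaunay triangulation of scattered (x, y) samples followed by a scalar warp. A minimal sketch of that VTK pipeline on random data (stand-ins for the loss samples):

import numpy as np
import vtk
from vtk.util.numpy_support import numpy_to_vtk

pts_np = np.random.rand(100, 3)
pts_np[:, 2] = 0                        # scattered samples in the z = 0 plane
loss_np = np.random.rand(100)           # scalar "loss" per sample

poly = vtk.vtkPolyData()
pts = vtk.vtkPoints()
pts.SetData(numpy_to_vtk(pts_np))
poly.SetPoints(pts)
poly.GetPointData().SetScalars(numpy_to_vtk(loss_np))

tri = vtk.vtkDelaunay2D()               # triangulate in the plane
tri.SetInputData(poly)
warp = vtk.vtkWarpScalar()              # lift z by the (scaled) scalar
warp.SetInputConnection(tri.GetOutputPort())
warp.SetScaleFactor(10)
warp.Update()
print(warp.GetOutput().GetNumberOfCells())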
Fine-tuning-NOs
Fine-tuning-NOs-master/visualization_code/scatterplotmatrix.py
from matplotlib import pyplot as plt
import itertools
import numpy as np


def depth(data):
    if isinstance(data, list):
        d = np.array(data)
        return len(d.shape)
    elif isinstance(data, np.ndarray):
        # the original returned len(data), i.e. the first-dimension size;
        # the array rank is what is intended here, matching the list branch
        return len(data.shape)
    else:
        print(f'unable to determine depth of {data}')
        return 0


def scatterplot_matrix(data, names, colors=None, labels=None, markers=None, **kwargs):
    """Plots a scatterplot matrix of subplots.  Each row of "data" is plotted
    against other rows, resulting in a nrows by nrows grid of subplots with the
    diagonal subplots labeled with "names".  Additional keyword arguments are
    passed on to matplotlib's "plot" command.  Returns the matplotlib figure
    object containing the subplot grid."""
    if not isinstance(data, np.ndarray):
        data = np.array(data)
    if len(data.shape) == 2:
        print('single curve in input')
        numvars, numdata = data.shape
        data = np.array([data])
    elif len(data.shape) == 3:
        print(f'several curves in input ({data.shape[0]})')
        numvars, numdata = data[0].shape
    # numvars, numdata = data.shape
    fig, axes = plt.subplots(nrows=numvars, ncols=numvars, figsize=(8, 8))
    fig.subplots_adjust(hspace=0.05, wspace=0.05)

    for ax in axes.flat:
        # Hide all ticks and labels
        ax.xaxis.set_visible(False)
        ax.yaxis.set_visible(False)
        print(f'ax is {ax}')
        # Set up ticks only on one side for the "edge" subplots...
        if ax.get_subplotspec().is_first_col():
            ax.yaxis.set_ticks_position('left')
        # if ax.get_subplotspec().is_last_col():
        #     ax.yaxis.set_ticks_position('right')
        # if ax.get_subplotspec().is_first_row():
        #     ax.xaxis.set_ticks_position('top')
        if ax.get_subplotspec().is_last_row():
            ax.xaxis.set_ticks_position('bottom')
        ax.label_outer()

    # Plot the data.
    for i, j in zip(*np.triu_indices_from(axes, k=1)):
        for curve_id in range(data.shape[0]):
            cdata = data[curve_id]
            for x, y in [(i, j), (j, i)]:
                if colors is not None:
                    axes[x, y].plot(cdata[x], cdata[y], c=colors[curve_id],
                                    marker=markers[curve_id], **kwargs)

    # Label the diagonal subplots...
    for i, label in enumerate(names):
        axes[i, i].annotate(label, (0.5, 0.5), xycoords='axes fraction',
                            ha='center', va='center')

    # Turn on the proper x or y axes ticks.
    for i, j in zip(range(numvars), itertools.cycle((-1, 0))):
        axes[j, i].xaxis.set_visible(True)
        axes[i, j].yaxis.set_visible(True)

    return fig


if __name__ == '__main__':
    np.random.seed(1977)
    numvars, numdata = 4, 10
    data = 10 * np.random.random((numvars, numdata))
    fig = scatterplot_matrix(data, ['mpg', 'disp', 'drat', 'wt'],
                             linestyle='none', marker='o', color='black', mfc='none')
    fig.suptitle('Simple Scatterplot Matrix')
    plt.show()
2,991
35.487805
110
py
Fine-tuning-NOs
Fine-tuning-NOs-master/visualization_code/view_surface.py
import vtk import argparse from scipy import interpolate import math from matplotlib import pyplot as plt import json import sys import os import numpy as np import vtk_camera import vtk_colorbar import vtk_colors import vtk_io_helper from vtk_colors import make_colormap from vtk.util.numpy_support import * ''' Program to visualize loss surface and training trajectory with a few additional features. ''' frame_counter=0 parameters = { 'surf_name': [ '', 'name of surface'], 'traj_diameter': [0.1, 'trajectory diameter' ], 'step_diameter': [0.4, 'Diameter of step spherical representation'], 'size': [ [1024, 1024], 'image resolution' ], 'ncontours': [20, 'Number of isocontours'], 'curve_color': [ [1,1,1], 'Default color of trajectory' ], 'traj_offset': [ 0.01, 'Vertical offset for trajectory' ], 'iso_diameter': [ 0.05, 'Isocontour tubes diameters' ], 'font_size': [ 12, 'Font size for color legend' ], 'log_offset': [ 0.1, 'Logarithm offset' ], 'surface_palette': [ 'viridis', 'Name of color palette for surface' ], 'trajectory_palette': ['Oranges', 'Name of color palette for trajectory' ], 'show_isovalues': [ False, 'Display isovalues on isolines' ], 'show_steps': [ False, 'Display individual learning steps along trajectory'], 'color_surface': [ True, 'Color map loss on surface' ], 'frame_basename': [ 'frame', 'Basename of frame snapshots to be saved' ], 'camera_basename': [ 'camera', 'Basename for exported camera settings' ], 'camera_file': [ '', 'Name of file containing camera information' ], 'print_text': [ '', 'Text to be displayed' ], 'save_frame': [ False, 'Save frame and exit'], 'flatten': [False, 'Flatten into 2D visualization'], 'range': [ [0.0,0.0], 'Value range to consider for color mapping'], 'show_colorbars': [True, 'Display colorbars of used color mappings'], 'background': [ [0.,0.,0.], 'Background color'] } def save_frame(window, verbose=False): global frame_counter global frame_basename # --------------------------------------------------------------- # Save current contents of render window to PNG file # --------------------------------------------------------------- if frame_counter >= 0: file_name = frame_basename + str(frame_counter).zfill(5) + ".png" else: file_name = frame_basename + '.png' image = vtk.vtkWindowToImageFilter() image.SetInput(window) png_writer = vtk.vtkPNGWriter() png_writer.SetInputConnection(image.GetOutputPort()) png_writer.SetFileName(file_name) window.Render() png_writer.Write() frame_counter += 1 if verbose: print(file_name + " has been successfully exported") def key_pressed_callback(inter, event): global camera_basename # --------------------------------------------------------------- # Attach actions to specific keys # --------------------------------------------------------------- key = inter.GetKeySym() window = inter.GetRenderWindow() cam = window.GetRenderers().GetFirstRenderer().GetActiveCamera() if key == "s": save_frame(window=window) elif key == "c": print('about to save camera setting') vtk_camera.save_camera(camera=cam, filename=camera_basename) vtk_camera.print_camera(cam) elif key == "q": if args.verbose: print("User requested exit.") sys.exit() def log_xform(data, args): coords = data.GetPoints().GetData() newcoords = vtk.vtkFloatArray() newcoords.DeepCopy(coords) for i in range(newcoords.GetNumberOfTuples()): p = newcoords.GetTuple3(i) z = math.log(p[2]+args.log_offset) print(f'{p[2]} -> {z}') newcoords.SetTuple3(i, p[0], p[1], z) newpts = vtk.vtkPoints() newpts.SetData(newcoords) data.SetPoints(newpts) return data def flat_xform(data): 
coords = data.GetPoints().GetData()
    newcoords = vtk.vtkFloatArray()
    newcoords.DeepCopy(coords)
    for i in range(newcoords.GetNumberOfTuples()):
        p = newcoords.GetTuple3(i)
        newcoords.SetTuple3(i, p[0], p[1], 0)
    newpts = vtk.vtkPoints()
    newpts.SetData(newcoords)
    data.SetPoints(newpts)
    return data

def shift(data, offset):
    xform = vtk.vtkTransform()
    xform.Identity()
    xform.Translate(0, 0, offset)
    _shift = vtk.vtkTransformPolyDataFilter()
    _shift.SetTransform(xform)
    _shift.SetInputData(data)
    _shift.Update()
    shifted = vtk.vtkPolyData()
    shifted.DeepCopy(_shift.GetOutput())
    return shifted

def view(args):
    if args.path:
        args.surface = args.path + '/' + args.surface
        args.trajectory = args.path + '/' + args.trajectory
        if args.info is not None:
            args.info = args.path + '/' + args.info
    global frame_basename
    frame_basename = args.frame_basename
    global camera_basename
    camera_basename = args.camera_basename

    if not os.path.exists(args.surface):
        print('{} does not exist. Nothing to visualize!'.format(args.surface))
        sys.exit(0)

    surf_reader = vtk_io_helper.readVTK(args.surface)
    surf_reader.Update()
    geom = vtk.vtkGeometryFilter()
    geom.SetInputConnection(surf_reader.GetOutputPort())
    geom.Update()
    # 2. create a mapper to its geometry
    surface = geom.GetOutput()
    if args.flatten:
        surface = flat_xform(surface)

    info = None
    if args.info is not None:
        print('importing training information')
        with open(args.info, 'r') as json_file:
            info = json.load(json_file)

    if args.do_log:
        surface = log_xform(surface, args)
        args.is_log = True  # downstream code must now treat heights as log values

    normals_algo = vtk.vtkPolyDataNormals()
    normals_algo.SetInputData(surface)
    normals_algo.Update()
    surface = normals_algo.GetOutput()
    surf_mapper = vtk.vtkPolyDataMapper()
    surf_actor = vtk.vtkActor()
    surf_mapper.SetInputData(surface)
    surf_actor.SetMapper(surf_mapper)
    surf_actor.GetProperty().SetSpecular(0.25)
    surf_actor.GetProperty().SetDiffuse(0.9)

    # 3.
create a color map for the value range if args.surf_name: value_bounds = surface.GetPointData().GetArray(args.surf_name).GetRange() surface.GetPointData().SetActiveScalars(args.surf_name) else: value_bounds = surface.GetPointData().GetScalars().GetRange() if args.range != [0,0]: value_bounds = args.range cmap = make_colormap(args.surface_palette, value_bounds) if args.color_surface: surf_mapper.ScalarVisibilityOn() surf_mapper.SetLookupTable(cmap) else: surf_mapper.ScalarVisibilityOff() surf_actor.GetProperty().SetColor(1,1,1) renderer = vtk.vtkRenderer() renderer.AddActor(surf_actor) # Create a text actor if len(args.print_text) > 1: txt = vtk.vtkTextActor() if info is not None: loss = info['training']['loss'] loss_as_str = '{:0.8f}'.format(loss) steps = info['training']['steps'][-1] args.print_text += '/' + str(steps) + 'it./loss=' + loss_as_str txt.SetInput(args.print_text) txtprop = txt.GetTextProperty() txtprop.SetFontFamilyToArial(); txtprop.BoldOn(); txtprop.SetFontSize(28); txtprop.SetColor(1,1,1); txt.SetDisplayPosition(20, 30); renderer.AddActor(txt) if (args.color_surface or not args.show_isovalues) and args.show_colorbars: surf_bar_param = vtk_colorbar.colorbar_param(title='Training\nloss', title_font_size=40, title_col=[0,0,0], label_col=[0,0,0], title_offset=10, nlabels=10, font_size=30, width=150, height=600, pos=[0.9, 0.3]) surf_bar = vtk_colorbar.colorbar(ctf=cmap, param=surf_bar_param) renderer.AddActor2D(surf_bar.get()) contour = vtk.vtkContourFilter() contour.SetInputData(surface) # fix set of loss values that covers a wide range of training scenarios vals = [10.0, 5.0, 3.0, 2.5, 2.0, 1.75, 1.5, 1.25, 1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.45, 0.4, 0.35, 0.3, 0.25, 0.2, 0.15, 0.14, 0.13, 0.12, 0.11, 0.1, 0.09, 0.08, 0.07, 0.06, 0.05, 0.04, 0.03, 0.02, 0.001, 0.0005, 0.0001, 0.00005] for i, v in enumerate(vals): contour.SetValue(i, v) stripper = vtk.vtkStripper() stripper.SetInputConnection(contour.GetOutputPort()) stripper.Update() if args.ncontours > 0: isolines = stripper.GetOutput() if args.is_log: npts = isolines.GetNumberOfPoints() values = vtk.vtkDoubleArray() values.SetNumberOfTuples(npts) values.SetNumberOfComponents(1) values.SetName('Actual loss values') log_values = isolines.GetPointData().GetScalars() for i in range(npts): logv = log_values.GetTuple1(i) values.SetTuple1(i, math.exp(logv)-args.log_offset) if args.ncontours > 0: isolines.GetPointData().AddArray(values) isolines.GetPointData().SetActiveScalars('Actual loss values') if args.ncontours > 0: isolines = shift(isolines, args.traj_offset) if args.show_isovalues: textprop = vtk.vtkTextProperty() textprop.FrameOff() if not args.color_surface: textprop.SetColor(0,0,0) else: textprop.SetColor(1,1,1) textprop.SetFontSize(args.font_size) textprop.BoldOn() contour_mapper = vtk.vtkLabeledContourMapper() contour_mapper.LabelVisibilityOn() contour_mapper.SetSkipDistance(100) contour_mapper.SetTextProperty(textprop) contour_mapper.SetInputData(isolines) contour_mapper.ScalarVisibilityOff() else: contour_mapper = vtk.vtkPolyDataMapper() contour_mapper.SetInputData(isolines) if False and not args.color_surface: contour_mapper.ScalarVisibilityOn() contour_mapper.SetLookupTable(cmap) else: contour_mapper.ScalarVisibilityOn() contour_actor = vtk.vtkActor() contour_actor.GetProperty().SetLineWidth(args.iso_diameter) contour_actor.SetMapper(contour_mapper) contour_actor.GetProperty().SetColor(1,1,1) contour_actor.AddPosition([0,0,1]) renderer.AddActor(contour_actor) if args.trajectory is not None: curve_reader = 
vtk.vtkXMLPolyDataReader()
        curve_reader.SetFileName(args.trajectory)
        curve_reader.Update()
        curve = curve_reader.GetOutput()
        nsteps = curve.GetNumberOfPoints()
        cmap_traj = make_colormap(args.trajectory_palette, [0, nsteps - 1])
        if args.flatten:
            curve = flat_xform(curve)
        if args.do_log:
            curve = log_xform(curve, args)
        curve = shift(curve, args.traj_offset)

        stops = vtk.vtkPolyData()
        actual_steps = None
        if args.show_steps:
            verts = vtk.vtkCellArray()
            scalars = numpy_to_vtk(np.array([i for i in range(nsteps)]))
            for i in range(curve.GetNumberOfPoints()):
                verts.InsertNextCell(1)
                verts.InsertCellPoint(i)
            curve.SetVerts(verts)
            curve.GetPointData().SetScalars(scalars)
            sphere = vtk.vtkSphereSource()
            sphere.SetThetaResolution(12)
            sphere.SetPhiResolution(12)
            sphere.SetRadius(args.step_diameter)
            glyphs = vtk.vtkGlyph3D()
            glyphs.SetSourceConnection(sphere.GetOutputPort())
            glyphs.SetInputData(curve)
            glyphs.ScalingOff()
            gmapper = vtk.vtkPolyDataMapper()
            gmapper.SetInputConnection(glyphs.GetOutputPort())
            gmapper.ScalarVisibilityOn()
            gmapper.SetLookupTable(cmap_traj)
            gactor = vtk.vtkActor()
            gactor.SetMapper(gmapper)
            gactor.AddPosition(0, 0, args.traj_offset)
            gactor.GetProperty().SetColor(1, 0, 0)
            renderer.AddActor(gactor)
        elif info is not None:
            steps = info['training']['steps']
            narrays = curve.GetPointData().GetNumberOfArrays()
            print('there are {} arrays'.format(narrays))
            iter_array = None
            for i in range(narrays):
                name = curve.GetPointData().GetArray(i).GetName()
                if name.lower() == 'iterations' or name.lower() == 'iteration':
                    iter_array = curve.GetPointData().GetArray(i)
            if iter_array is not None:
                # find recorded steps within list of iterations
                iters = vtk_to_numpy(iter_array)
                actual_steps = [(np.abs(iters - s)).argmin() for s in steps]
            elif curve.GetNumberOfPoints() == len(steps):
                actual_steps = steps
            else:
                print('Missing iteration information in trajectory')
            if actual_steps is not None:
                steps_array = vtk.vtkIntArray()
                steps_array.SetNumberOfComponents(1)
                steps_array.SetNumberOfTuples(len(steps))
                coords = vtk.vtkFloatArray()
                coords.SetNumberOfComponents(3)
                coords.SetNumberOfTuples(len(steps))
                for i in range(len(steps)):
                    steps_array.SetTuple1(i, steps[i])
                    p = curve.GetPoints().GetPoint(actual_steps[i])
                    coords.SetTuple3(i, p[0], p[1], p[2])
                steps_array.SetName('Iteration')
                pts = vtk.vtkPoints()
                pts.SetData(coords)
                stops.SetPoints(pts)
                stops.GetPointData().SetScalars(steps_array)
                sphere = vtk.vtkSphereSource()
                sphere.SetThetaResolution(20)
                sphere.SetPhiResolution(20)
                sphere.SetRadius(args.step_diameter)
                glyphs = vtk.vtkGlyph3D()
                glyphs.SetSourceConnection(sphere.GetOutputPort())
                glyphs.SetInputData(stops)
                glyphs.ScalingOff()
                gmapper = vtk.vtkPolyDataMapper()
                gmapper.SetInputConnection(glyphs.GetOutputPort())
                gmapper.ScalarVisibilityOn()
                cmap2 = make_colormap(args.trajectory_palette, [0, steps[-1]])
                gmapper.SetLookupTable(cmap2)
                gactor = vtk.vtkActor()
                gactor.SetMapper(gmapper)
                if args.show_colorbars:
                    traj_bar_param = vtk_colorbar.colorbar_param(title='Iterations', title_offset=10, nlabels=11, font_size=args.font_size, width=80, height=300, pos=[0.9, 0.1])
                    traj_bar = vtk_colorbar.colorbar(ctf=cmap2, param=traj_bar_param, is_float=False)
                    renderer.AddActor2D(traj_bar.get())
                renderer.AddActor(gactor)
        tube = vtk.vtkTubeFilter()
        tube.SetInputData(curve)
tube.SetRadius(args.traj_diameter/2) tube.SetNumberOfSides(20) traj_mapper = vtk.vtkPolyDataMapper() traj_mapper.SetInputConnection(tube.GetOutputPort()) # curve_mapper.SetInputData(curve) traj_mapper.ScalarVisibilityOff() traj_actor = vtk.vtkActor() traj_actor.SetMapper(traj_mapper) # curve_actor.GetProperty().SetLineWidth(args.traj_diameter) traj_actor.GetProperty().SetColor(args.curve_color) renderer.AddActor(traj_actor) window = vtk.vtkRenderWindow() window.AddRenderer(renderer) window.SetSize(args.size[0], args.size[1]) renderer.SetBackground(args.background) window.StencilCapableOn() # for proper display of log loss values interactor = vtk.vtkRenderWindowInteractor() interactor.SetRenderWindow(window) if args.camera_file: renderer.SetActiveCamera(vtk_camera.load_camera(args.camera_file)) elif args.flatten: renderer.GetActiveCamera().ParallelProjectionOn() vtk_camera.print_camera(renderer.GetActiveCamera()) if args.light_file is not None and len(args.light_file) > 0: newlc = vtk_camera.load_lights(args.light_file) renderer.SetLightCollection(newlc) if not args.save_frame: interactor.AddObserver("KeyPressEvent", key_pressed_callback) interactor.Initialize() window.Render() if args.save_frame: global frame_counter frame_counter = -1 save_frame(window) sys.exit(0) else: interactor.Start() if __name__ == '__main__': parser = argparse.ArgumentParser(description='Visualize loss landscape', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('-p', '--path', type=str, help='Path to surface information') parser.add_argument('-s', '--surface', type=str, required=True, help='File containing loss surface geometry') parser.add_argument('-t', '--trajectory', type=str, required=True, help='File containing training trajectory') parser.add_argument('--info', type=str, help='File containing training information') parser.add_argument('--light_file', type=str, nargs='+', action='append', help='Name of file(s) containing light information') for item in parameters.keys(): default, help = parameters[item] # print(f'default={default}, help={help}') if isinstance(default, list): a = default[0] parser.add_argument('--' + item, type=type(a), nargs=len(default), default=default, help=help) elif isinstance(default, bool): parser.add_argument('--' + item, action='store_true', help=help) else: parser.add_argument('--' + item, type=type(default), default=default, help=help) parser.add_argument('--is_log', action='store_true', help='Indicate that the dataset has been log converted beforehand') parser.add_argument('--do_log', action='store_true', help='Log convert the dataset before visualizing it') args = parser.parse_args() view(args)
18,377
38.952174
232
py
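The log transform in view_surface.py is paired with an inverse: log_xform stores log(z + log_offset) as the surface height, and the isoline relabeling recovers the original loss with exp(v) - log_offset. A minimal sketch of that round trip, using only numpy; the array and offset values below are illustrative, not taken from the repo:

import numpy as np

log_offset = 0.1                          # same role as parameters['log_offset']
loss = np.array([10.0, 1.0, 0.05])        # hypothetical loss samples

heights = np.log(loss + log_offset)       # what log_xform writes into the z coordinate
recovered = np.exp(heights) - log_offset  # what the isoline relabeling computes

assert np.allclose(recovered, loss)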
Fine-tuning-NOs
Fine-tuning-NOs-master/visualization_code/vtk_colors.py
import vtk import argparse from scipy import interpolate import math from matplotlib import pyplot as plt import json import sys import os import numpy as np import random from vtk.util.numpy_support import * ''' Helper functions to create color palettes and color maps ''' # Colorful axis orientation cube def make_cube_axis_actor(dims): colors = vtk.vtkNamedColors() # Annotated Cube setup annotated_cube = vtk.vtkAnnotatedCubeActor() annotated_cube.SetFaceTextScale(0.366667) # Cartesian labeling annotated_cube.SetXPlusFaceText('{}+'.format(dims[0])) annotated_cube.SetXMinusFaceText('{}-'.format(dims[0])) annotated_cube.SetYPlusFaceText('{}+'.format(dims[1])) annotated_cube.SetYMinusFaceText('{}-'.format(dims[1])) annotated_cube.SetZPlusFaceText('{}+'.format(dims[2])) annotated_cube.SetZMinusFaceText('{}-'.format(dims[2])) # Change the vector text colors annotated_cube.GetTextEdgesProperty().SetColor( colors.GetColor3d('Black')) annotated_cube.GetTextEdgesProperty().SetLineWidth(1) annotated_cube.GetXPlusFaceProperty().SetColor( colors.GetColor3d('Green')) annotated_cube.GetXMinusFaceProperty().SetColor( colors.GetColor3d('Green')) annotated_cube.GetYPlusFaceProperty().SetColor( colors.GetColor3d('Red')) annotated_cube.GetYMinusFaceProperty().SetColor( colors.GetColor3d('Red')) annotated_cube.GetZPlusFaceProperty().SetColor( colors.GetColor3d('Yellow')) annotated_cube.GetZMinusFaceProperty().SetColor( colors.GetColor3d('Yellow')) annotated_cube.SetXFaceTextRotation(90) annotated_cube.SetYFaceTextRotation(180) annotated_cube.SetZFaceTextRotation(-90) annotated_cube.GetCubeProperty().SetOpacity(0) #return annotated_cube # Colored faces cube setup cube_source = vtk.vtkCubeSource() cube_source.Update() face_colors = vtk.vtkUnsignedCharArray() face_colors.SetNumberOfComponents(3) # x face_colors.InsertNextTypedTuple(colors.GetColor3ub('Red')) face_colors.InsertNextTypedTuple(colors.GetColor3ub('Red')) # y face_colors.InsertNextTypedTuple(colors.GetColor3ub('Green')) face_colors.InsertNextTypedTuple(colors.GetColor3ub('Green')) # z face_colors.InsertNextTypedTuple(colors.GetColor3ub('Blue')) face_colors.InsertNextTypedTuple(colors.GetColor3ub('Blue')) cube_source.GetOutput().GetCellData().SetScalars(face_colors) cube_source.Update() m = vtk.vtkPolyDataMapper() m.SetInputData(cube_source.GetOutput()) m.Update() a = vtk.vtkActor() a.SetMapper(m) # Assemble the colored cube and annotated cube texts into a composite prop. 
assembly = vtk.vtkPropAssembly()
    assembly.AddPart(annotated_cube)
    assembly.AddPart(a)
    return assembly

def create_vtk_colors(values):
    unique_vals = np.unique(values)
    nvals = unique_vals.shape[0]
    random.seed(a=13081975)
    unique_colors = np.array([random.randrange(0, 255) for i in range(3 * nvals)])
    # map each value to the index of its unique value
    ids = np.searchsorted(unique_vals, values, side='left')
    # pick the color triple associated with each index
    colors = np.array([[unique_colors[3 * k], unique_colors[3 * k + 1], unique_colors[3 * k + 2]] for k in ids])
    return numpy_to_vtk(colors, array_type=vtk.VTK_UNSIGNED_CHAR)

def import_palette(palette_name='viridis', N=16):
    try:
        cmap = plt.get_cmap(palette_name, N)
    except Exception:
        raise ValueError('invalid palette name or number of colors')
    lut = cmap(X=range(N))
    colors = vtk.vtkColorSeries()
    colors.ClearColors()
    colors.SetNumberOfColors(N)
    for i in range(N):
        rgba = lut[i]
        rgb = [int(rgba[0] * 255.), int(rgba[1] * 255.), int(rgba[2] * 255.)]
        colors.SetColor(i, vtk.vtkColor3ub(rgb))
    colors.SetColorSchemeName('{} ({})'.format(palette_name, N))
    return colors

def make_colormap(scheme_name, ctrl_pts):
    colors = vtk.vtkColorSeries()
    m = colors.GetNumberOfColorSchemes()
    g = colors.SetColorSchemeByName(scheme_name)
    if g == m:
        # the requested scheme is not a VTK built-in: import it from matplotlib
        try:
            colors = import_palette(scheme_name)
        except Exception:
            print('unable to find requested color map: {}'.format(scheme_name))
            raise
    else:
        print(f'Requested color scheme {scheme_name} has index {g}')
    n = colors.GetNumberOfColors()
    if len(ctrl_pts) == 2:
        # spread n control points across the given [min, max] range
        f = interpolate.interp1d(x=[0, n - 1], y=ctrl_pts)
        ctrl_pts = f(range(n))
    elif len(ctrl_pts) != n:
        raise ValueError('Numbers of colors and control points don\'t match')
    cmap = vtk.vtkColorTransferFunction()
    for i in range(n):
        c = colors.GetColor(i)
        d = [float(c[j]) / 255. for j in range(3)]
        cmap.AddRGBPoint(ctrl_pts[i], d[0], d[1], d[2])
    return cmap
5,278
33.503268
112
py
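A possible usage sketch for make_colormap, assuming vtk and matplotlib are installed and vtk_colors.py is importable; the value range is illustrative. A two-entry control range is stretched over the whole palette and a vtkColorTransferFunction comes back:

import vtk
from vtk_colors import make_colormap

# spread the 'viridis' palette over loss values in [0, 5]
cmap = make_colormap('viridis', [0.0, 5.0])
print(cmap.GetColor(2.5))    # interpolated (r, g, b) at mid-range

mapper = vtk.vtkPolyDataMapper()
mapper.SetLookupTable(cmap)  # the same object view_surface.py hands to its mappers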
Fine-tuning-NOs
Fine-tuning-NOs-master/visualization_code/projection.py
""" Project a model or multiple models to a plane spaned by given directions. """ import numpy as np import torch import os import copy import h5py import sys import random from projection_helper import sizeof, shapeof sys.path.append('/Users/xmt/code/github/loss-landscape') import net_plotter import h5_util import tqdm from sklearn.decomposition import PCA, TruncatedSVD from scipy.linalg import svd def boolean_query(prompt): asw = input(prompt) asw = asw.lower() if asw=='y' or asw=='yes' or asw=='1' or asw=='t' or asw=='true': return True elif asw=='n' or asw=='no' or asw=='0' or asw=='f' or asw=='false': return False else: print('Warning: unrecognized answer. Assuming no.') return True def tensorlist_to_tensor(weights): """ Concatenate a list of tensors into one tensor. Args: weights: a list of parameter tensors, e.g. net_plotter.get_weights(net). Returns: concatnated 1D tensor """ return torch.cat([w.view(w.numel()) if w.dim() > 1 else torch.FloatTensor(w) for w in weights]) def nplist_to_tensor(nplist): """ Concatenate a list of numpy vectors into one tensor. Args: nplist: a list of numpy vectors, e.g., direction loaded from h5 file. Returns: concatnated 1D tensor """ v = [] for d in nplist: w = torch.tensor(d*np.float64(1.0)) # Ignoreing the scalar values (w.dim() = 0). if w.dim() > 1: v.append(w.view(w.numel())) elif w.dim() == 1: v.append(w) return torch.cat(v) def npvec_to_tensorlist(direction, params): """ Convert a numpy vector to a list of tensors with the same shape as "params". Args: direction: a list of numpy vectors, e.g., a direction loaded from h5 file. base: a list of parameter tensors from net Returns: a list of tensors with the same shape as base """ if isinstance(params, list): w2 = copy.deepcopy(params) idx = 0 n = 0 for w in w2: n = n+w.numel() w.copy_(torch.tensor(direction[idx:idx + w.numel()]).view(w.size())) idx += w.numel() assert(idx == len(direction)) return w2 else: s2 = [] idx = 0 for (k, w) in params.items(): s2.append(torch.Tensor(direction[idx:idx + w.numel()]).view(w.size())) idx += w.numel() assert(idx == len(direction)) return s2 def cal_angle(vec1, vec2): """ Calculate cosine similarities between two torch tensors or two ndarraies Args: vec1, vec2: two tensors or numpy ndarraies """ if isinstance(vec1, torch.Tensor) and isinstance(vec1, torch.Tensor): return torch.dot(vec1, vec2)/(vec1.norm()*vec2.norm()).item() elif isinstance(vec1, np.ndarray) and isinstance(vec2, np.ndarray): return np.ndarray.dot(vec1, vec2)/(np.linalg.norm(vec1)*np.linalg.norm(vec2)) def project_1D(w, d): """ Project vector w to vector d and get the length of the projection. Args: w: vectorized weights d: vectorized direction Returns: the projection scalar """ assert len(w) == len(d), 'dimension does not match for w (' + str(len(w)) + ') and d (' + str(len(d)) + ')' scale = torch.dot(w, d)/d.norm() return scale.item() def lift_1D(x, d): return x*d/d.norm() def project_2D(d, dx, dy, proj_method): """ Project vector d to the plane spanned by dx and dy. Args: d: vectorized weights dx: vectorized direction dy: vectorized direction proj_method: projection method Returns: x, y: the projection coordinates """ if proj_method == 'cos': # when dx and dy are orthorgonal x = project_1D(d, dx) y = project_1D(d, dy) elif proj_method == 'lstsq': # solve the least squre problem: Ax = d A = np.vstack([dx.numpy(), dy.numpy()]).T [x, y] = np.linalg.lstsq(A, d.numpy())[0] return x, y def project_3D(d, dx, dy, dz, proj_method): """ Project vector d to the 3D space spanned by dx, dy, and dz. 
Args: d: vectorized weights dx: vectorized direction dy: vectorized direction dz: vectorized direction proj_method: projection method Returns: x, y, z: the projection coordinates """ if proj_method == 'cos': # when dx and dy are orthorgonal x = project_1D(d, dx) y = project_1D(d, dy) z = project_1D(d, dz) elif proj_method == 'lstsq': # solve the least squre problem: Ax = d A = np.vstack([dx.numpy(), dy.numpy(), dz.numpy()]).T [x, y, z] = np.linalg.lstsq(A, d.numpy())[0] return x, y, z def project_ND(d, axes, proj_method): """ Project vector d to the space spanned by all axes. Args: d: vectorized weights axes[0, ...]: vectorized direction proj_method: projection method Returns: [x, y, z, ...]: the projection coordinates """ ndim = len(axes) coords = [] if proj_method == 'cos': # when dx and dy are orthorgonal for n, axis in enumerate(axes): coords.append(project_1D(d, axis)) elif proj_method == 'lstsq': # solve the least squre problem: Ax = d A = np.vstack([axis.numpy() for axis in axes]).T coords = np.linalg.lstsq(A, d.numpy())[0] return coords def lift_ND(coords, axes, proj_method): """ Lift coordinates to the embedding space. Args: coords: PCA coordinates axes[0, ...]: basis vectors of PCA space proj_method: projection method Returns: t: vectorized weight difference """ ndim = len(axes) assert (ndim == len(coords)) t = torch.zeros_like(axes[0]) for i, x in enumerate(coords): t += lift_1D(x, axes[i]) return t def load_all_directions(dir_file): """ Load direction(s) from the direction file.""" directions = [] f = h5py.File(dir_file, 'r') lastdim = 0 while True: label = 'direction_{}'.format(lastdim) if label in f.keys(): directions.append(h5_util.read_list(f, label)) lastdim += 1 else: break print(f'directions contain {len(directions)} vectors') return directions def project_trajectory(args, w, s, callback): """ Project the optimization trajectory onto the given two directions. Args: args.dir_file: the h5 file that contains the directions w: weights of the final model s: states of the final model model_name: the name of the model args.steps: the list of available checkpoint indices args.dir_type: the type of the direction, weights or states args.proj_method: cosine projection args.dimension: 2, 3 or higher dimensional plot callback: method to obtain model from step index Returns: proj_file: the projection filename """ proj_file = args.dir_file + '_proj_' + args.proj_method + '.h5' if os.path.exists(proj_file): replace = input('The projection file exists! 
Replace?') if replace: os.remove(proj_file) else: return proj_file # read directions and convert them to vectors directions = load_all_directions(args.dir_file) axes = [] for d in directions: axes.append(nplist_to_tensor(d)) print(f'directions contains {len(directions)} axes') ndim = len(directions) refw = w w = transform_tensors(w, args.complex) if args.complex == 'imaginary' or args.complex == 'real': debug = False allcoords = [ [] for i in range(ndim) ] other_coords = [ [] for i in range(ndim) ] errors = [] pbar = tqdm.tqdm(args.steps, desc='Projecting learning steps', ncols=100) for step in pbar: net2 = callback(step) if args.dir_type == 'weights': w2 = net_plotter.get_weights(net2) w2 = transform_tensors(w2, args.complex) d = net_plotter.get_diff_weights(w, w2) elif args.dir_type == 'states': s2 = net2.state_dict() d = net_plotter.get_diff_states(s, s2) d = tensorlist_to_tensor(d) coords = project_ND(d, axes, args.proj_method) for i in range(ndim): allcoords[i].append(coords[i]) skip = False if os.path.exists(proj_file): skip = boolean_query(f'{proj_file} exists already. Replace? ') if not skip: os.remove(proj_file) if not skip: f = h5py.File(proj_file, 'w') for i in range(ndim): label = 'proj_{:0>2d}coord'.format(i) f[label] = np.array(allcoords[i]) f.close() return proj_file def real_type(w): if w.dtype is torch.complex64: return torch.float32 elif w.dtype is torch.complex128: return torch.float64 else: return w.dtype def from_values(t, start, length, wref): return t[start:start+length].view(wref.shape).view(real_type(wref)) def untransform_tensors(w, refw, what): weights = [] if what.lower() == 'split': with torch.no_grad(): for wi, refwi in zip(w, refw): nrows = wi.shape[0] realnrows = int(nrows/2) nrows = realnrows if refwi.dtype is torch.float32 or refwi.dtype is torch.float64: # real tensor was padded with as many zeros to signify weights.append(wi[0:nrows].view(refwi.dtype)) elif refwi.dtype is torch.complex64 or refwi.dtype is torch.complex128: # complex tensor was converted to real followed by imaginary values: re = wi[0:nrows] im = wi[nrows:] weights.append(torch.complex(re, im)) else: raise ValueError('Unrecognized data type for this weight: {}'.format(w.dtype)) elif what.lower() == 'real' or what.lower() == 'skip' or what.lower() == 'ignore': with torch.no_grad(): for wi, refwi in zip(w, refw): if refwi.dtype is torch.float32 or refwi.dtype is torch.float64: weights.append(wi) elif refwi.dtype is torch.complex64 or refwi.dtype is torch.complex128: re = wi im = torch.zeros_like(re) weights.append(torch.complex(re, im)) else: raise ValueError('Unrecognized data type for this weight: {}'.format(w.dtype)) elif what.lower() == 'imaginary': at = 0 with torch.no_grad(): for wi, refwi in zip(w, refw): if refwi.dtype is torch.float32 or refwi.dtype is torch.float64: weights.append(torch.zeros_like(refwi)) # real values were discarded elif refwi.dtype is torch.complex64 or refwi.dtype is torch.complex128: im = wi re = torch.zeros_like(im) weights.append(torch.complex(re, im)) else: raise ValueError('Unrecognized data type for this weight: {}'.format(w.dtype)) else: raise ValueError('Unrecognized complex flattening name') return weights def pca_coords_to_weights(coords, axes, refw, what): ''' Transform coordinates in PCA space to weights of model coords: coordinates in PCA space axes: PCA axes (as vectors) refw: weights of the final model used as origin of the reference frame ''' assert(len(coords) == len(axes)) t = torch.zeros_like(axes[0]) # t: lifted version of coords in 
transformed and flattened space for c,a in zip(coords, axes): t += c*a # w0: list of transformed reference weights w0 = transform_tensors(refw, what) # w: list of weight differences between transformed model and transformed ref model w = npvec_to_tensorlist(t, w0) # w1: list of transformed model weights w1 = [] for wi, w0i in zip(w,w0): w1.append(wi + w0i) # w2: list of untransformed weights w2 = untransform_tensors(w1, refw, what) return w2 def transform_tensor(t, what, verbose=False): if not isinstance(t, torch.Tensor): print(f'WARNING: not a tensor type in transform_tensor ({type(t)})') print(f'size of list: {len(t)}, shape = {shapeof(t)}') assert False return if what.lower() == 'imaginary': if not torch.is_complex(t): return None else: return t.imag elif what.lower() == 'split': if not torch.is_complex(t): return torch.cat((t, torch.zeros_like(t)), dim=0) else: return torch.cat((t.real, t.imag), dim=0) elif what.lower() == 'ignore' or what.lower() == 'real': if not torch.is_complex(t): return t else: return t.real def transform_tensors(t, what, verbose=False): if verbose: print(f'entering transform_tensor: what={what}') if what.lower() == 'keep': if verbose: print('leaving tensor (list) unchanged') return t elif isinstance(t, list): t1 = [] for w in t: w2 = transform_tensor(w, what, verbose) if w2 is not None: if verbose: print('w2 is not None, size={}'.format(w2.numel())) t1.append(w2) return t1 else: return transform_tensor(t, what, verbose) def setup_PCA_directions(args, callback, w, s, verbose=False): """ Find PCA directions for the optimization path from the initial model to the final trained model. Returns: dir_name: the h5 file that stores the directions. """ if verbose: print(f'input tensor w contains {sizeof(w)} values and has shape {shapeof(w)}') actual_dim = np.min([args.dimension, len(args.steps)]) if actual_dim != args.dimension: print(f'WARNING: unable to compute {args.dimension} PCA dimensions. Only {actual_dim} will be computed') args.dimension = actual_dim # Name the .h5 file that stores the PCA directions. 
    folder_name = args.path + '/PCA_' + args.dir_type
    if args.ignore:
        folder_name += '_ignore=' + args.ignore
    folder_name += '_save_epoch=' + str(args.steps[-1])
    folder_name += '_complex=' + str(args.complex)
    folder_name += '_dim=' + str(args.dimension)
    os.makedirs(folder_name, exist_ok=True)
    dir_name = os.path.join(folder_name, 'directions.h5')
    if verbose:
        print(f'PCA directions computed from learning path will be stored in {dir_name}')

    # skip if the direction file exists and already contains the PCA results
    if os.path.exists(dir_name):
        f = h5py.File(dir_name, 'a')
        done = 'singular_values_' in f.keys()
        f.close()
        if done:
            return dir_name

    # load models and prepare the optimization path matrix
    matrix = []
    wsave = w
    # we will work with the transformed (real) version of the models
    w = transform_tensors(w, args.complex)
    pbar = tqdm.tqdm(args.steps, ncols=100, desc='Loading training steps')
    for step in pbar:
        pbar.set_description('step #{}'.format(step))
        net2 = callback(step)
        if args.dir_type == 'weights':
            w2 = net_plotter.get_weights(net2)
            if verbose:
                print('transforming tensor {}'.format(shapeof(w2)))
            # compute the real version of the weights
            w2 = transform_tensors(w2, args.complex)
            if verbose:
                print('into tensor {}'.format(shapeof(w2)))
            d = net_plotter.get_diff_weights(w, w2)
        elif args.dir_type == 'states':
            s2 = net2.state_dict()
            d = net_plotter.get_diff_states(s, s2)
        if args.ignore == 'biasbn':
            net_plotter.ignore_biasbn(d)
        d = tensorlist_to_tensor(d)
        if verbose:
            print('converting that tensor into {}'.format(shapeof(d)))
        if d is not None:
            matrix.append(d.numpy())

    # Perform PCA on the optimization path matrix
    if verbose:
        print("Perform PCA on the models")
    matrix = np.array(matrix)
    if verbose:
        print(matrix.shape)
    A = torch.from_numpy(matrix)
    _U, _S, _V = torch.pca_lowrank(A, q=args.dimension, center=True)
    covar = torch.square(_S) / (len(args.steps) - 1)
    principal_directions = _V.numpy()
    pcs = [np.array(principal_directions[:, i]) for i in range(principal_directions.shape[1])]
    if verbose:
        print(f'there are {len(pcs)} principal components')

    # convert vectorized directions to the same shape as models to save in h5 file.
    if verbose:
        print(f'type of w is {type(w)}')
    xi_directions = []
    if args.dir_type == 'weights':
        for pc in pcs:
            xi_directions.append(npvec_to_tensorlist(pc, w))
    elif args.dir_type == 'states':
        for pc in pcs:
            xi_directions.append(npvec_to_tensorlist(pc, s))
    if args.ignore == 'biasbn':
        for xd in xi_directions:
            net_plotter.ignore_biasbn(xd)
    if verbose:
        print(f'dir_name={dir_name}')

    if os.path.exists(dir_name):
        replace = boolean_query(f'{dir_name} exists already. Replace? ')
        if replace:
            os.remove(dir_name)
        else:
            return dir_name
    f = h5py.File(dir_name, 'w')
    for i, xd in enumerate(xi_directions):
        label = 'direction_{}'.format(i)
        h5_util.write_list(f, label, xd)
    f['singular_values_'] = _S
    f['covariance_values'] = covar
    f.close()
    if verbose:
        print('transformed PCA directions saved in: %s' % dir_name)

    complexdir_name = dir_name[:-4] + '_complex.h5'
    f = h5py.File(complexdir_name, 'w')
    for i, xd in enumerate(xi_directions):
        label = 'direction_{}'.format(i)
        x = untransform_tensors(xd, wsave, args.complex)
        if verbose:
            print(f'after untransformation:\n\tx={shapeof(x)}\n\treference={shapeof(wsave)}')
        h5_util.write_list(f, label, x)
    f.close()
    return dir_name
18,597
31.742958
112
py
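The 'cos' projection in project_1D/project_ND is just an inner product against each (assumed orthogonal) direction, and lift_1D is its inverse on the spanned subspace. A self-contained torch sketch of that math on made-up vectors, useful as a sanity check that lifting the projected coordinates reconstructs a vector that lies in the plane:

import torch

dx = torch.tensor([1.0, 0.0, 0.0])
dy = torch.tensor([0.0, 1.0, 0.0])
d = torch.tensor([2.0, -3.0, 0.0])           # a weight difference living in the plane

x = (torch.dot(d, dx) / dx.norm()).item()    # what project_1D(d, dx) computes
y = (torch.dot(d, dy) / dy.norm()).item()
lifted = x * dx / dx.norm() + y * dy / dy.norm()  # sum of lift_1D contributions
assert torch.allclose(lifted, d)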
Fine-tuning-NOs
Fine-tuning-NOs-master/visualization_code/vtk_misc_helper.py
import vtk
import os

'''
Misc helper functions
'''

def is_algorithm(object):
    return isinstance(object, vtk.vtkAlgorithm)

def is_dataset(object):
    return isinstance(object, vtk.vtkDataSet)

def connect(input, output):
    if is_algorithm(input) and is_algorithm(output):
        output.SetInputConnection(input.GetOutputPort())
    elif is_dataset(input) and is_algorithm(output):
        output.SetInputData(input)
    else:
        raise TypeError(f'Invalid types {type(input)} / {type(output)} in connect')
519
23.761905
83
py
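connect() dispatches on whether the upstream object is an algorithm (wired by output port) or a dataset (wired by SetInputData), so pipelines can be assembled uniformly. A short usage sketch, assuming vtk and this helper are importable:

import vtk
from vtk_misc_helper import connect

sphere = vtk.vtkSphereSource()     # an algorithm: connected via its output port
normals = vtk.vtkPolyDataNormals()
connect(sphere, normals)

sphere.Update()
polydata = sphere.GetOutput()      # a dataset: connected via SetInputData
mapper = vtk.vtkPolyDataMapper()
connect(polydata, mapper)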
Fine-tuning-NOs
Fine-tuning-NOs-master/visualization_code/make_landscape.py
import sys
import os
import argparse
import view_surface
import combine_files

'''
Convenience program that allows to run the entire surface construction and
visualization pipeline, starting with a set of randomly organized loss samples
stored in csv files and pre-computed learning trajectory projection and first
two principal components
'''

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Create visualization of loss surface from a set of csv files containing samples')
    parser.add_argument('-p', '--path', default='', type=str, help='Path to root of data directory')
    parser.add_argument('-t', '--trajectory', default='', help='File containing training trajectory')
    parser.add_argument('-o', '--output', required=True, help='Name of output file for surface and projected trajectory')
    parser.add_argument('--x', nargs=3, type=float, help='Range of x sampling: xmin xmax xnum')
    parser.add_argument('--y', nargs=3, type=float, help='Range of y sampling: ymin ymax ynum')
    parser.add_argument('--trim', action='store_true', help='Trim surface to specified x, y bounds')
    parser.add_argument('--show_edges', action='store_true', help='Show surface triangulation')
    parser.add_argument('--flatten', action='store_true', help='Create 2D representation of loss surface with isocontours')
    parser.add_argument('--skip', action='store_true', help='Skip surface creation if file already present')
    parser.add_argument('--show_steps', action='store_true', help='Show training steps')
    parser.add_argument('--traj_diameter', type=float, default=0.5, help='Diameter of trajectory tube representation')
    parser.add_argument('--step_diameter', type=int, default=None, help='Diameter of spherical step representation')
    parser.add_argument('--color_surface', action='store_true', help='Apply viridis color mapping to loss surface')
    parser.add_argument('--range', type=float, nargs=2, default=[0., 0.], help='Value range to consider for color mapping')
    parser.add_argument('--iso_diameter', type=float, default=3, help='Diameter of isocontours')
    parser.add_argument('--show_isovalues', action='store_true', help='Show loss values on level sets')
    parser.add_argument('--font_size', type=int, default=20, help='Font size for isocontour labels')
    parser.add_argument('--size', type=int, nargs=2, default=[1920, 1080], help='Window resolution')
    parser.add_argument('--traj_palette', type=str, default='Oranges', help='Palette to use to color code training steps')
    parser.add_argument('--show_colorbars', action='store_true', help='Display color bars')
    parser.add_argument('--frame_basename', type=str, default='frame', help='Basename of exported frames')
    parser.add_argument('--camera_basename', type=str, default='camera', help='Basename of camera settings export files')
    parser.add_argument('--camera_file', type=str, default='', help='File containing camera setting to use')
    parser.add_argument('--light_file', type=str, default='', nargs='+', help='File containing light information')
    parser.add_argument('--surf_name', type=str, help='Name of loss surface')
    parser.add_argument('--background', type=float, nargs=3, default=[0., 0., 0.], help='Window background color')
    args = parser.parse_args()
    print(args)

    if os.path.exists(args.output) and args.skip:
        print(f'{args.output} is already created. Skipping.')
    elif not args.trajectory:
        print('Missing trajectory information. Unable to proceed')
        sys.exit(0)
    else:
        combine_files.compute(args)

    args.path = ''
    args.surface = args.output
    # backfill every view_surface option that this parser does not define
    for p in view_surface.parameters.keys():
        if p not in args:
            setattr(args, p, view_surface.parameters[p][0])
            print(f'{p} was missing and assigned {view_surface.parameters[p][0]} value')
    setattr(args, 'do_log', False)
    setattr(args, 'is_log', False)
    args.info = None
    base, ext = os.path.splitext(args.output)
    args.trajectory = base + "_trajectory" + ext
    view_surface.view(args)
4,117
57.828571
131
py
Fine-tuning-NOs
Fine-tuning-NOs-master/visualization_code/create_surface.py
""" Calculate the loss surface in parallel. Code adapted from Tom Goldstein's implementation of the 2018 NeurIPS paper: Hao Li, Zheng Xu, Gavin Taylor, Christoph Studer and Tom Goldstein. Visualizing the Loss Landscape of Neural Nets. NIPS, 2018. Github: https://github.com/tomgoldstein/loss-landscape Given PCA directions, the code samples the loss associeted with models whose weights lie in the corresponding two-dimensional weight parameterization plane. """ import numpy as np import torch import copy import math import h5py import os import argparse import sys import json import csv # import Tom Goldstein's loss-landscape library from loss_landscape import plot_2D, plot_1D, net_plotter, mpi4pytorch, scheduler, create_trajectory sys.path.append('../') from main import choosing_model, datasetFactory import yaml import utilities from projection import setup_PCA_directions, project_trajectory from projection_helper import get_loader from create_trajectory import evaluate import plot_2D import time import socket import sys import numpy as np import torchvision import torch.nn as nn import tqdm import dataloader import evaluation import projection as proj from projection import shapeof, sizeof import plotter_helper as plotter import plot_2D import plot_1D import model_loader import scheduler import mpi4pytorch as mpi def name_surface_file(args, dir_file): # skip if surf_file is specified in args if args.surf_file: return args.surf_file # use args.dir_file as the prefix surf_file = dir_file # resolution surf_file += '_[%s,%s,%d]' % (str(args.xmin), str(args.xmax), int(args.xnum)) if args.y: surf_file += 'x[%s,%s,%d]' % (str(args.ymin), str(args.ymax), int(args.ynum)) # dataloder parameters if args.raw_data: # without data normalization surf_file += '_rawdata' if args.data_split > 1: surf_file += '_datasplit=' + str(args.data_split) + '_splitidx=' + str(args.split_idx) return surf_file + ".h5" def setup_surface_file(args, surf_file, dir_file): print('-------------------------------------------------------------------') print('setup_surface_file') print('-------------------------------------------------------------------') print('surf_file is {}'.format(surf_file)) print('dir_file is {}'.format(dir_file)) # skip if the direction file already exists if os.path.exists(surf_file): f = h5py.File(surf_file, 'r') if (args.y and 'ycoordinates' in f.keys()) or 'xcoordinates' in f.keys(): f.close() print ("%s is already set up" % surf_file) return f = h5py.File(surf_file, 'a' if os.path.exists(surf_file) else 'w') f['dir_file'] = dir_file # Create the coordinates(resolutions) at which the function is evaluated xcoordinates = np.linspace(args.xmin, args.xmax, num=int(args.xnum)) f['xcoordinates'] = xcoordinates if args.y: ycoordinates = np.linspace(args.ymin, args.ymax, num=int(args.ynum)) f['ycoordinates'] = ycoordinates f.close() return surf_file def to_path(checkpath, config): return os.path.join(checkpath, config['ckpt']['save_dir']) def to_filename(checkpath, config, filename): return os.path.join(to_path(checkpath, config), filename) def crunch(surf_file, net, w, s, d, loss_key, comm, rank, args, samples, loss_func): """ Calculate the loss values of modified models in parallel using MPI. 
Each individual rank saves its results in a separate csv file that can then be consolidated into a surface geometry using 'combine.py' """ coords = samples[rank] print('Computing %d values for rank %d'% (len(coords), rank)) start_time = time.time() total_sync = 0.0 fname = surf_file + f'_rank={rank}.csv' if os.path.exists(fname): print(f'Creating new filename since {fname} already exists') tstr = time.asctime(time.gmtime(time.time())).replace(' ', '_') fname = surf_file + f'{tstr}_rank={rank}.csv' # Note: the CSV file cannot stay open otherwise changes will only be # recorded upon completion of the loop. Given the odds that a MPI # job is cut short on an HPC architecture, we elect instead to save # each loss value in a csv file as soon as it is computed. with open(fname, 'w') as csvfile: writer = csv.DictWriter(csvfile, fieldnames=['x', 'y', 'loss', 'time']) writer.writeheader() # Loop over all uncalculated loss values pbar = tqdm.tqdm(coords, total=len(coords), ncols=100, desc=f'Sampling loss surface for rank {rank}') for c in pbar: # Load the weights corresponding to those coordinates into the net if args.dir_type == 'weights': plotter.set_weights(net.module if args.ngpu > 1 else net, w, d, c) elif args.dir_type == 'states': plotter.set_states(net.module if args.ngpu > 1 else net, s, d, c) # Record the time to compute the loss value loss_start = time.time() loss, mse = loss_func(net, rank) loss_compute_time = time.time() - loss_start with open(fname, 'a') as csvfile: writer = csv.DictWriter(csvfile, fieldnames=['x', 'y', 'loss', 'time']) writer.writerow({'x': c[0], 'y': c[1], 'loss': loss, 'time': loss_compute_time}) total_time = time.time() - start_time print('Rank %d done! Total time: %.2f' % (rank, total_time) ############################################################### # MAIN ############################################################### if __name__ == '__main__': parser = argparse.ArgumentParser(description='plotting loss surface', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('--mpi', '-m', action='store_true', help='use mpi') parser.add_argument('--cuda', action='store_true', help='use cuda') parser.add_argument('--threads', default=2, type=int, help='number of threads') parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use for each rank, useful for data parallel evaluation') parser.add_argument('--batch_size', default=128, type=int, help='minibatch size') # data parameters # model parameters parser.add_argument('-p', '--path', type=str, required=True, help='Path to checkpoint files') parser.add_argument('-c','--config_file', type=str, required=True, help='Path to the model configuration file') parser.add_argument('--filename', help='Filename of final model') parser.add_argument('--model_name', default='dummy model', help='model name') parser.add_argument('--loss_name', '-l', default='mse', help='loss functions: crossentropy | mse') parser.add_argument('--skip', default=None, type=int, help='Were to resume computation on all ranks') parser.add_argument('--samples', default=None, type=str, help='File containing the explicit list of surface locations to sample') # direction parameters parser.add_argument('--dir_file', required=True, help='specify the name of direction file, or the path to an existing direction file') parser.add_argument('--dir_type', default='weights', help='direction type: weights | states (including BN\'s running_mean/var)') parser.add_argument('--x', type=float, nargs=3, default='-1 1 51', help='xmin xmax 
xnum') parser.add_argument('--y', type=float, nargs=3, default='-1 1 51', help='ymin ymax ynum') parser.add_argument('--testing', action='store_true', help='Sample testing loss (default: training loss)') parser.add_argument('--xnorm', default='', help='direction normalization: filter | layer | weight') parser.add_argument('--ynorm', default='', help='direction normalization: filter | layer | weight') parser.add_argument('--xignore', default='', help='ignore bias and BN parameters: biasbn') parser.add_argument('--yignore', default='', help='ignore bias and BN parameters: biasbn') parser.add_argument('--surf_file', default='', help='customize the name of surface file, could be an existing file.') # plot parameters parser.add_argument('--proj_file', default='', help='the .h5 file contains projected optimization trajectory.') parser.add_argument('--loss_max', default=5, type=float, help='Maximum value to show in 1D plot') parser.add_argument('--vmax', default=10, type=float, help='Maximum value to map') parser.add_argument('--vmin', default=0.1, type=float, help='Miminum value to map') parser.add_argument('--vlevel', default=0.5, type=float, help='plot contours every vlevel') parser.add_argument('--show', action='store_true', default=False, help='show plotted figures') parser.add_argument('--log', action='store_true', default=False, help='use log scale for loss values') parser.add_argument('--plot', action='store_true', default=False, help='plot figures after computation') args = parser.parse_args() args.raw_data = False args.data_split = 0 # reproducibility is already available by default in data setup # torch.manual_seed(10) #-------------------------------------------------------------------------- # Environment setup #-------------------------------------------------------------------------- torch.set_num_threads(4) if args.mpi: comm = mpi.setup_MPI() rank, nproc = comm.Get_rank(), comm.Get_size() else: comm, rank, nproc = None, 0, 1 # in case of multiple GPUs per node, set the GPU to use for each rank if args.cuda: device = torch.device('cuda') if not torch.cuda.is_available(): raise Exception('User selected cuda option, but cuda is not available on this machine') gpu_count = torch.cuda.device_count() torch.cuda.set_device(rank % gpu_count) print('Rank %d use GPU %d of %d GPUs on %s' % (rank, torch.cuda.current_device(), gpu_count, socket.gethostname())) else: device = torch.device('cpu') #-------------------------------------------------------------------------- # Check plotting resolution #-------------------------------------------------------------------------- try: if args.x is not None and args.y is not None: args.xmin, args.xmax, args.xnum = args.x args.xnum = int(args.xnum) args.ymin, args.ymax, args.ynum = args.y # (None, None, None) args.ynum = int(args.ynum) print(f'Surface sampling bounds: [{args.xmin}, {args.xmax}] x [{args.ymin}, {args.ymax}]') print(f'Sampling density: {args.xnum} x {args.ynum} = {args.xnum *args.ynum} samples') except: raise Exception('Improper format for x- or y-coordinates. 
Try something like -1 1 51') #-1:1:51') #-------------------------------------------------------------------------- # Load models and extract parameters #-------------------------------------------------------------------------- data = None if args.testing: loss_label = 'test_loss' else: loss_label = 'train_loss' config_file = args.config_file with open(config_file, 'r') as stream: config = yaml.load(stream, yaml.FullLoader) c_save = config["ckpt"] args.path = os.path.join(args.path, config['ckpt']['save_dir']) model = choosing_model(config) if args.testing: dataloader = get_loader(config, train=False, prefix='../') else: dataloader, _ = get_loader(config, train=True, prefix='../') myloss = utilities.LpLoss(size_average=False) #-------------------------------------------------------------------------- # load the final model #-------------------------------------------------------------------------- final_model = os.path.join(args.path, args.filename) checkpoint = torch.load(final_model, map_location=lambda storage, loc: storage) model.load_state_dict(checkpoint['state_dict']) if args.cuda: model.cuda() model.eval() w = plotter.get_weights(model) # initial parameters s = copy.deepcopy(model.state_dict()) # deepcopy since state_dict are references if args.cuda and args.ngpu > 1: # data parallel with multiple GPUs on a single node model = nn.DataParallel(model, device_ids=range(torch.cuda.device_count())) if args.samples is not None: print('Importing sampling locations from file') with open(args.samples, 'r') as fp: coords = json.load(fp) #print(f'{len(coords)} coordinates will be sampled') #print(f'type(coords) is {type(coords)}') #print(f'type(coords[0]) is {type(coords[0])}') #print(f'type(coords[0][0]) is {type(coords[0][0])}') elif args.x is not None and args.y is not None: coords = [ (x, y) for x in np.linspace(args.xmin, args.xmax, args.xnum) for y in np.linspace(args.ymin, args.ymax, args.ynum)] #print(f'type(coords) is {type(coords)}') #print(f'type(coords[0]) is {type(coords[0])}') #print(f'type(coords[0][0]) is {type(coords[0][0])}') else: raise ValueError('Missing information to determine sampling locations') n_per_rank = len(coords) // nproc rem = len(coords) - n_per_rank*nproc n_per_rank_0 = n_per_rank + rem print(f'each rank will sample {n_per_rank} positions') print('Assigning samples to ranks') counter = 0 samples = [ [] for i in range(nproc) ] samples[0] = coords[0:n_per_rank_0] for r in range(1,nproc): samples[r] = coords[n_per_rank_0 + (r-1)*n_per_rank : n_per_rank_0 + r*n_per_rank] print('done') if args.samples is None and args.skip is not None: for r in range(0, nproc): samples[r] = samples[r][args.skip:] # for i in range(n_per_rank+n_per_rank_extra): # samples[0].append(coords[i]) # counter += 1 # for r in range(1, nproc): # for i in range(n_per_rank): # samples[r].append(coords[counter]) # counter += 1 #-------------------------------------------------------------------------- # Setup the direction file and the surface file #-------------------------------------------------------------------------- dir_file = os.path.join(args.path, args.dir_file) if not args.surf_file: args.surf_file = f'{args.dir_file}_surface_[{args.xmin}-{args.xmax}]x[{args.ymin}-{args.ymax}]_{args.xnum}x{args.ynum}.h5' surf_file = os.path.join(args.path, args.surf_file) #if rank == 0: # setup_surface_file(args, surf_file, dir_file) # wait until master has setup the direction file and surface file #mpi.barrier(comm) # load directions directions = plotter.load_directions(dir_file) 
print(f'type(directions) is {type(directions)}') print(f'type(directions[0]) is {type(directions[0])}') print(f'type(directions[0][0]) is {type(directions[0][0])}') # calculate the cosine similarity of the two directions if False and len(directions) == 2 and rank == 0: similarity = proj.cal_angle(proj.nplist_to_tensor(directions[0]), proj.nplist_to_tensor(directions[1])) print('cosine similarity between x-axis and y-axis: %f' % similarity) class loss_callback: def __init__(self, dataset, loss, train, cuda): self.dataset = dataset self.loss = loss self.train = train self.cuda = cuda def __call__(self, model, rank=0, verbose=False): return evaluate(model, self.dataset, self.loss, train=self.train, verbose=verbose, cuda=self.cuda, rank=rank) #-------------------------------------------------------------------------- # Start the computation #-------------------------------------------------------------------------- crunch(surf_file, model, w, s, directions, loss_label, comm, rank, args, samples, loss_func=loss_callback(dataloader, myloss, not args.testing, args.cuda)) #-------------------------------------------------------------------------- # Plot figures #-------------------------------------------------------------------------- if args.plot and rank == 0: if args.y and args.proj_file: plot_2D.plot_contour_trajectory(surf_file, dir_file, args.proj_file, loss_label, vmin=args.vmin, vmax=args.vmax, vlevel=args.vlevel, show=args.show) elif args.y: plot_2D.plot_2d_contour(surf_file, loss_label, args.vmin, args.vmax, args.vlevel, args.show) else: plot_1D.plot_1d_loss_err(surf_file, args.xmin, args.xmax, args.loss_max, args.log, args.show)
16,827
42.25964
159
py
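The sample partitioning in create_surface.py's main block gives rank 0 the division remainder so that every coordinate is assigned to exactly one rank. The same logic as a small standalone function with a quick self-check (the function name is mine, not the repo's):

def split_samples(coords, nproc):
    """Assign len(coords) samples to nproc ranks; rank 0 absorbs the remainder."""
    n_per_rank = len(coords) // nproc
    n_first = n_per_rank + (len(coords) - n_per_rank * nproc)
    samples = [coords[0:n_first]]
    for r in range(1, nproc):
        samples.append(coords[n_first + (r - 1) * n_per_rank:
                              n_first + r * n_per_rank])
    return samples

chunks = split_samples(list(range(10)), 3)        # sizes 4, 3, 3
assert sum(len(c) for c in chunks) == 10          # every sample assigned once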
Fine-tuning-NOs
Fine-tuning-NOs-master/visualization_code/plot_curves.py
from mpl_toolkits.mplot3d import Axes3D from mpl_toolkits import mplot3d from matplotlib import pyplot as plt from matplotlib import cm import h5py import argparse import numpy as np from os.path import exists import seaborn as sns import yaml import os from scatterplotmatrix import scatterplot_matrix as splom def plot_trajectories(traj_files, image_name='somecurves.png', show=False): """ Plot optimization trajectory on the plane spanned by given directions.""" curves = [] print(f'There are {len(traj_files)} filenames in input') # all3d = True for traj in traj_files: fname = traj['filename'] label = traj['label'] complex = traj['complex_handling'] print(f'filename = {fname}') assert exists(fname), f'Projection file {fname} does not exist.' f = h5py.File(fname, 'r') allcoords = [] last_dim = 0 print(f'f.keys are {f.keys()}') while True: name = 'proj_{:0>2d}coord'.format(last_dim) if name in f.keys(): allcoords.append(list(f[name])) last_dim += 1 else: break # color if fname.find('sFNO_eps') != -1: color = 'gold' elif fname.find('sFNO') != -1: color = 'blue' else: color = 'red' # marker and linewidth and dashes linewidth = 1 dashes = (1,0) if fname.find('debug') != -1 or label.find('debug') != -1: marker = 'd' linewidth = 3 dashes = (2,1) elif fname.find('split') != -1: marker = '.' elif fname.find('real') != -1: marker = 's' else: marker = '^' if complex != 'split': continue curves.append({ 'coords': allcoords, 'label': label, 'complex': complex, 'color': color, 'marker': marker, 'linewidth': linewidth, 'dashes': dashes }) f.close() print(f'lastdim={last_dim}') print(f'There are {len(curves)} curves afterwards') ndim = len(curves[0]['coords']) fig = None if ndim == 3: fig = plt.figure() ax = plt.axes(projection='3d') for i, c in enumerate(curves): color = c['color'] coords = c['coords'] marker = c['marker'] ax.plot3D(coords[0], coords[1], coords[2], color=color, label=c['label'] + '/' + c['complex'], marker=marker) # ax.scatter(c['x'], c['y'], c['z'], color='black') ax.legend(bbox_to_anchor=(0.95, 1), loc='upper left', borderaxespad=0) ax.set_xlabel('X') ax.set_ylabel('Y') ax.set_zlabel('Z') elif ndim == 2: fig = plt.figure(figsize=(12, 6)) for i, c in enumerate(curves): color = c['color'] coords = c['coords'] marker = c['marker'] plt.plot(coords[0], coords[1], marker=marker, color=color, label=c['label'] + '/' + c['complex'], linewidth=c['linewidth'], dashes=c['dashes']) plt.xlabel('X') plt.ylabel('Y') plt.legend(loc='upper left', bbox_to_anchor=(0.95, 1), borderaxespad=0) elif ndim > 3: print('plotting splom') data = [ c['coords'] for c in curves ] names = [ 'PCA dim {}'.format(i) for i in range(len(data[0])) ] colors = [ c['color'] for c in curves ] markers = [ c['marker'] for c in curves] labels = [ c['label'] + '/' + c['complex'] for c in curves ] fig = splom(data, names=names, labels=labels, colors=colors, markers=markers) fig.savefig('somecurves.png', dpi=300, bbox_inches='tight', format='png') plt.show() def to_path(checkpath, config): return os.path.join(checkpath, config['ckpt']['save_dir']) if __name__ == '__main__': parser = argparse.ArgumentParser(description='Plot learning trajectories') parser.add_argument('-c','--config_file', type=str, required=True, nargs='+', action='append', help='Path to the model configuration file') parser.add_argument('-p', '--path', type=str, required=True, help='Path to checkpoint files') parser.add_argument('-d', '--dimension', type=int, default=2, help='Number of PCA dimensions') parser.add_argument('--show', action='store_true', default=True, 
help='show plots')
    args = parser.parse_args()
    print(args.config_file)
    traj_files = []
    for config_file in args.config_file:
        config_file = config_file[0]
        print(f'config_file is {config_file}')
        with open(config_file, 'r') as stream:
            config = yaml.load(stream, yaml.FullLoader)
        for complex in ['split', 'real', 'imaginary']:
            path = os.path.join(to_path(args.path, config), 'PCA_weights_save_epoch=99_complex=' + complex + '_dim=' + str(args.dimension))
            fname = os.path.join(path, 'directions.h5_proj_cos.h5')
            fname_debug = fname + '-debug.h5'
            traj_files.append({
                'label': config['Project']['name'] + config['Project']['experiment'],
                'filename': fname,
                'complex_handling': complex
            })
            if exists(fname_debug):
                traj_files.append({
                    'label': 'debug-' + config['Project']['name'] + config['Project']['experiment'],
                    'filename': fname_debug,
                    'complex_handling': complex
                })
    plot_trajectories(traj_files, show=args.show)
5,387
37.76259
163
py
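plot_trajectories discovers the dimensionality of a projection file by probing proj_00coord, proj_01coord, ... until a key is missing. A minimal standalone reader using the same naming convention, assuming h5py and a file written by projection.project_trajectory:

import h5py

def read_projected_coords(fname):
    coords = []
    with h5py.File(fname, 'r') as f:
        dim = 0
        while 'proj_{:0>2d}coord'.format(dim) in f.keys():
            coords.append(list(f['proj_{:0>2d}coord'.format(dim)]))
            dim += 1
    return coords  # one list of per-step coordinates per PCA axis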
Fine-tuning-NOs
Fine-tuning-NOs-master/visualization_code/projection_helper.py
import torch
import h5py
import sys
import os

sys.path.append('../')
import utilities

def sizeof(t):
    n = 0
    if isinstance(t, list):
        for w in t:
            n += w.numel()
    elif isinstance(t, torch.Tensor):
        n = t.numel()
    elif isinstance(t, h5py.Dataset):
        n = t.size
    else:
        raise TypeError(f'Unrecognized object of type {type(t)}')
    return n

def shapeof(t):
    sh = []
    if isinstance(t, list):
        for w in t:
            sh.append(shapeof(w))
    else:
        sh.append([t.shape, sizeof(t), t.dtype])
    return sh

def get_loader(config, train=False, prefix=''):
    if train:
        what = 'train'
    else:
        what = 'test'
    c_data = config["data"]
    if prefix:
        path = os.path.join(prefix, c_data['PATH'])
    else:
        path = c_data['PATH']
    gl = utilities.GettingLists(data_for_training=c_data["n_sample"],
                                wave_eq=c_data["PDE_type"],
                                data_base=c_data["process"],
                                PATH=path)
    return utilities.MyLoader(GL=gl, do=what, config=config)
1,144
23.361702
69
py
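sizeof on a list of parameter tensors is simply the total parameter count; the torch one-liner below computes the same quantity, which is handy for checking that a flattened direction vector matches a model. The tensors are illustrative stand-ins:

import torch

weights = [torch.zeros(4, 3), torch.zeros(7)]    # stand-in for net_plotter.get_weights(net)
total = sum(w.numel() for w in weights)          # what sizeof(weights) returns
assert total == 19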
Fine-tuning-NOs
Fine-tuning-NOs-master/visualization_code/vtk_io_helper.py
import sys import os import vtk from vtk_misc_helper import connect ''' Helper functions to import and export various VTK data formats ''' def __read(reader_type, filename): reader = reader_type() reader.SetFileName(filename) return reader def __write(writer_type, input, filename): writer = writer_type() writer.SetFileName(filename) connect(input, writer) writer.Write() def replace_extension(filename, newext): return os.path.splitext(filename)[0] + newext def readVTK(filename): ext = os.path.splitext(filename)[1].lower() if ext == '.vtk': return __read(vtk.vtkDataSetReader, filename) elif ext == '.vti': return __read(vtk.vtkXMLImageDataReader, filename) elif ext == '.vtu': return __read(vtk.vtkXMLUnstructuredGridReader, filename) elif ext == '.vtp': return __read(vtk.vtkXMLPolyDataReader, filename) elif ext == '.vtr': return __read(vtk.vtkXMLRectilinearGridReader, filename) else: raise TypeError(f'Unrecognized VTK file extension {ext}') def saveVTK(dataset, filename): ext = os.path.splitext(filename)[1].lower() if ext == '.vtk': return __write(vtk.vtkDataSetWriter, dataset, filename) elif ext == '.vti': return __write(vtk.vtkXMLImageDataWriter, dataset, filename) elif ext == '.vtu': return __write(vtk.vtkXMLUnstructuredGridWriter, dataset, filename) elif ext == '.vtp': return __write(vtk.vtkXMLPolyDataWriter, dataset, filename) elif ext == '.vts': return __write(vtk.vtkXMLStructuredGridWriter, dataset, filename) elif ext == '.vtr': return __write(vtk.vtkXMLRectilinearGridWriter, dataset, filename) else: raise ValueError(f'Unrecognized VTK file extension: {ext}') def saveVTK_XML(dataset, filename): if isinstance(dataset, vtk.vtkImageData): filename = replace_extension(filename, '.vti') elif isinstance(dataset, vtk.vtkUnstructuredGrid): filename = replace_extension(filename, '.vtu') elif isinstance(dataset, vtk.vtkPolyData): filename = replace_extension(filename, '.vtp') elif isinstance(dataset, vtk.vtkRectilinearGrid): filename = replace_extension(filename, '.vtr') elif isinstance(dataset, vtk.vtkStructuredGrid): filename = replace_extension(filename, '.vts') else: filename = replace_extension(filename, '.vtk') print('WARNING: Unrecognized VTK dataset type. Using Legacy format') print(f'filename is {filename}') saveVTK(dataset, filename)
2,569
34.205479
76
py
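A minimal round-trip sketch for the reader/writer dispatch above; mesh.vtu is a hypothetical input file, and note that readVTK only configures the reader, so Update() must be called before the output is available:

from vtk_io_helper import readVTK, saveVTK_XML

reader = readVTK('mesh.vtu')    # extension selects vtkXMLUnstructuredGridReader
reader.Update()                 # perform the actual read
data = reader.GetOutput()
saveVTK_XML(data, 'mesh_copy')  # saveVTK_XML appends .vtu based on the dataset type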
Fine-tuning-NOs
Fine-tuning-NOs-master/visualization_code/vtk_colorbar.py
import vtk

'''
Helper functions for the creation of colorbar actors in VTK
'''

class colorbar_param:
    def __init__(self, title='No title', title_col=[1,1,1], title_font_size=22,
                 label_col=[1,1,1], pos=[0.9, 0.5], width=80, height=400,
                 nlabels=4, font_size=18, title_offset=10):
        self.title = title
        self.title_col = title_col
        self.label_col = label_col
        self.pos = pos
        self.width = width
        self.height = height
        self.nlabels = nlabels
        self.font_size = font_size
        self.title_offset = title_offset
        self.title_font_size = title_font_size

class colorbar:
    def __init__(self, ctf, param, is_float=True):
        # Create a color bar
        self.scalar_bar = vtk.vtkScalarBarActor()
        # size and relative position
        self.scalar_bar.SetLookupTable(ctf)
        self.scalar_bar.SetPosition(param.pos[0], param.pos[1])
        self.scalar_bar.SetMaximumWidthInPixels(param.width)
        self.scalar_bar.SetMaximumHeightInPixels(param.height)
        # title properties
        self.scalar_bar.SetTitle(param.title)
        self.scalar_bar.GetTitleTextProperty().SetColor(param.title_col[0],
                                                        param.title_col[1],
                                                        param.title_col[2])
        self.scalar_bar.SetVerticalTitleSeparation(param.title_offset)
        self.scalar_bar.GetTitleTextProperty().ShadowOff()
        self.scalar_bar.GetTitleTextProperty().SetFontSize(param.title_font_size)
        self.scalar_bar.GetTitleTextProperty().BoldOn()
        self.scalar_bar.GetLabelTextProperty().SetFontSize(param.font_size)
        self.scalar_bar.GetLabelTextProperty().BoldOn()
        self.scalar_bar.UnconstrainedFontSizeOn()
        # label properties
        self.scalar_bar.SetNumberOfLabels(param.nlabels)
        self.scalar_bar.SetTextPad(8)
        self.scalar_bar.DrawTickLabelsOn()
        if is_float:
            format = '%0.2f'
        else:
            format = '%0.0f'
        self.scalar_bar.SetLabelFormat(format)
        self.scalar_bar.GetLabelTextProperty().SetColor(param.label_col[0],
                                                        param.label_col[1],
                                                        param.label_col[2])
        self.scalar_bar.GetLabelTextProperty().SetFontSize(param.font_size)
        self.scalar_bar.GetLabelTextProperty().BoldOff()
        self.scalar_bar.GetLabelTextProperty().ShadowOff()

    def get(self):
        return self.scalar_bar
2,409
42.818182
179
py
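A minimal sketch wiring the colorbar class above into a renderer, assuming a scalar field mapped through a standard vtkColorTransferFunction; the title and label count are arbitrary:

import vtk
from vtk_colorbar import colorbar, colorbar_param

ctf = vtk.vtkColorTransferFunction()
ctf.AddRGBPoint(0.0, 0.0, 0.0, 1.0)  # blue at the low end of the scalar range
ctf.AddRGBPoint(1.0, 1.0, 0.0, 0.0)  # red at the high end
param = colorbar_param(title='pressure', nlabels=5)
bar = colorbar(ctf, param, is_float=True)

renderer = vtk.vtkRenderer()
renderer.AddActor2D(bar.get())       # the actor is then rendered like any other 2D prop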
Fine-tuning-NOs
Fine-tuning-NOs-master/visualization_code/vtk_camera.py
import vtk
import json
import os
import time
import math
import numpy as np

'''
Helper functions to import/export and print out camera and light settings
'''

def make_2d_camera(dataset, window):
    xmin, xmax, ymin, ymax, zmin, zmax = dataset.GetBounds()
    camera = vtk.vtkCamera()
    center = [(xmin+xmax)/2., (ymin+ymax)/2.]
    print(f'center={center}')
    camera.SetFocalPoint(center[0], center[1], zmax)
    alpha = camera.GetViewAngle()/180.*np.pi
    print(f'alpha in radians is {alpha}')
    window_width = window.GetSize()[0]
    print(f'window width = {window_width}')
    camera.SetPosition(center[0], center[1], window_width/alpha)
    print(f'camera position set to {[center[0], center[1], window_width/alpha]}')
    return camera

# for fully reproducible results, the window size is needed
def save_camera(camera=None, renderer=None, filename='camera.json'):
    if camera is None:
        if renderer is not None:
            camera = renderer.GetActiveCamera()
        else:
            raise ValueError('Missing camera input')
    pos = camera.GetPosition()
    foc = camera.GetFocalPoint()
    up = camera.GetViewUp()
    clip = camera.GetClippingRange()
    angle = camera.GetViewAngle()
    cam = {
        'position': pos,
        'focal_point': foc,
        'view_up': up,
        'clipping_range': clip,
        'angle': angle}
    if os.path.exists(filename):
        t = time.asctime(time.gmtime(time.time())).replace(' ', '_')
        basename, ext = os.path.splitext(filename)
        if not ext:
            ext = '.json'
        filename = f'{basename}_{t}{ext}'
    with open(filename, 'w') as output:
        json.dump(cam, output)
    print(f'saved camera in {filename}')

def load_camera(filename='camera.json'):
    with open(filename, 'r') as json_file:
        cam = json.load(json_file)
    camera = vtk.vtkCamera()
    camera.SetPosition(cam['position'])
    camera.SetFocalPoint(cam['focal_point'])
    camera.SetViewUp(cam['view_up'])
    camera.SetClippingRange(cam['clipping_range'])
    if 'angle' in cam.keys():
        camera.SetViewAngle(cam['angle'])
    return camera

def save_light(light=None, renderer=None, filename='light.json'):
    if light is None and renderer is None:
        raise ValueError('No light information provided')
    elif light is None:
        lc = renderer.GetLights()
        it = lc.NewIterator()
        if not it.IsDoneWithTraversal():
            light = it.GetNextItem()
    pos = light.GetPosition()
    foc = light.GetFocalPoint()
    angle = light.GetConeAngle()
    cola = light.GetAmbientColor()
    cold = light.GetDiffuseColor()
    cols = light.GetSpecularColor()
    intens = light.GetIntensity()
    lightdic = {
        'position': pos,
        'focal_point': foc,
        'angle': angle,
        'ambient_color': cola,
        'diffuse_color': cold,
        'specular_color': cols,
        'intensity': intens
    }
    if os.path.exists(filename):
        t = time.asctime(time.gmtime(time.time())).replace(' ', '_')
        basename, ext = os.path.splitext(filename)
        if not ext:
            ext = '.json'
        filename = f'{basename}_{t}{ext}'
    with open(filename, 'w') as output:
        json.dump(lightdic, output)
    print(f'saved light in {filename}')

def load_one_light(filename):
    with open(filename, 'r') as json_file:
        light_data = json.load(json_file)
    light = vtk.vtkLight()
    light.SetPosition(light_data['position'])
    light.SetFocalPoint(light_data['focal_point'])
    light.SetConeAngle(light_data['angle'])
    light.SetAmbientColor(light_data['ambient_color'])
    light.SetDiffuseColor(light_data['diffuse_color'])
    light.SetSpecularColor(light_data['specular_color'])
    light.SetIntensity(light_data['intensity'])
    light.PositionalOn()
    return light

def load_lights(filename='light.json'):
    collection = vtk.vtkLightCollection()
    if isinstance(filename, list):
        for name in filename:
            collection.AddItem(load_one_light(name))
    else:
        collection.AddItem(load_one_light(filename))
    return collection

def print_camera(camera=None, renderer=None):
    if camera is None:
        if renderer is not None:
            camera = renderer.GetActiveCamera()
        else:
            raise ValueError('Missing camera input')
    # ---------------------------------------------------------------
    # Print out the current settings of the camera
    # ---------------------------------------------------------------
    print('Camera settings:')
    print(f' * position: {camera.GetPosition()}')
    print(f' * focal point: {camera.GetFocalPoint()}')
    print(f' * up vector: {camera.GetViewUp()}')
    print(f' * clipping range: {camera.GetClippingRange()}')
    print(f' * view angle: {camera.GetViewAngle()}')
4,823
35.545455
103
py
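A save/load round trip using the camera helpers above, sketched under the assumption that the camera has already been positioned before it is saved:

import vtk
from vtk_camera import save_camera, load_camera, print_camera

renderer = vtk.vtkRenderer()
save_camera(renderer=renderer, filename='camera.json')  # snapshot the active camera
camera = load_camera('camera.json')                     # rebuild an identical vtkCamera
renderer.SetActiveCamera(camera)
print_camera(camera=camera)                             # echo the restored settings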
Fine-tuning-NOs
Fine-tuning-NOs-master/visualization_code/create_trajectory.py
import numpy as np
import torch
import copy
import math
import h5py
import os
import argparse
import sys
import json
import tqdm

'''
Code adapted from Tom Goldstein's implementation of the 2018 NeurIPS paper:

Hao Li, Zheng Xu, Gavin Taylor, Christoph Studer and Tom Goldstein.
Visualizing the Loss Landscape of Neural Nets. NIPS, 2018.

Github: https://github.com/tomgoldstein/loss-landscape

Given a series of models corresponding to learning steps, compute the PCA of
the models' weights, treated as giant parameter vectors. The first few
principal components, along with the final model, can be used to create a 2D
reference frame upon which the steps can be projected.
'''

sys.path.append('../')
from main import choosing_model, datasetFactory
import yaml
import utilities
from loss_landscape import net_plotter, plot_2D
from projection import setup_PCA_directions, project_trajectory
from scatterplotmatrix import scatterplot_matrix as splom

def evaluate(model, dataloader, loss, train=False, cuda=False, verbose=False):
    the_loss = 0.
    if train:
        lossname = 'training'
    else:
        lossname = 'testing'
    with torch.no_grad():
        pbar = tqdm.tqdm(dataloader, ncols=100, desc=f'Computing {lossname} loss')
        for x, y in pbar:
            batch_size, s = x.shape[0:2]
            if cuda:
                x, y = x.cuda(), y.cuda()
            out = model(x).reshape(batch_size, s, s)
            loss_test = loss(out.view(batch_size, -1), y.view(batch_size, -1))
            the_loss += loss_test.item()
    the_loss = the_loss / len(dataloader.dataset)
    if verbose:
        print(f'loss = {the_loss}')
    return the_loss, 0

def to_path(checkpath, config):
    return os.path.join(checkpath, config['ckpt']['save_dir'])

def step_to_filename(checkpath, config, basename, step):
    return os.path.join(to_path(checkpath, config), basename.format(step))

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Plot optimization trajectory',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-c', '--config_file', type=str, required=True,
                        help='Path to the model configuration file')
    parser.add_argument('-p', '--path', type=str, required=True,
                        help='Path to checkpoint files')
    parser.add_argument('--dir_type', default='weights',
                        help="""direction type: weights (all weights except bias and BN paras) |
                        states (include BN.running_mean/var)""")
    parser.add_argument('--ignore', action='store_true',
                        help='ignore bias and BN paras: biasbn (no bias or bn)')
    parser.add_argument('--complex', type=str, default='split',
                        help='Method to handle imaginary part of complex weights (split/both, ignore/real, keep/same, imaginary)')
    parser.add_argument('--filename', help='Regex filename for checkpoint modes_list')
    parser.add_argument('--steps', type=int, nargs='+', help='list of all available step ids')
    parser.add_argument('--dir_file', help='load/save the direction file for projection')
    parser.add_argument('--proj_method', type=str, default='cos',
                        help='Projection method onto PCA coordinates')
    parser.add_argument('--dimension', type=int, default=2,
                        help='Spatial dimensions in which to draw curve')
    parser.add_argument('--debug', action='store_true',
                        help='Run verification code for PCA projection forward and backward')
    parser.add_argument('--verbose', action='store_true', help='Select verbose output')
    args = parser.parse_args()

    config_file = args.config_file
    with open(config_file, 'r') as stream:
        config = yaml.load(stream, yaml.FullLoader)
    c_save = config["ckpt"]

    model = choosing_model(config)
    test_dataloader = datasetFactory(config, train=False, prefix='../')
    myloss = utilities.LpLoss(size_average=False)

    #--------------------------------------------------------------------------
    # load the final model
    #--------------------------------------------------------------------------
    last_id = args.steps[-1]
    final_model = step_to_filename(args.path, config, args.filename, last_id)
    checkpoint = torch.load(final_model, map_location=lambda storage, loc: storage)
    model.load_state_dict(checkpoint['state_dict'])
    model.eval()
    w = net_plotter.get_weights(model)
    s = model.state_dict()

    #--------------------------------------------------------------------------
    # collect models to be projected
    #--------------------------------------------------------------------------
    model_files = {}
    for epoch in args.steps:
        model_file = step_to_filename(args.path, config, args.filename, epoch)
        if not os.path.exists(model_file):
            print('model %s does not exist' % model_file)
            exit(-1)
        else:
            model_files[epoch] = model_file

    def callback(step):
        name = model_files[step]
        model2 = choosing_model(config)
        checkpoint = torch.load(name, map_location=lambda storage, loc: storage)
        model2.load_state_dict(checkpoint['state_dict'])
        # model2.cuda()
        model2.eval()
        return model2

    #--------------------------------------------------------------------------
    # load or create projection directions
    #--------------------------------------------------------------------------
    args.path = to_path(args.path, config)
    if not args.dir_file:
        print('computing PCA directions for {} models'.format(len(args.steps)))
        args.dir_file = setup_PCA_directions(args, callback, w, s, verbose=args.verbose)
    print(f'dir_file={args.dir_file}')

    #--------------------------------------------------------------------------
    # projection trajectory to given directions
    #--------------------------------------------------------------------------
    proj_file = project_trajectory(args, w, s, callback)
5,937
41.113475
170
py
Fine-tuning-NOs
Fine-tuning-NOs-master/OOD/making_graphs_for_paper.py
import numpy as np
import os
import argparse
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm

def rel_l2(ref, approx):
    diff = np.abs((approx-ref).view(np.csingle)).reshape(-1, ref.shape[1], ref.shape[2])
    den = np.linalg.norm(ref.view(np.csingle), ord=2, axis=(1,2)).reshape(-1,1,1)
    return diff/den

def loading_data_OOD_arch(args):
    PATH = args.PATH
    data_files = dict()
    print(PATH)
    # ground truth
    data_files['pressure_hdg'] = np.load(os.path.join(PATH, 'pressure_set_{:02d}_freq15.npy'.format(args.ood_sample)))[:args.n_sample]
    data_files['wavespeed'] = np.load(os.path.join(PATH, 'wavespeed_set_{:02d}_freq15.npy'.format(args.ood_sample)))[:args.n_sample]
    # loading the FNO data
    data_files['fno_approx'] = np.load(os.path.join(PATH, 'pressure_FNO_set_{:02d}_freq15.npy'.format(args.ood_sample)))[:args.n_sample]
    data_files['error_fno_approx'] = rel_l2(data_files['pressure_hdg'], data_files['fno_approx'])
    # loading the sFNO data
    data_files['sfno_approx'] = np.load(os.path.join(PATH, 'pressure_sFNO_set_{:02d}_freq15.npy'.format(args.ood_sample)))[:args.n_sample]
    data_files['error_sfno_approx'] = rel_l2(data_files['pressure_hdg'], data_files['sfno_approx'])
    # loading the sFNO_eps v1 data
    data_files['sfno_eps1_approx'] = np.load(os.path.join(PATH, 'pressure_sFNO_epsilon_v1_set_{:02d}_freq15.npy'.format(args.ood_sample)))[:args.n_sample]
    data_files['error_sfno_eps1_approx'] = rel_l2(data_files['pressure_hdg'], data_files['sfno_eps1_approx'])
    # loading the sFNO_eps v2 (long) data
    data_files['sfno_eps2_approx'] = np.load(os.path.join(PATH, 'pressure_sFNO_epsilon_v1_long_set_{:02d}_freq15.npy'.format(args.ood_sample)))[:args.n_sample]
    data_files['error_sfno_eps2_approx'] = rel_l2(data_files['pressure_hdg'], data_files['sfno_eps2_approx'])
    # loading the FNO residual data
    data_files['residual_fno'] = np.load(os.path.join(PATH, 'pressure_FNO_residual_set_{:02d}_freq15.npy'.format(args.ood_sample)))[:args.n_sample]
    data_files['error_residual_fno'] = rel_l2(data_files['pressure_hdg'], data_files['residual_fno'])
    return data_files

def plotting_data_OOD_arch(args, data_files):
    PATH = os.path.join(args.PATH, 'graphs')
    if not os.path.exists(PATH):
        os.makedirs(PATH)
    for name in data_files.keys():
        if name == "wavespeed":
            v_min = 1.5
            v_max = 5
            cmap = 'jet'
        else:
            v_min = -0.1
            v_max = 0.1
            cmap = 'seismic'
        for i in range(len(data_files[name])):
            print(name)
            data = data_files[name][i, ...]
            if name == 'wavespeed':
                fig = plt.imshow(data, vmin=v_min, vmax=v_max, cmap=cmap, aspect='equal')
                # eliminate space between the image and the axis
                plt.axis('off')
                fig.axes.get_xaxis().set_visible(False)
                fig.axes.get_yaxis().set_visible(False)
                if args.save_graph:
                    plt.savefig(os.path.join(PATH, f'cp_{i}_{name}.png'), bbox_inches='tight', pad_inches=0)
                    plt.close()
                else:
                    plt.show()
            # if the name contains error in any part, we plot the error
            elif name.find('error') != -1:
                # make log scale in imshow
                fig = plt.imshow(data, cmap='jet', aspect='equal', norm=LogNorm(vmin=1e-4, vmax=1e-1))
                # eliminate space between the image and the axis
                plt.axis('off')
                fig.axes.get_xaxis().set_visible(False)
                fig.axes.get_yaxis().set_visible(False)
                if args.save_graph:
                    plt.savefig(os.path.join(PATH, f'cp_{i}_{name}.png'), bbox_inches='tight', pad_inches=0)
                    plt.close()
                else:
                    plt.show()
            else:
                data_real = data[:, :, 0]
                fig = plt.imshow(data_real, vmin=v_min, vmax=v_max, cmap=cmap, aspect='equal')
                # eliminate space between the image and the axis
                plt.axis('off')
                fig.axes.get_xaxis().set_visible(False)
                fig.axes.get_yaxis().set_visible(False)
                if args.save_graph:
                    plt.savefig(os.path.join(PATH, f'cp_{i}_{name}_real.png'), bbox_inches='tight', pad_inches=0)
                    plt.close()
                else:
                    plt.show()
                data_imag = data[:, :, 1]
                fig = plt.imshow(data_imag, vmin=v_min, vmax=v_max, cmap=cmap, aspect='equal')
                # eliminate space between the image and the axis
                plt.axis('off')
                fig.axes.get_xaxis().set_visible(False)
                fig.axes.get_yaxis().set_visible(False)
                if args.save_graph:
                    plt.savefig(os.path.join(PATH, f'cp_{i}_{name}_imag.png'), bbox_inches='tight', pad_inches=0)
                    plt.close()
                else:
                    plt.show()

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('-ood', '--ood_sample', type=int, help='out of distribution set', default=5)
    parser.add_argument('-sg', '--save-graph', type=bool, help='Saving Image', default=True)
    parser.add_argument('-rk', '--realization_k', type=int, help='realization number', default=0)
    parser.add_argument('-n', '--n_sample', type=int, help='number of sample', default=6)
    parser.add_argument('-f', '--freq', type=int, help='frequency of the OOD', default=15)
    args = parser.parse_args()

    # getting the name of the dataset
    dir_skeleton = 'set_{:02d}'.format(args.ood_sample) + f'_freq{args.freq}'
    print(f"dir skel {dir_skeleton}")
    PATH = os.path.join('OOD', dir_skeleton, f'realization_{args.realization_k}')
    args.PATH = PATH

    # getting the data
    data_files = loading_data_OOD_arch(args)
    # plotting the data
    plotting_data_OOD_arch(args, data_files)
6,411
46.496296
158
py
fairness-indicators
fairness-indicators-master/setup.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Setup to install Fairness Indicators."""
import os
import sys

from setuptools import find_packages
from setuptools import setup

if sys.version_info >= (3, 11):
  sys.exit('Sorry, Python >= 3.11 is not supported')


def select_constraint(default, nightly=None, git_master=None):
  """Select dependency constraint based on TFX_DEPENDENCY_SELECTOR env var."""
  selector = os.environ.get('TFX_DEPENDENCY_SELECTOR')
  if selector == 'UNCONSTRAINED':
    return ''
  elif selector == 'NIGHTLY' and nightly is not None:
    return nightly
  elif selector == 'GIT_MASTER' and git_master is not None:
    return git_master
  else:
    return default

REQUIRED_PACKAGES = [
    'tensorflow>=2.12.0,<2.13',
    'tensorflow-hub>=0.8.0,<1.0.0',
    'tensorflow-data-validation' + select_constraint(
        default='>=1.13.0,<1.14.0',
        nightly='>=1.14.0.dev',
        git_master='@git+https://github.com/tensorflow/data-validation@master'),
    'tensorflow-model-analysis' + select_constraint(
        default='>=0.44,<0.45',
        nightly='>=0.45.0.dev',
        git_master='@git+https://github.com/tensorflow/model-analysis@master'),
    'witwidget>=1.4.4,<2',
    'protobuf>=3.20.3,<5',
]

# Get version from version module.
with open('fairness_indicators/version.py') as fp:
  globals_dict = {}
  exec(fp.read(), globals_dict)  # pylint: disable=exec-used
__version__ = globals_dict['__version__']

with open('README.md', 'r', encoding='utf-8') as fh:
  long_description = fh.read()

setup(
    name='fairness_indicators',
    version=__version__,
    description='Fairness Indicators',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/tensorflow/fairness-indicators',
    author='Google LLC',
    author_email='packages@tensorflow.org',
    packages=find_packages(exclude=['tensorboard_plugin']),
    package_data={
        'fairness_indicators': ['documentation/*'],
    },
    python_requires='>=3.8,<3.10',
    install_requires=REQUIRED_PACKAGES,
    tests_require=REQUIRED_PACKAGES,
    # PyPI package information.
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Intended Audience :: Education',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3 :: Only',
        'Topic :: Scientific/Engineering',
        'Topic :: Scientific/Engineering :: Mathematics',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Topic :: Software Development',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    license='Apache 2.0',
    keywords='tensorflow model analysis fairness indicators tensorboard machine'
    ' learning',
)
3,759
35.504854
80
py
fairness-indicators
fairness-indicators-master/g3doc/__init__.py
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
596
41.642857
74
py
fairness-indicators
fairness-indicators-master/tensorboard_plugin/setup.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Setup to install Fairness Indicators Tensorboard plugin."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import sys

from setuptools import find_packages
from setuptools import setup

if sys.version_info >= (3, 11):
  sys.exit('Sorry, Python >= 3.11 is not supported')


def select_constraint(default, nightly=None, git_master=None):
  """Select dependency constraint based on TFX_DEPENDENCY_SELECTOR env var."""
  selector = os.environ.get('TFX_DEPENDENCY_SELECTOR')
  if selector == 'UNCONSTRAINED':
    return ''
  elif selector == 'NIGHTLY' and nightly is not None:
    return nightly
  elif selector == 'GIT_MASTER' and git_master is not None:
    return git_master
  else:
    return default

REQUIRED_PACKAGES = [
    'protobuf>=3.20.3,<5',
    'tensorboard>=2.8.0,<3',
    'tensorflow>=2.12,<3',
    'tensorflow-model-analysis' + select_constraint(
        default='>=0.44,<0.45',
        nightly='>=0.45.0.dev',
        git_master='@git+https://github.com/tensorflow/model-analysis@master'),
    'werkzeug<2'
]

with open('README.md', 'r', encoding='utf-8') as fh:
  long_description = fh.read()

# Get version from version module.
with open('tensorboard_plugin_fairness_indicators/version.py') as fp:
  globals_dict = {}
  exec(fp.read(), globals_dict)  # pylint: disable=exec-used
__version__ = globals_dict['__version__']

setup(
    name='tensorboard_plugin_fairness_indicators',
    version=__version__,
    description='Fairness Indicators TensorBoard Plugin',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/tensorflow/fairness-indicators',
    author='Google LLC',
    author_email='packages@tensorflow.org',
    packages=find_packages(),
    package_data={
        'tensorboard_plugin_fairness_indicators': ['static/**'],
    },
    entry_points={
        'tensorboard_plugins': [
            'fairness_indicators = tensorboard_plugin_fairness_indicators.plugin:FairnessIndicatorsPlugin',
        ],
    },
    python_requires='>=3.8,<3.10',
    install_requires=REQUIRED_PACKAGES,
    tests_require=REQUIRED_PACKAGES,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Intended Audience :: Education',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3 :: Only',
        'Topic :: Scientific/Engineering',
        'Topic :: Scientific/Engineering :: Mathematics',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Topic :: Software Development',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    license='Apache 2.0',
    keywords='tensorflow model analysis fairness indicators tensorboard machine learning',
)
3,841
35.245283
107
py
fairness-indicators
fairness-indicators-master/tensorboard_plugin/tensorboard_plugin_fairness_indicators/metadata_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for util function to create plugin metadata."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorboard_plugin_fairness_indicators import metadata
import tensorflow.compat.v1 as tf


class MetadataTest(tf.test.TestCase):

  def testCreateSummaryMetadata(self):
    summary_metadata = metadata.CreateSummaryMetadata('description')
    self.assertEqual(metadata.PLUGIN_NAME,
                     summary_metadata.plugin_data.plugin_name)
    self.assertEqual('description', summary_metadata.summary_description)

  def testCreateSummaryMetadata_withoutDescription(self):
    summary_metadata = metadata.CreateSummaryMetadata()
    self.assertEqual(metadata.PLUGIN_NAME,
                     summary_metadata.plugin_data.plugin_name)


if __name__ == '__main__':
  tf.test.main()
1,547
36.756098
80
py
fairness-indicators
fairness-indicators-master/tensorboard_plugin/tensorboard_plugin_fairness_indicators/plugin.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorBoard Fairness Indicators plugin."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os

# Standard imports
from absl import logging
from tensorboard_plugin_fairness_indicators import metadata
import six
import tensorflow_model_analysis as tfma
from tensorflow_model_analysis.addons.fairness.view import widget_view
from werkzeug import wrappers

from google.protobuf import json_format
from tensorboard.backend import http_util
from tensorboard.plugins import base_plugin

_TEMPLATE_LOCATION = os.path.normpath(
    os.path.join(
        __file__, '../../'
        'tensorflow_model_analysis/static/vulcanized_tfma.js'))


class FairnessIndicatorsPlugin(base_plugin.TBPlugin):
  """A plugin to visualize Fairness Indicators."""

  plugin_name = metadata.PLUGIN_NAME

  def __init__(self, context):
    """Instantiates plugin via TensorBoard core.

    Args:
      context: A base_plugin.TBContext instance. A magic container that
        TensorBoard uses to make objects available to the plugin.
    """
    self._multiplexer = context.multiplexer

  def get_plugin_apps(self):
    """Gets all routes offered by the plugin.

    This method is called by TensorBoard when retrieving all the routes
    offered by the plugin.

    Returns:
      A dictionary mapping URL path to route that handles it.
    """
    return {
        '/get_evaluation_result':
            self._get_evaluation_result,
        '/get_evaluation_result_from_remote_path':
            self._get_evaluation_result_from_remote_path,
        '/index.js':
            self._serve_js,
        '/vulcanized_tfma.js':
            self._serve_vulcanized_js,
    }

  def frontend_metadata(self):
    return base_plugin.FrontendMetadata(
        es_module_path='/index.js',
        disable_reload=False,
        tab_name='Fairness Indicators',
        remove_dom=False,
        element_name=None)

  def is_active(self):
    """Determines whether this plugin is active.

    This plugin is only active if TensorBoard sampled any summaries
    relevant to the plugin.

    Returns:
      Whether this plugin is active.
    """
    return bool(
        self._multiplexer.PluginRunToTagToContent(
            FairnessIndicatorsPlugin.plugin_name))

  @wrappers.Request.application
  def _serve_js(self, request):  # pytype: disable=wrong-arg-types
    filepath = os.path.join(os.path.dirname(__file__), 'static', 'index.js')
    with open(filepath) as infile:
      contents = infile.read()
    return http_util.Respond(
        request, contents, content_type='application/javascript')

  @wrappers.Request.application
  def _serve_vulcanized_js(self, request):  # pytype: disable=wrong-arg-types
    with open(_TEMPLATE_LOCATION) as infile:
      contents = infile.read()
    return http_util.Respond(
        request, contents, content_type='application/javascript')

  @wrappers.Request.application
  def _get_evaluation_result(self, request):  # pytype: disable=wrong-arg-types
    run = request.args.get('run')
    try:
      run = six.ensure_text(run)
    except (UnicodeDecodeError, AttributeError):
      pass

    data = []
    try:
      eval_result_output_dir = six.ensure_text(
          self._multiplexer.Tensors(run, FairnessIndicatorsPlugin.plugin_name)
          [0].tensor_proto.string_val[0])
      eval_result = tfma.load_eval_result(output_path=eval_result_output_dir)
      # TODO(b/141283811): Allow users to choose different model output names
      # and class keys in case of multi-output and multi-class model.
      data = widget_view.convert_slicing_metrics_to_ui_input(
          eval_result.slicing_metrics)
    except (KeyError, json_format.ParseError) as error:
      logging.info('Error while fetching evaluation data, %s', error)
    return http_util.Respond(request, data, content_type='application/json')

  def _get_output_file_format(self, evaluation_output_path):
    file_format = os.path.splitext(evaluation_output_path)[1]
    if file_format:
      return file_format[1:]
    return ''

  @wrappers.Request.application
  def _get_evaluation_result_from_remote_path(self, request):  # pytype: disable=wrong-arg-types
    evaluation_output_path = request.args.get('evaluation_output_path')
    try:
      evaluation_output_path = six.ensure_text(evaluation_output_path)
    except (UnicodeDecodeError, AttributeError):
      pass
    try:
      eval_result = tfma.load_eval_result(
          os.path.dirname(evaluation_output_path),
          output_file_format=self._get_output_file_format(
              evaluation_output_path))
      data = widget_view.convert_slicing_metrics_to_ui_input(
          eval_result.slicing_metrics)
    except (KeyError, json_format.ParseError) as error:
      logging.info('Error while fetching evaluation data, %s', error)
      data = []
    return http_util.Respond(request, data, content_type='application/json')
5,621
35.038462
96
py
fairness-indicators
fairness-indicators-master/tensorboard_plugin/tensorboard_plugin_fairness_indicators/metadata.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Plugin-specific global metadata."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# Standard imports
from tensorboard.compat.proto import summary_pb2

PLUGIN_NAME = "fairness_indicators"


def CreateSummaryMetadata(description=None):
  return summary_pb2.SummaryMetadata(
      summary_description=description,
      plugin_data=summary_pb2.SummaryMetadata.PluginData(
          plugin_name=PLUGIN_NAME))
1,162
35.34375
80
py
fairness-indicators
fairness-indicators-master/tensorboard_plugin/tensorboard_plugin_fairness_indicators/summary_v2_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Fairness Indicators summary."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import glob
import os

# Standard imports
from tensorboard_plugin_fairness_indicators import metadata
from tensorboard_plugin_fairness_indicators import summary_v2
import six
import tensorflow.compat.v1 as tf

from tensorboard.compat import tf2

try:
  tf2.__version__  # Force lazy import to resolve
except ImportError:
  tf2 = None

try:
  tf.enable_eager_execution()
except AttributeError:
  # TF 2.0 doesn't have this symbol because eager is the default.
  pass


class SummaryV2Test(tf.test.TestCase):

  def _write_summary(self, eval_result_output_dir):
    writer = tf2.summary.create_file_writer(self.get_temp_dir())
    with writer.as_default():
      summary_v2.FairnessIndicators(eval_result_output_dir, step=1)
    writer.close()

  def _get_event(self):
    event_files = sorted(glob.glob(os.path.join(self.get_temp_dir(), '*')))
    self.assertEqual(len(event_files), 1)
    events = list(tf.train.summary_iterator(event_files[0]))
    # Expect a boilerplate event for the file_version, then the summary one.
    self.assertEqual(len(events), 2)
    return events[1]

  def testSummary(self):
    self._write_summary('output_dir')
    event = self._get_event()

    self.assertEqual(1, event.step)
    summary_value = event.summary.value[0]
    self.assertEqual(metadata.PLUGIN_NAME, summary_value.tag)
    self.assertEqual(
        'output_dir',
        six.ensure_text(summary_value.tensor.string_val[0], 'utf-8'))
    self.assertEqual(metadata.PLUGIN_NAME,
                     summary_value.metadata.plugin_data.plugin_name)


if __name__ == '__main__':
  tf.test.main()
2,433
31.453333
80
py