| repo (string) | file (string) | code (string) | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type (string) |
|---|---|---|---|---|---|---|
| deficient-efficient | deficient-efficient-master/load_wrn50_2.py |
import re
import torch
import torch.nn.functional as F
from torch.utils import model_zoo
from models.blocks import Conv
from models.wide_resnet import WRN_50_2
from collections import OrderedDict
def all_equal(iterable_1, iterable_2):
return all([x == y for x,y in zip(iterable_1, iterable_2)])
# functional model definition from functional zoo: https://github.com/szagoruyko/functional-zoo/blob/master/imagenet-validation.py#L27-L47
def define_model(params):
def conv2d(input, params, base, stride=1, pad=0):
return F.conv2d(input, params[base + '.weight'],
params[base + '.bias'], stride, pad)
def group(input, params, base, stride, n):
o = input
for i in range(0,n):
b_base = ('%s.block%d.conv') % (base, i)
x = o
o = conv2d(x, params, b_base + '0')
o = F.relu(o)
o = conv2d(o, params, b_base + '1', stride=i==0 and stride or 1, pad=1)
o = F.relu(o)
o = conv2d(o, params, b_base + '2')
if i == 0:
o += conv2d(x, params, b_base + '_dim', stride=stride)
else:
o += x
o = F.relu(o)
return o
# determine network size by parameters
    blocks = [sum([re.match(r'group%d.block\d+.conv0.weight' % j, k) is not None
                   for k in params.keys()]) for j in range(4)]
def f(input, params):
o = F.conv2d(input, params['conv0.weight'], params['conv0.bias'], 2, 3)
o = F.relu(o)
o = F.max_pool2d(o, 3, 2, 1)
o_g0 = group(o, params, 'group0', 1, blocks[0])
o_g1 = group(o_g0, params, 'group1', 2, blocks[1])
o_g2 = group(o_g1, params, 'group2', 2, blocks[2])
o_g3 = group(o_g2, params, 'group3', 2, blocks[3])
o = F.avg_pool2d(o_g3, 7, 1, 0)
o = o.view(o.size(0), -1)
o = F.linear(o, params['z.fc.weight'], params['z.fc.bias'])
return o
return f
if __name__ == '__main__':
# our model definition
net = WRN_50_2(Conv)
# load parameters from model zoo
params = model_zoo.load_url('https://s3.amazonaws.com/modelzoo-networks/wide-resnet-50-2-export-5ae25d50.pth')
    # rename the fc parameters so they sort after the conv/group parameters;
    # otherwise the sorted ordering below will not line up with our state_dict
params['z.fc.weight'] = params.pop('fc.weight')
params['z.fc.bias'] = params.pop('fc.bias')
params = sorted(params.items()) # list of tuples, in order
# make state_dict from model_zoo parameters
state_dict = OrderedDict()
w_i, b_i = 0, 0
for n,p in net.state_dict().items():
if 'weight' in n and 'bn' not in n:
while 'weight' not in params[w_i][0]:
w_i += 1
k, v = params[w_i]
print(k, " == ", n)
assert all_equal(v.shape, p.size()), f"{v.shape} =/= {p.size()}"
state_dict[n] = v
w_i += 1
elif 'bias' in n:
while 'bias' not in params[b_i][0]:
b_i += 1
k, v = params[b_i]
print(k, " == ", n)
assert all_equal(v.shape, p.size()), f"{v.shape} =/= {p.size()}"
state_dict[n] = v
b_i += 1
else:
state_dict[n] = p
assert max(w_i, b_i) == len(params) # all params are matched
# test if this is the same as the functional implementation
params = OrderedDict(params)
f = define_model(params)
net.load_state_dict(state_dict)
net.eval()
X = torch.randn(2,3,224,224)
func_out, net_out = f(X, params), net(X)[0]
error = torch.abs(func_out - net_out)
assert error.max() < 1e-3, "%f"%error.max()
print("Output given random input is equal within %f"%error.max())
# now save a new checkpoint file, with correct saved terms
save_dict = {}
save_dict['net'] = state_dict
save_dict['epoch'] = 100
save_dict['conv'] = 'Conv'
save_dict['blocktype'] = None
save_dict['module'] = None
torch.save(save_dict, 'checkpoints/wrn_50_2.imagenet.modelzoo.t7')
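    # Illustrative round-trip (not in the original script): reload the
    # checkpoint we just wrote and restore it into a fresh WRN_50_2.
    reloaded = torch.load('checkpoints/wrn_50_2.imagenet.modelzoo.t7')
    net2 = WRN_50_2(Conv)
    net2.load_state_dict(reloaded['net'])
    print("Reloaded checkpoint from epoch %i" % reloaded['epoch'])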
| 4,000 | 36.046296 | 138 | py |
| deficient-efficient | deficient-efficient-master/imagenet_experiments.py |
import json
#settings = ['ACDC_%i'%n for n in [12, 28]] +\
# ['SepHashed_%.2f'%s for s in [0.08, 0.58]] +\
settings = ['Generic_%.2f'%s for s in [0.03, 0.21]] +\
['Tucker_%.2f'%s for s in [0.25, 0.73]] +\
['TensorTrain_%.2f'%s for s in [0.27, 0.75]] +\
['Shuffle_%i'%n for n in [7, 1]]
experiments = []
import datetime
now = datetime.datetime.now()
monthday = now.strftime("%B")[:3]+"%i"%now.day
# use these settings to train WideResNets from scratch
for s in settings:
experiment = ["python", "main.py", "imagenet", "teacher", "--conv", s,
"-t", "wrn_50_2.%s.%s"%(s.lower(), monthday), "--network",
"WRN_50_2", "--GPU", "0,1,2,3"]
experiments.append(experiment)
# and to train WideResNets with a teacher
for s in settings:
experiment = ["python", "main.py", "imagenet", "student", "--conv", s,
"-t", "wrn_50_2.imagenet.modelzoo", "-s",
"wrn_50_2.%s.student.%s"%(s.lower(), monthday), "--network",
"WRN_50_2", "--alpha", "0.", "--beta", "1e3", "--GPU", "0,1,2,3"]
experiments.append(experiment)
with open("wrn_50_2_imagenet.json", "w") as f:
f.write(json.dumps(experiments))
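# Illustrative (not part of this script): the JSON written above is a list of
# argv lists, so a hypothetical runner could replay it sequentially with
# subprocess, e.g.
#
#   import subprocess
#   with open("wrn_50_2_imagenet.json") as f:
#       for cmd in json.load(f):
#           subprocess.run(cmd, check=True)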
| 1,203 | 36.625 | 77 | py |
| deficient-efficient | deficient-efficient-master/collate_results.py |
# open the schedule JSON, then search the known machines for the
# furthest-progressed copy of each job
import json
import sys
import os
import torch
import subprocess
from subprocess import PIPE
from collections import OrderedDict
from funcs import what_conv_block
from models.wide_resnet import WideResNet, WRN_50_2
from models.darts import DARTS
from count import measure_model
from tqdm import tqdm
with open('machine_list.json', 'r') as f:
# list of strings with "hostname:path" where the deficient-efficient repos
# can be found
machines = json.load(f)
def ckpt_name(experiment):
if '-s' in experiment:
prefix = '-s'
else:
prefix = '-t'
ckpt_idx = [i for i, arg in enumerate(experiment) if arg == prefix][0]+1
return experiment[ckpt_idx]
def parse_name(path):
monthday = path.split(".")[-2]
path = path.split('.')[1:] # split off part containing settings
# index to cut out for settings
idx = [i for i,s in enumerate(path) if monthday == s or 'student' == s][0]
method, setting = (".".join(path[:idx])).split("_") # this is just the settings string now
return 'student' in path, method, setting
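# Illustrative examples, using the naming convention from imagenet_experiments.py:
# parse_name("wrn_50_2.tucker_0.25.Nov5.t7") returns (False, "tucker", "0.25"),
# and parse_name("wrn_50_2.tucker_0.25.student.Nov5.t7") returns
# (True, "tucker", "0.25").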
def parse_checkpoint(ckpt_name, ckpt_contents):
results = OrderedDict()
results['epoch'] = ckpt_contents['epoch']
results['val_errors'] = [float(x) for x in ckpt_contents['val_errors']]
results['train_errors'] = [float(x) for x in ckpt_contents['train_errors']]
# hard part: count parameters by making an instance of the network
network = {'wrn_28_10': 'WideResNet', 'darts': 'DARTS', 'wrn_50_2': 'WRN_50_2'}[ckpt_name.split(".")[0]]
h,w = {'WideResNet': (32,32), 'DARTS': (32,32), 'WRN_50_2': (224,224)}[network]
SavedConv, SavedBlock = what_conv_block(ckpt_contents['conv'],
ckpt_contents['blocktype'], ckpt_contents['module'])
model = build_network(SavedConv, SavedBlock, network)
flops, params = measure_model(model, h, w)
assert params == sum([p.numel() for p in model.parameters()])
results['no_params'] = params
results['flops'] = flops
results['settings'] = parse_name(ckpt_name)
results['scatter'] = (params, results['val_errors'][-1], results['train_errors'][-1], results['epoch'], flops)
return results
# instance the model
def build_network(Conv, Block, network):
if network == 'WideResNet':
return WideResNet(28, 10, Conv, Block,
num_classes=10, dropRate=0)
elif network == 'WRN_50_2':
return WRN_50_2(Conv)
elif network == 'DARTS':
return DARTS(Conv, num_classes=10, drop_path_prob=0., auxiliary=False)
def keep_oldest(collated, ckpt_name, ckpt_contents):
    # if a checkpoint with this name has already been collated, keep whichever
    # copy has run for more epochs
ckpt = parse_checkpoint(ckpt_name, ckpt_contents)
try:
existing_epochs = collated[ckpt_name]['epoch']
except KeyError:
# doesn't exist yet so return
return ckpt
if int(existing_epochs) < int(ckpt['epoch']):
return ckpt
else:
return collated[ckpt_name]
def main():
try:
# read the schedule from json
json_path = sys.argv[1]
with open(json_path, "r") as f:
schedule = json.load(f)
# prepare directory
if not os.path.exists("collate"):
os.mkdir("collate")
else:
# clean up directory
old_ckpts = os.listdir("collate")
for c in old_ckpts:
os.remove(os.path.join("collate", c))
# make a list of all the checkpoint files we need to check
checkpoints = []
for e in schedule:
checkpoints.append(ckpt_name(e)+".t7")
# look for these checkpoints on every machine we know about
collated = []
for m in tqdm(machines, desc='machine'):
# connect to the remote machine
hostname, directory = m.split(":")
checkpoint_dir = os.path.join(directory, "checkpoints")
completed = subprocess.run(f"ssh {hostname} ls {checkpoint_dir}".split(" "), stdout=PIPE, stderr=PIPE)
checkpoints_on_remote = completed.stdout.decode().split("\n")
# look for overlap between that and the checkpoints we care about
overlap = list(set(checkpoints_on_remote) & set(checkpoints))
for checkpoint in tqdm(overlap, desc="copying"):
checkpoint_loc = os.path.join(checkpoint_dir, checkpoint)
checkpoint_dest = f"collate/{hostname}.{checkpoint}"
if not os.path.exists(checkpoint_dest):
subprocess.run(f"scp {hostname}:{checkpoint_loc} {checkpoint_dest}".split(" "), stdout=PIPE, stderr=PIPE)
except IndexError:
pass
# iterate over copied files
collated = OrderedDict()
copied = os.listdir("collate")
for checkpoint in tqdm(copied, desc='Opening checkpoints'):
checkpoint_loc = os.path.join("collate", checkpoint)
hostname = checkpoint.split(".")[0]
checkpoint_name = ".".join(checkpoint.split(".")[1:])
checkpoint_contents = torch.load(checkpoint_loc)
collated[checkpoint_name] = keep_oldest(collated, checkpoint_name, checkpoint_contents)
for k in collated:
print(k, collated[k]['epoch'], collated[k]['val_errors'][-1])
with open("collated.json", "w") as f:
f.write(json.dumps(collated, indent=2))
if __name__ == "__main__":
main()
| 5,460 | 37.730496 | 125 | py |
| deficient-efficient | deficient-efficient-master/history.py |
# opens checkpoints and prints the commands used to run each
import torch
import os
import argparse
parser = argparse.ArgumentParser(description='Inspect saved checkpoints')
parser.add_argument('--match', type=str, default=None, help='Filter checkpoints by keyword.')
if __name__ == '__main__':
args = parser.parse_args()
ckpt_paths = os.listdir("checkpoints")
# filter for search term
if args.match is not None:
ckpt_paths = [c for c in ckpt_paths if args.match in c]
for p in ckpt_paths:
try:
ckpt = torch.load("checkpoints/"+p)
if 'args' in ckpt.keys():
print(p + " (%i-%.2f) "%(ckpt['epoch'], ckpt['val_errors'][-1]) + ": " + " ".join(ckpt['args']))
except:
pass
| 766 | 33.863636 | 113 | py |
| deficient-efficient | deficient-efficient-master/models/resnet.py |
'''A rewrite of the native ResNet definition that ships with PyTorch, allowing it to use our
blocks and convolutions for ImageNet experiments. Annoyingly, the pre-trained models don't use pre-activation blocks.'''
import torch
import torch.nn as nn
import math
import torchvision.models.resnet
import torch.utils.model_zoo as model_zoo
from .blocks import *
__all__ = ['ResNet', 'resnet18', 'resnet34']#, 'resnet50', 'resnet101','resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
class ResNet(nn.Module):
def __init__(self, conv, block, n, num_classes=1000, s=1):
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
nChannels =[64, 64, 128, 256, 512]
self.layer1 = torch.nn.ModuleList()
for i in range(s):
self.layer1.append(NetworkBlock(int(n[0] // s), nChannels[0] if i == 0 else nChannels[1],
nChannels[1], block, 1, conv=conv))
self.layer2 = torch.nn.ModuleList()
for i in range(s):
self.layer2.append(NetworkBlock(int(n[1] // s), nChannels[1] if i == 0 else nChannels[2],
nChannels[2], block, 2, conv=conv))
self.layer3 = torch.nn.ModuleList()
for i in range(s):
self.layer3.append(NetworkBlock(int(n[2] // s), nChannels[2] if i == 0 else nChannels[3],
nChannels[3], block, 2, conv=conv))
self.layer4 = torch.nn.ModuleList()
for i in range(s):
self.layer4.append(NetworkBlock(int(n[3] // s), nChannels[3] if i == 0 else nChannels[4],
nChannels[4], block, 2, conv=conv))
# self.layer1 = self._make_layer(block, 64, layers[0])
# self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
# self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
# self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7, stride=1)
self.fc = nn.Linear(512, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, x):
activations = []
out = self.maxpool(self.relu(self.bn1(self.conv1(x))))
for sub_block in self.layer1:
out = sub_block(out)
activations.append(out)
for sub_block in self.layer2:
out = sub_block(out)
activations.append(out)
for sub_block in self.layer3:
out = sub_block(out)
activations.append(out)
for sub_block in self.layer4:
out = sub_block(out)
activations.append(out)
out = self.avgpool(out)
out = out.view(out.size(0), -1)
out = self.fc(out)
return out, activations
def resnet18(pretrained=False, conv=nnConv, block=OldBlock):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(conv,block, [2, 2, 2, 2])
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet34(pretrained=False, conv=nnConv, block=OldBlock):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(conv,block, [3, 4, 6, 3])
if pretrained:
        # This assumes the sequence of modules in the network is the same in both cases.
        # Annoyingly, the batch norm parameters are stored in a different order in the
        # downloaded state dict, so we load the stock torchvision definition, load the
        # downloaded state dict into it to restore the order, then transfer it over.
        old_model = torchvision.models.resnet.resnet34(pretrained=False)
        old_model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
        new_state_dict = model.state_dict()
        old_state_dict = old_model.state_dict()
old_names = [v for v in old_state_dict]
new_names = [v for v in new_state_dict]
for i,j in enumerate(old_names):
new_state_dict[new_names[i]] = old_state_dict[j]
model.load_state_dict(new_state_dict)
return model
def test2():
net = resnet34()
x = torch.randn(1, 3, 224, 224)
y, _ = net(Variable(x))
print(y.size())
if __name__ == '__main__':
test2()
# Haven't written the old bottleneck yet.
#
# def resnet50(pretrained=False, **kwargs):
# """Constructs a ResNet-50 model.
# Args:
# pretrained (bool): If True, returns a model pre-trained on ImageNet
# """
# model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
# if pretrained:
# model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
# return model
#
#
# def resnet101(pretrained=False, **kwargs):
# """Constructs a ResNet-101 model.
# Args:
# pretrained (bool): If True, returns a model pre-trained on ImageNet
# """
# model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
# if pretrained:
# model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
# return model
#
#
# def resnet152(pretrained=False, **kwargs):
# """Constructs a ResNet-152 model.
# Args:
# pretrained (bool): If True, returns a model pre-trained on ImageNet
# """
# model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
# if pretrained:
# model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
# return model
| 6,623 | 34.047619 | 120 | py |
| deficient-efficient | deficient-efficient-master/models/hashed.py |
# HashedNet Convolutional Layer: https://arxiv.org/abs/1504.04788
from functools import reduce
import torch
import torch.nn as nn
import torch.nn.functional as F
class HashedConv2d(nn.Conv2d):
"""Conv2d with the weights of the convolutional filters parameterised using
a budgeted subset of parameters and random indexes to place those
parameters in the weight tensor."""
def __init__(self, in_channels, out_channels, kernel_size, budget,
stride=1, padding=0, dilation=1, groups=1, bias=True):
super(HashedConv2d, self).__init__(in_channels, out_channels,
kernel_size, stride=stride, padding=padding, dilation=dilation,
groups=groups, bias=True)
# grab budgeted subset of the weights
assert self.weight.numel() >= budget, \
f"budget {budget} higher than {self.weight.numel()}"
self.weight_size = self.weight.size()
budgeted = self.weight.data.view(-1)[:budget]
del self.weight
# register non-budgeted weights
self.register_parameter('hashed_weight', nn.Parameter(budgeted))
        # precompute random index matrix (randint's upper bound is exclusive,
        # so high=budget draws indices in [0, budget-1])
        idxs = torch.randint(high=budget, size=self.weight_size).long()
idxs = idxs.view(-1)
# register indexes as a buffer
self.register_buffer('idxs', idxs)
#self.W = self.weight[self.idxs].cuda()
def forward(self, x):
# index to make weight matrix
try:
W = self.hashed_weight.index_select(0, self.idxs).view(self.weight_size)
except RuntimeError:
import ipdb
ipdb.set_trace()
# complete forward pass as normal
return F.conv2d(x, W, self.bias, self.stride, self.padding,
self.dilation, self.groups)
class HalfHashedSeparable(nn.Module):
"""A depthwise grouped convolution followed by a HashedNet 1x1 convolution.
Grouped convolution could also be hashed, but it's not."""
def __init__(self, in_channels, out_channels, kernel_size, budget,
stride=1, padding=0, dilation=1, groups=1, bias=True):
super(HalfHashedSeparable, self).__init__()
        # needs 'hashed' in the name to get caught by the alternative weight
        # decay setting; the grouped convolution itself is not actually hashed
if kernel_size > 1:
self.grouped = nn.Conv2d(in_channels, in_channels, kernel_size,
stride=stride, padding=padding, dilation=dilation,
groups=in_channels, bias=False)
# we spent some of the budget on that grouped convolution
assert self.grouped.weight.numel() == reduce(lambda x,y: x*y, self.grouped.weight.size())
budget = budget - self.grouped.weight.numel()
assert budget > 0, \
"budget exceeded by grouped convolution: %i too many"%(-budget)
self.hashed = HashedConv2d(in_channels, out_channels, 1, budget,
bias=bias)
else:
self.grouped = None
self.hashed = HashedConv2d(in_channels, out_channels, 1, budget,
stride=stride, padding=padding, dilation=dilation,
bias=bias)
def forward(self, x):
if self.grouped is not None:
x = self.grouped(x)
return self.hashed(x)
class HashedSeparable(nn.Module):
"""Separabled, where grouped and pointwise are both Hashed.."""
def __init__(self, in_channels, out_channels, kernel_size, budget,
stride=1, padding=0, dilation=1, groups=1, bias=True):
super(HashedSeparable, self).__init__()
        # needs 'hashed' in the name to get caught by the alternative weight
        # decay setting
grouped_params = float(in_channels * kernel_size * kernel_size)
pointwise_params = float(in_channels * out_channels)
total_params = grouped_params + pointwise_params
grouped_budget = int(budget*grouped_params/total_params)
pointwise_budget = int(budget*pointwise_params/total_params)
#print(total_params, grouped_budget, pointwise_budget)
if kernel_size > 1:
self.grouped = HashedConv2d(in_channels, in_channels, kernel_size,
grouped_budget, stride=stride, padding=padding,
dilation=dilation, groups=in_channels, bias=False)
stride = 1
else:
self.grouped = None
pointwise_budget = budget
        assert budget > 0, "budget must be greater than 0, was %i" % budget
self.hashed = HashedConv2d(in_channels, out_channels, 1,
pointwise_budget, stride=stride, bias=bias)
def forward(self, x):
if self.grouped is not None:
x = self.grouped(x)
return self.hashed(x)
if __name__ == '__main__':
from timeit import timeit
setup = "from __main__ import HashedConv2d; import torch; X = torch.randn(128, 256, 28, 28).cuda(); conv = HashedConv2d(256, 512, 3, 1000, bias=False).cuda()"
print("HashedConv2d: ", timeit("_ = conv(X)", setup=setup, number=100))
setup = "import torch.nn as nn; import torch; X = torch.randn(128, 256, 28, 28).cuda(); conv = nn.Conv2d(256, 512, 3, bias=False).cuda()"
print("Conv2d: ", timeit("_ = conv(X)", setup=setup, number=100))
setup = "from __main__ import HalfHashedSeparable; import torch; X = torch.randn(128, 256, 28, 28).cuda(); conv = HalfHashedSeparable(256, 512, 3, 5000, bias=False).cuda()"
print("HalfHashedSeparable: ", timeit("_ = conv(X)", setup=setup, number=100))
setup = "from __main__ import HashedSeparable; import torch; X = torch.randn(128, 256, 28, 28).cuda(); conv = HashedSeparable(256, 512, 3, 5000, bias=False).cuda()"
print("HashedSeparable: ", timeit("_ = conv(X)", setup=setup, number=100))
| 5,827 | 48.811966 | 176 | py |
| deficient-efficient | deficient-efficient-master/models/darts.py |
# DARTS network definition
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
from torch.utils.checkpoint import checkpoint
from collections import namedtuple
from .blocks import DepthwiseSep
from .wide_resnet import group_lowrank, compression
#############################
# Training utils start here #
#############################
class Cutout(object):
def __init__(self, length):
self.length = length
def __call__(self, img):
h, w = img.size(1), img.size(2)
mask = np.ones((h, w), np.float32)
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - self.length // 2, 0, h)
y2 = np.clip(y + self.length // 2, 0, h)
x1 = np.clip(x - self.length // 2, 0, w)
x2 = np.clip(x + self.length // 2, 0, w)
mask[y1: y2, x1: x2] = 0.
mask = torch.from_numpy(mask)
mask = mask.expand_as(img)
img *= mask
return img
def _data_transforms_cifar10():
CIFAR_MEAN = [0.49139968, 0.48215827, 0.44653124]
CIFAR_STD = [0.24703233, 0.24348505, 0.26158768]
train_transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
])
if True: # always use cutout
train_transform.transforms.append(Cutout(16))
valid_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
])
return train_transform, valid_transform
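# Illustrative usage (not in the original file): these transforms are intended
# for CIFAR-10 loaders, e.g. something along the lines of
#
#   train_tf, valid_tf = _data_transforms_cifar10()
#   trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
#                                           download=True, transform=train_tf)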
#####################################
# End of training utils #
#####################################
# Model definition code starts here #
#####################################
Genotype = namedtuple('Genotype', 'normal normal_concat reduce reduce_concat')
DARTS_V2 = Genotype(normal=[('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 1), ('skip_connect', 0), ('skip_connect', 0), ('dil_conv_3x3', 2)], normal_concat=[2, 3, 4, 5], reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect', 2), ('max_pool_3x3', 1), ('max_pool_3x3', 0), ('skip_connect', 2), ('skip_connect', 2), ('max_pool_3x3', 1)], reduce_concat=[2, 3, 4, 5])
OPS = {
'none' : lambda C, stride, affine, conv: Zero(stride),
'avg_pool_3x3' : lambda C, stride, affine, conv: nn.AvgPool2d(3, stride=stride, padding=1, count_include_pad=False),
'max_pool_3x3' : lambda C, stride, affine, conv: nn.MaxPool2d(3, stride=stride, padding=1),
'skip_connect' : lambda C, stride, affine, conv: Identity() if stride == 1 else FactorizedReduce(C, C, conv, affine=affine),
'sep_conv_3x3' : lambda C, stride, affine, conv: SepConv(C, C, 3, stride, 1, Conv=conv, affine=affine),
'sep_conv_5x5' : lambda C, stride, affine, conv: SepConv(C, C, 5, stride, 2, Conv=conv, affine=affine),
'sep_conv_7x7' : lambda C, stride, affine, conv: SepConv(C, C, 7, stride, 3, Conv=conv, affine=affine),
'dil_conv_3x3' : lambda C, stride, affine, conv: DilConv(C, C, 3, stride, 2, 2, Conv=conv, affine=affine),
'dil_conv_5x5' : lambda C, stride, affine, conv: DilConv(C, C, 5, stride, 4, 2, Conv=conv, affine=affine),
# this is never used so you can remove it without hitting any errors
# 'conv_7x1_1x7' : lambda C, stride, affine, conv: nn.Sequential(
# nn.ReLU(inplace=False),
# nn.Conv2d(C, C, (1,7), stride=(1, stride), padding=(0, 3), bias=False),
# nn.Conv2d(C, C, (7,1), stride=(stride, 1), padding=(3, 0), bias=False),
# nn.BatchNorm2d(C, affine=affine)
# ),
}
class ReLUConvBN(nn.Module):
def __init__(self, C_in, C_out, ConvClass, kernel_size, stride, padding, affine=True):
super(ReLUConvBN, self).__init__()
#ConvClass = nn.Conv2d if ConvClass is DepthwiseSep else ConvClass
self.op = nn.Sequential(
nn.ReLU(inplace=False),
ConvClass(C_in, C_out, kernel_size, stride=stride, padding=padding, bias=False),
nn.BatchNorm2d(C_out, affine=affine)
)
def forward(self, x):
#return self.op(x)
return checkpoint(self.op, x)
class DilConv(nn.Module):
def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation, Conv=DepthwiseSep, affine=True):
super(DilConv, self).__init__()
self.op = nn.Sequential(
nn.ReLU(inplace=False),
Conv(C_in, C_out, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=False),
nn.BatchNorm2d(C_out, affine=affine),
)
def forward(self, x):
return self.op(x)
class SepConv(nn.Module):
def __init__(self, C_in, C_out, kernel_size, stride, padding, Conv=DepthwiseSep, affine=True):
super(SepConv, self).__init__()
self.op = nn.Sequential(
nn.ReLU(inplace=False),
Conv(C_in, C_in, kernel_size=kernel_size, stride=stride, padding=padding, bias=False),
nn.BatchNorm2d(C_in, affine=affine),
nn.ReLU(inplace=False),
Conv(C_in, C_out, kernel_size=kernel_size, stride=1, padding=padding, bias=False),
nn.BatchNorm2d(C_out, affine=affine),
)
def forward(self, x):
return self.op(x)
class Identity(nn.Module):
def __init__(self):
super(Identity, self).__init__()
def forward(self, x):
return x
class Zero(nn.Module):
def __init__(self, stride):
super(Zero, self).__init__()
self.stride = stride
def forward(self, x):
if self.stride == 1:
return x.mul(0.)
return x[:,:,::self.stride,::self.stride].mul(0.)
class FactorizedReduce(nn.Module):
def __init__(self, C_in, C_out, ConvClass, affine=True):
super(FactorizedReduce, self).__init__()
assert C_out % 2 == 0
self.relu = nn.ReLU(inplace=False)
#ConvClass = nn.Conv2d if ConvClass is DepthwiseSep else ConvClass
#ConvClass = nn.Conv2d
self.conv_1 = ConvClass(C_in, C_out // 2, 1, stride=2, padding=0, bias=False)
self.conv_2 = ConvClass(C_in, C_out // 2, 1, stride=2, padding=0, bias=False)
self.bn = nn.BatchNorm2d(C_out, affine=affine)
def forward(self, x):
def factorized_reduce(x):
x = self.relu(x)
out = torch.cat([self.conv_1(x), self.conv_2(x[:,:,1:,1:])], dim=1)
return self.bn(out)
#out = checkpoint(cat_1, *[self.conv_1(x), self.conv_2(x[:,:,1:,1:])])
#return factorized_reduce(x)
return checkpoint(factorized_reduce, x)
#return out
class Cell(nn.Module):
def __init__(self, genotype, C_prev_prev, C_prev, C, reduction, reduction_prev, Conv):
super(Cell, self).__init__()
self.Conv = Conv
if reduction_prev:
self.preprocess0 = FactorizedReduce(C_prev_prev, C, Conv)
else:
self.preprocess0 = ReLUConvBN(C_prev_prev, C, Conv, 1, 1, 0)
self.preprocess1 = ReLUConvBN(C_prev, C, Conv, 1, 1, 0)
if reduction:
op_names, indices = zip(*genotype.reduce)
concat = genotype.reduce_concat
else:
op_names, indices = zip(*genotype.normal)
concat = genotype.normal_concat
self._compile(C, op_names, indices, concat, reduction)
def _compile(self, C, op_names, indices, concat, reduction):
assert len(op_names) == len(indices)
self._steps = len(op_names) // 2
self._concat = concat
self.multiplier = len(concat)
self._ops = nn.ModuleList()
for name, index in zip(op_names, indices):
stride = 2 if reduction and index < 2 else 1
op = OPS[name](C, stride, True, self.Conv)
self._ops += [op]
self._indices = indices
def forward(self, s0, s1, drop_prob):
s0 = self.preprocess0(s0)
s1 = self.preprocess1(s1)
states = [s0, s1]
for i in range(self._steps):
h1 = states[self._indices[2*i]]
h2 = states[self._indices[2*i+1]]
op1 = self._ops[2*i]
op2 = self._ops[2*i+1]
h1 = op1(h1)
h2 = op2(h2)
if self.training and drop_prob > 0.:
if not isinstance(op1, Identity):
h1 = drop_path(h1, drop_prob)
if not isinstance(op2, Identity):
h2 = drop_path(h2, drop_prob)
s = h1 + h2
states += [s]
return torch.cat([states[i] for i in self._concat], dim=1)
#return checkpoint(cat_1, *[states[i] for i in self._concat])
class AuxiliaryHeadCIFAR(nn.Module):
def __init__(self, C, num_classes):
"""assuming input size 8x8"""
super(AuxiliaryHeadCIFAR, self).__init__()
self.features = nn.Sequential(
nn.ReLU(inplace=True),
nn.AvgPool2d(5, stride=3, padding=0, count_include_pad=False), # image size = 2 x 2
nn.Conv2d(C, 128, 1, bias=False),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
nn.Conv2d(128, 768, 2, bias=False),
nn.BatchNorm2d(768),
nn.ReLU(inplace=True)
)
self.classifier = nn.Linear(768, num_classes)
def forward(self, x):
x = self.features(x)
x = self.classifier(x.view(x.size(0),-1))
return x
def drop_path(x, drop_prob):
if drop_prob > 0.:
keep_prob = 1.-drop_prob
mask = torch.FloatTensor(x.size(0), 1, 1, 1).bernoulli_(keep_prob)
mask = mask.to(x.device)
x.div_(keep_prob)
x.mul_(mask)
return x
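# Illustrative note (not in the original file): drop_path is per-sample
# stochastic depth. With drop_prob=0.2, roughly 20% of the samples in a batch
# have this branch zeroed, and survivors are scaled by 1/keep_prob = 1.25 so
# the expectation is unchanged, e.g.
#
#   x = torch.ones(1000, 8, 4, 4)
#   y = drop_path(x, 0.2)   # each y[i] is all zeros or all 1.25 (in place)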
class DARTS(nn.Module):
def __init__(self, ConvClass=DepthwiseSep, C=36, num_classes=10, layers=20, auxiliary=True,
genotype=DARTS_V2, drop_path_prob=0.2):
self.kwargs = dict(ConvClass=ConvClass, C=C, num_classes=num_classes,
layers=layers, auxiliary=auxiliary, genotype=genotype,
drop_path_prob=drop_path_prob)
super(DARTS, self).__init__()
self.drop_path_prob = drop_path_prob
self._layers = layers
self._auxiliary = auxiliary
stem_multiplier = 3
C_curr = stem_multiplier*C
self.stem = nn.Sequential(
nn.Conv2d(3, C_curr, 3, padding=1, bias=False),
nn.BatchNorm2d(C_curr)
)
C_prev_prev, C_prev, C_curr = C_curr, C_curr, C
self.cells = nn.ModuleList()
reduction_prev = False
for i in range(layers):
if i in [layers//3, 2*layers//3]:
C_curr *= 2
reduction = True
else:
reduction = False
cell = Cell(genotype, C_prev_prev, C_prev, C_curr, reduction, reduction_prev, ConvClass)
reduction_prev = reduction
self.cells += [cell]
C_prev_prev, C_prev = C_prev, cell.multiplier*C_curr
if i == 2*layers//3:
C_to_auxiliary = C_prev
if auxiliary:
self.auxiliary_head = AuxiliaryHeadCIFAR(C_to_auxiliary, num_classes)
self.global_pooling = nn.AdaptiveAvgPool2d(1)
self.classifier = nn.Linear(C_prev, num_classes)
def compression_ratio(self):
return compression(self.__class__, self.kwargs)
def grouped_parameters(self, weight_decay):
return group_lowrank(self.named_parameters(), weight_decay,
self.compression_ratio())
def forward(self, input):
logits_aux = None
s0 = s1 = self.stem(input)
cell_AMs = []
attention = lambda x: F.normalize(x.pow(2).mean(1).view(x.size(0), -1))
layers = len(self.cells)
for i, cell in enumerate(self.cells):
s0, s1 = s1, cell(s0, s1, self.drop_path_prob)
if i in [layers//3, 2*layers//3]:
cell_AMs.append(attention(s0))
if i == 2*self._layers//3:
if self._auxiliary and self.training:
logits_aux = self.auxiliary_head(s1)
out = self.global_pooling(s1)
logits = self.classifier(out.view(out.size(0),-1))
return logits, cell_AMs, logits_aux
if __name__ == '__main__':
darts = DARTS()
X = torch.randn(10,3,32,32)
print(darts(X))
| 11,450 | 32.979228 | 429 | py |
| deficient-efficient | deficient-efficient-master/models/wide_resnet.py |
# network definition
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from collections import OrderedDict
# wildcard import for legacy reasons
if __name__ == '__main__':
from blocks import *
else:
from .blocks import *
def parse_options(convtype, blocktype):
# legacy cmdline argument parsing
if isinstance(convtype,str):
conv = conv_function(convtype)
else:
raise NotImplementedError("Tuple convolution specification no longer supported.")
if blocktype =='Basic':
block = BasicBlock
elif blocktype =='Bottle':
block = BottleBlock
elif blocktype =='Old':
block = OldBlock
else:
block = None
return conv, block
def group_lowrank(named_parameters, weight_decay, compression_ratio):
lowrank_params, other_params = [], []
for n,p in named_parameters:
if 'A' in n or 'D' in n:
lowrank_params.append(p)
elif 'shuffle' in n:
lowrank_params.append(p)
elif 'hashed' in n:
lowrank_params.append(p)
elif 'weight_core' in n or 'weight_u' in n:
lowrank_params.append(p)
elif 'lowrank' in n:
lowrank_params.append(p)
else:
other_params.append(p)
return [{'params': lowrank_params,
'weight_decay': compression_ratio*weight_decay},
{'params': other_params}]
def compression(model_class, kwargs):
    # assume kwargs contains the convolution class we've chosen (as 'ConvClass')
compressed_params = sum([p.numel() for p in
model_class(**kwargs).parameters()])
if 'genotype' in list(kwargs.keys()):
# standard conv with DARTS is DepthwiseSep
kwargs['ConvClass'] = DepthwiseSep
else:
# everything else it's Conv
kwargs['ConvClass'] = Conv
uncompressed_params = sum([p.numel() for p in
model_class(**kwargs).parameters()])
ratio = float(compressed_params)/float(uncompressed_params)
print("Compression: %i to %i, ratio %.2f"%(uncompressed_params,
compressed_params, ratio))
return ratio
class WideResNet(nn.Module):
def __init__(self, depth, widen_factor, ConvClass, block, num_classes=10, dropRate=0.0, s = 1):
super(WideResNet, self).__init__()
self.kwargs = dict(depth=depth, widen_factor=widen_factor, ConvClass=ConvClass,
block=block, num_classes=num_classes, dropRate=dropRate, s=s)
nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
nChannels = [int(a) for a in nChannels]
        assert ((depth - 4) % 6 == 0)  # WideResNet depths are of the form 6n + 4: three groups of n two-conv blocks
n = (depth - 4) // 6
assert n % s == 0, 'n mod s must be zero'
# 1st conv before any network block
self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
padding=1, bias=False)
# 1st block
self.block1 = torch.nn.ModuleList()
for i in range(s):
self.block1.append(NetworkBlock(int(n//s), nChannels[0] if i == 0 else nChannels[1],
nChannels[1], block, 1, dropRate, ConvClass))
# 2nd block
self.block2 = torch.nn.ModuleList()
for i in range(s):
self.block2.append(NetworkBlock(int(n//s), nChannels[1] if i == 0 else nChannels[2],
nChannels[2], block, 2 if i == 0 else 1, dropRate, ConvClass))
# 3rd block
self.block3 = torch.nn.ModuleList()
for i in range(s):
self.block3.append(NetworkBlock(int(n//s), nChannels[2] if i == 0 else nChannels[3],
nChannels[3], block, 2 if i == 0 else 1, dropRate, ConvClass))
# global average pooling and classifier
self.bn1 = nn.BatchNorm2d(nChannels[3])
self.relu = nn.ReLU(inplace=True)
self.fc = nn.Linear(nChannels[3], num_classes)
self.nChannels = nChannels[3]
# normal is better than uniform initialisation
# this should really be in `self.reset_parameters`
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
try:
m.weight.data.normal_(0, math.sqrt(2. / n))
except AttributeError:
pass
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def compression_ratio(self):
return compression(self.__class__, self.kwargs)
def grouped_parameters(self, weight_decay):
# iterate over parameters and separate those in ACDC layers
return group_lowrank(self.named_parameters(), weight_decay,
self.compression_ratio())
def forward(self, x):
activation_maps = []
out = self.conv1(x)
#activations.append(out)
attention = lambda x: F.normalize(x.pow(2).mean(1).view(x.size(0), -1))
for sub_block in self.block1:
out = sub_block(out)
activation_maps.append(attention(out))
for sub_block in self.block2:
out = sub_block(out)
activation_maps.append(attention(out))
for sub_block in self.block3:
out = sub_block(out)
activation_maps.append(attention(out))
out = self.relu(self.bn1(out))
out = F.avg_pool2d(out, 8)
out = out.view(-1, self.nChannels)
return self.fc(out), activation_maps
class ResNet(nn.Module):
def __init__(self, ConvClass, layers, block=Bottleneck, widen=1,
num_classes=1000, expansion=4):
self.kwargs = dict(layers=layers, expansion=expansion,
ConvClass=ConvClass, widen=widen, num_classes=num_classes,
block=block)
self.expansion = expansion
super(ResNet, self).__init__()
self.Conv = ConvClass
self.inplanes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64*widen, layers[0])
self.layer2 = self._make_layer(block, 128*widen, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256*widen, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512*widen, layers[3], stride=2)
self.avgpool = nn.AvgPool2d((7, 7), 1, 0)
self.fc = nn.Linear(512*widen * self.expansion, num_classes)
#self.fc = self.Conv(512*widen * self.expansion, num_classes, kernel_size=1, bias=True)
for m in self.modules():
if isinstance(m, nn.Conv2d):
if hasattr(m, 'weight'):
w = m.weight
nn.init.kaiming_normal_(w, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * self.expansion:
downsample = nn.Sequential(OrderedDict([
('conv', self.Conv(self.inplanes, planes * self.expansion,
kernel_size=1, stride=stride, padding=0, bias=False)),
('bn', nn.BatchNorm2d(planes * self.expansion))
]))
layers = []
layers.append(block(self.inplanes, planes, self.Conv, stride, downsample, self.expansion))
self.inplanes = planes * self.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, self.Conv, expansion=self.expansion))
return nn.Sequential(*layers)
def compression_ratio(self):
return compression(self.__class__, self.kwargs)
def grouped_parameters(self, weight_decay):
# iterate over parameters and separate those in other layer types
return group_lowrank(self.named_parameters(), weight_decay,
self.compression_ratio())
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
attention_maps = []
attention = lambda x: F.normalize(x.pow(2).mean(1).view(x.size(0), -1))
        if self.training:
x = self.layer1(x)
#x = checkpoint(self.layer1, x)
#x = checkpoint_sequential(self.layer1, 1, x)
else:
x = self.layer1(x)
attention_maps.append(attention(x))
        if self.training:
x = self.layer2(x)
#x = checkpoint(self.layer2, x)
#x = checkpoint_sequential(self.layer2, 1, x)
else:
x = self.layer2(x)
attention_maps.append(attention(x))
        if self.training:
x = self.layer3(x)
#x = checkpoint(self.layer3, x)
#x = checkpoint_sequential(self.layer3, 1, x)
else:
x = self.layer3(x)
attention_maps.append(attention(x))
        if self.training:
x = self.layer4(x)
#x = checkpoint(self.layer4, x)
#x = checkpoint_sequential(self.layer4, 1, x)
else:
x = self.layer4(x)
attention_maps.append(attention(x))
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
#x = x.view(x.size(0), -1)
return x, attention_maps
def WRN_50_2(Conv, Block=None):
assert Block is None
return ResNet(Conv, [3, 4, 6, 3], widen=2, expansion=2)
def test():
net = WideResNet(28, 10, conv_function("Shuffle_7"), BasicBlock)
params = net.grouped_parameters(5e-4)
params = [d['params'] for d in params]
print("Low-rank: ", sum([p.numel() for p in params[0]]))
print("Full-rank: ", sum([p.numel() for p in params[1]]))
print("FC: ", sum([p.numel() for p in net.fc.parameters()]))
net = WRN_50_2(conv_function("Shuffle_7"))
params = net.grouped_parameters(5e-4)
params = [d['params'] for d in params]
print("Low-rank: ", sum([p.numel() for p in params[0]]))
print("Full-rank: ", sum([p.numel() for p in params[1]]))
print("FC: ", sum([p.numel() for p in net.fc.parameters()]))
x = torch.randn(1,3,224,224).float()
y, _ = net(Variable(x))
print(y.size())
if __name__ == '__main__':
test()
| 10,717 | 36.872792 | 106 | py |
| deficient-efficient | deficient-efficient-master/models/__init__.py |
from .wide_resnet import *
#from .resnet import *
| 50 | 16 | 26 | py |
| deficient-efficient | deficient-efficient-master/models/decomposed.py |
# Substitute layers that explicitly decompose the weight tensors of convolutional layers.
# All are implemented using tntorch: https://github.com/rballester/tntorch
# All also use a separable design: the low-rank approximate pointwise
# convolution is preceded by a depthwise grouped convolution.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import tntorch as tn
torch.set_default_dtype(torch.float32)
def dimensionize(t, d, rank_scale):
"""Take a tensor, t, and reshape so that it has d dimensions, of roughly
equal size."""
    # if t doesn't trivially reshape, we have to do some work to pick the dimensions
N = t.numel()
# do d-th root with log
equal = math.exp((1./d)*math.log(N))
# if this is an integer, our work here is done
if abs(round(equal) - equal) < 1e-6:
dims = [int(round(equal))]*d
# if the tensor already has d dimensions
elif t.ndimension() == d:
dims = list(t.size())
# oh no, then we want to build up a list of dimensions it *does* divide by
else:
dims = []
for i in range(d-1):
divisor = closest_divisor(N, int(round(equal)))
dims.append(divisor)
N = N//divisor
dims.append(N)
    # the rank between adjacent dimensions can be at most the smaller of the two
ranks = {}
ranks['ranks_tt'] = [max(1,int(round(rank_scale*min(b,a)))) for b,a in zip(dims, dims[1:])]
ranks['ranks_tucker'] = [max(1,int(round(rank_scale*d))) for d in dims]
ranks['ranks_cp'] = max(1,int(round(rank_scale*min(dims))))
return t.view(*dims), ranks
def closest_divisor(N, d):
if N < d:
return N
while N%d != 0:
d += 1
return d
class TnTorchConv2d(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, rank_scale,
TnConstructor, stride=1, padding=0, dilation=1, groups=1,
bias=True):
self.TnConstructor = TnConstructor
assert groups == 1
if kernel_size == 1:
super(TnTorchConv2d, self).__init__(in_channels, out_channels, 1,
stride=stride, padding=padding, dilation=dilation, bias=bias)
elif kernel_size > 1:
super(TnTorchConv2d, self).__init__(in_channels, out_channels, 1, bias=bias)
self.grouped = nn.Conv2d(in_channels, in_channels,
kernel_size, stride=stride, padding=padding, dilation=dilation,
groups=in_channels, bias=False)
self.rank_scale = rank_scale
self.tn_weight = self.TnConstructor(self.weight.data.squeeze(), rank_scale=self.rank_scale)
# store the correct size for this weight
self.weight_size = self.weight.size()
# check the fit to the weight initialisation
self.store_metrics(self.weight)
# delete the original weight
del self.weight
# then register the cores of the Tensor Train as parameters
self.register_tnparams(self.tn_weight.cores, self.tn_weight.Us)
def register_tnparams(self, cores, Us):
cores = [] if all([c is None for c in cores]) else cores
Us = [] if all([u is None for u in Us]) else Us
# tensor train or cp cores
for i,core in enumerate(cores):
core_name = 'weight_core_%i'%i
if hasattr(self, core_name):
delattr(self, core_name)
core.requires_grad = True
self.register_parameter(core_name, nn.Parameter(core))
# replace Parameter in tn.Tensor object
self.tn_weight.cores[i] = getattr(self, core_name)
for i, u in enumerate(Us):
u_name = 'weight_u_%i'%i
if hasattr(self, u_name):
delattr(self, u_name)
u.requires_grad = True
self.register_parameter(u_name, nn.Parameter(u))
# replace Parameter in tn.Tensor object
self.tn_weight.Us[i] = getattr(self, u_name)
def conv_weight(self):
weight = self.tn_weight.torch()
n,d,_,_ = self.weight_size
return weight.view(n,d,1,1)
def reset_parameters(self):
if hasattr(self, 'tn_weight'):
# full rank weight tensor
weight = self.conv_weight()
else:
weight = self.weight.data
n = self.in_channels
for k in self.kernel_size:
n *= k
stdv = 1. / math.sqrt(n)
weight.data.uniform_(-stdv, stdv)
if hasattr(self, 'tn_weight'):
self.tn_weight = self.TnConstructor(weight.data.squeeze(), rank_scale=self.rank_scale)
# update cores
self.register_tnparams(self.tn_weight.cores, self.tn_weight.Us)
else:
self.weight.data = weight
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, x):
if hasattr(self, 'grouped'):
out = self.grouped(x)
else:
out = x
weight = self.conv_weight()
return F.conv2d(out, weight, self.bias, self.stride, self.padding,
self.dilation, self.groups)
def store_metrics(self, full):
t = self.tn_weight
full = full.view(t.torch().size())
self.compression = (full.numel(), t.numel(), full.numel() / t.numel())
self.relative_error = tn.relative_error(full, t)
self.rmse = tn.rmse(full, t)
self.r_squared = tn.r_squared(full, t)
def extra_repr(self):
extra = []
extra.append(self.tn_weight.__repr__())
extra.append('Compression ratio: {}/{} = {:g}'.format(*self.compression))
extra.append('Relative error: %f'%self.relative_error)
extra.append('RMSE: %f'%self.rmse)
extra.append('R^2: %f'%self.r_squared)
return "\n".join(extra)
class TensorTrain(TnTorchConv2d):
def __init__(self, in_channels, out_channels, kernel_size, rank_scale,
dimensions, stride=1, padding=0, dilation=1, groups=1, bias=True):
def TT(tensor, rank_scale):
tensor, ranks = dimensionize(tensor, dimensions, rank_scale)
return tn.Tensor(tensor, ranks_tt=ranks['ranks_tt'])
super(TensorTrain, self).__init__(in_channels, out_channels,
kernel_size, rank_scale, TT, stride=stride, padding=padding,
dilation=dilation, groups=groups, bias=bias)
class Tucker(TnTorchConv2d):
def __init__(self, in_channels, out_channels, kernel_size, rank_scale,
dimensions, stride=1, padding=0, dilation=1, groups=1, bias=True):
def tucker(tensor, rank_scale):
tensor, ranks = dimensionize(tensor, dimensions, rank_scale)
return tn.Tensor(tensor, ranks_tucker=ranks['ranks_tucker'])
super(Tucker, self).__init__(in_channels, out_channels, kernel_size,
rank_scale, tucker, stride=stride, padding=padding,
dilation=dilation, groups=groups, bias=bias)
class CP(TnTorchConv2d):
def __init__(self, in_channels, out_channels, kernel_size, rank_scale,
dimensions, stride=1, padding=0, dilation=1, groups=1, bias=True):
def cp(tensor, rank_scale):
tensor, ranks = dimensionize(tensor, dimensions, rank_scale)
return tn.Tensor(tensor, ranks_cp=ranks['ranks_cp'])
super(CP, self).__init__(in_channels, out_channels, kernel_size,
rank_scale, cp, stride=stride, padding=padding,
dilation=dilation, groups=groups, bias=bias)
if __name__ == '__main__':
for ConvClass in [TensorTrain, Tucker, CP]:
X = torch.randn(5,16,32,32)
tnlayer = ConvClass(16,16,3,0.5,2,bias=False)
tnlayer.reset_parameters()
print(tnlayer)
tnlayer.zero_grad()
y = tnlayer(X)
l = y.sum()
l.backward()
for n,p in tnlayer.named_parameters():
assert p.requires_grad, n
assert torch.abs(tnlayer.weight_core_0.grad - tnlayer.tn_weight.cores[0].grad).max() < 1e-5
# same output on the GPU
tnlayer, X = tnlayer.cuda(), X.cuda()
assert torch.abs(tnlayer(X).cpu() - y).max() < 1e-5
for ConvClass in [TensorTrain, Tucker, CP]:
X = torch.randn(5,16,32,32)
tnlayer = ConvClass(16,16,3,0.5,4,bias=False)
tnlayer.reset_parameters()
print(tnlayer)
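    # Illustrative check (not in the original script): dimensionize reshapes a
    # weight matrix into d roughly equal dimensions and proposes ranks for each
    # decomposition, scaled by rank_scale.
    w = torch.randn(64, 32)
    reshaped, ranks = dimensionize(w, 4, rank_scale=0.5)
    print("reshaped to", tuple(reshaped.size()), "with ranks", ranks)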
| 8,252 | 39.856436 | 99 | py |
| deficient-efficient | deficient-efficient-master/models/MobileNetV2.py |
import torch
import torch.nn as nn
import math
# wildcard import for legacy reasons
if __name__ == '__main__':
import sys
sys.path.append("..")
from models.blocks import *
from models.wide_resnet import compression, group_lowrank
# only used in the first convolution, which we do not substitute by convention
def conv_bn(inp, oup, stride):
return nn.Sequential(
nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True)
)
# only used for the final fully connected layer
def conv_1x1_bn(inp, oup, ConvClass):
return nn.Sequential(
ConvClass(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True)
)
class InvertedResidual(nn.Module):
def __init__(self, inp, oup, stride, expand_ratio, ConvClass):
super(InvertedResidual, self).__init__()
self.stride = stride
self.Conv = ConvClass
assert stride in [1, 2]
hidden_dim = round(inp * expand_ratio)
self.use_res_connect = self.stride == 1 and inp == oup
if expand_ratio == 1:
self.conv = nn.Sequential(
# dw
nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),
nn.BatchNorm2d(hidden_dim),
nn.ReLU6(inplace=True),
# pw-linear
self.Conv(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
)
else:
self.conv = nn.Sequential(
# pw
self.Conv(inp, hidden_dim, 1, 1, 0, bias=False),
nn.BatchNorm2d(hidden_dim),
nn.ReLU6(inplace=True),
# dw
nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),
nn.BatchNorm2d(hidden_dim),
nn.ReLU6(inplace=True),
# pw-linear
self.Conv(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
)
def forward(self, x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
class MobileNetV2(nn.Module):
def __init__(self, ConvClass, block=None, n_class=1000,
input_size=224, width_mult=1.):
super(MobileNetV2, self).__init__()
self.kwargs = dict(ConvClass=ConvClass, block=block, n_class=n_class,
input_size=input_size, width_mult=width_mult)
block = InvertedResidual
self.Conv = ConvClass
input_channel = 32
last_channel = 1280
interverted_residual_setting = [
# t, c, n, s
[1, 16, 1, 1],
[6, 24, 2, 2],
[6, 32, 3, 2],
[6, 64, 4, 2],
[6, 96, 3, 1],
[6, 160, 3, 2],
[6, 320, 1, 1],
]
# building first layer
assert input_size % 32 == 0
input_channel = int(input_channel * width_mult)
self.last_channel = int(last_channel * width_mult) if width_mult > 1.0 else last_channel
self.features = [conv_bn(3, input_channel, 2)]
# building inverted residual blocks
for t, c, n, s in interverted_residual_setting:
output_channel = int(c * width_mult)
for i in range(n):
if i == 0:
self.features.append(block(input_channel, output_channel, s, expand_ratio=t, ConvClass=self.Conv))
else:
self.features.append(block(input_channel, output_channel, 1, expand_ratio=t, ConvClass=self.Conv))
input_channel = output_channel
# building last several layers
self.features.append(conv_1x1_bn(input_channel, self.last_channel, self.Conv))
# make it nn.Sequential
self.features = nn.Sequential(*self.features)
# building classifier
self.classifier_conv = self.Conv(self.last_channel, n_class, 1, 1, 0, bias=True)
        # self.classifier = nn.Sequential(
        #     nn.Dropout(0.2),  # dropout removed for training, per the upstream repo
        #     nn.Linear(self.last_channel, n_class),
        # )
self._initialize_weights()
def classifier(self, x):
n, c = x.size()
x = self.classifier_conv(x.view(n,c,1,1))
n, c, _, _ = x.size()
return x.view(n,c)
def forward(self, x):
#y_orig = self.features(x)
attention_maps = []
attention = lambda x: F.normalize(x.pow(2).mean(1).view(x.size(0), -1))
y = x
for block in self.features:
y = block(y)
if isinstance(block, InvertedResidual):
if block.stride > 1:
attention_maps.append(attention(y))
#error = torch.abs(y-y_orig).max()
#assert error < 1e-2, f"Error {error} above 0.01"
x = y
x = x.mean(3).mean(2)
x = self.classifier(x)
return x, attention_maps
def compression_ratio(self):
return compression(self.__class__, self.kwargs)
def grouped_parameters(self, weight_decay):
return group_lowrank(self.named_parameters(), weight_decay,
self.compression_ratio())
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
if hasattr(m, 'weight'):
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
n = m.weight.size(1)
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
def save_reference():
    net = MobileNetV2(Conv)
    net.eval()
    x = torch.randn(1,3,224,224).float()
    y, _ = net(x)
    print(y.size())
torch.save(x, "reference_input_mobilenet.torch")
torch.save(y, "reference_output_mobilenet.torch")
torch.save(net.state_dict(), "reference_state_mobilenet.torch")
def match_keys(net, state):
nstate = net.state_dict()
old_keys = [k for k in state]
for i, k in enumerate(nstate):
p = state[old_keys[i]]
if i == (len(old_keys)-2):
n,m = p.size()
nstate[k] = p.view(n,m,1,1)
else:
nstate[k] = p
return nstate
def test():
import os
net = MobileNetV2(Conv)
if os.path.exists("reference_state_mobilenet.torch"):
state = torch.load("reference_state_mobilenet.torch")
state = match_keys(net, state)
net.load_state_dict(state)
net.eval()
x = torch.load("reference_input_mobilenet.torch")
else:
x = torch.randn(1,3,224,224).float()
y, _ = net(Variable(x))
print(y.size())
# check if these match the test weights
if os.path.exists("reference_output_mobilenet.torch"):
ref_output = torch.load("reference_output_mobilenet.torch")
error = torch.abs(ref_output - y).max()
print(f"Error: {error}, Max logit: {y.max()}/{ref_output.max()}, Min logit: {y.min()}/{ref_output.min()}")
state = {
'net': net.state_dict(),
'epoch': 150,
'args': None,
'width': None,
'depth': None,
'conv': 'Conv',
'blocktype': None,
'module': None,
'train_losses': None,
'train_errors': None,
'val_losses': None,
'val_errors': [28.2],
}
torch.save(state, "mobilenetv2.tonylins.t7")
def test_compression():
net = MobileNetV2(Conv)
#net = MobileNetV2(conv_function('Hashed_0.1'))
nparams = lambda x: sum([p.numel() for p in x.parameters()])
for block in net.features:
print(nparams(block))
for x in block:
print(x)
print(nparams(x))
#CompressedConv = conv_function("Hashed_0.1")
for conv in ['Shuffle_%i'%i for i in [4,8,16,32]]+['Hashed_0.01']:
print(conv)
CompressedConv = conv_function(conv)
net = MobileNetV2(CompressedConv)
print(" ", net.compression_ratio())
if __name__ == '__main__':
test()
#test_compression()
| 8,316 | 33.086066 | 118 | py |
| deficient-efficient | deficient-efficient-master/models/blocks.py |
# blocks and convolution definitions
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.checkpoint import checkpoint, checkpoint_sequential
if __name__ == 'blocks' or __name__ == '__main__':
from hashed import HashedConv2d, HalfHashedSeparable, HashedSeparable
from decomposed import TensorTrain, Tucker, CP
else:
from .hashed import HashedConv2d, HalfHashedSeparable, HashedSeparable
from .decomposed import TensorTrain, Tucker, CP
def HashedDecimate(in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=False):
# Hashed Conv2d using 1/10 the original parameters
original_params = out_channels*in_channels*kernel_size*kernel_size // groups
budget = original_params//10
return HashedConv2d(in_channels, out_channels, kernel_size, budget,
stride=stride, padding=padding, dilation=dilation, groups=groups,
bias=bias)
def SepHashedDecimate(in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=False):
    # Separable hashed Conv2d using 1/10 the original parameters
assert groups == 1
original_params = out_channels*in_channels*kernel_size*kernel_size
budget = original_params//10
conv = HalfHashedSeparable(in_channels, out_channels, kernel_size,
budget, stride=stride, padding=padding, dilation=dilation,
groups=groups, bias=bias)
n_params = sum([p.numel() for p in conv.parameters()])
budget = budget + conv.hashed.bias.numel()
assert n_params <= budget, f"{n_params} > {budget}"
return conv
from pytorch_acdc.layers import FastStackedConvACDC
def ACDC(in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=False):
return FastStackedConvACDC(in_channels, out_channels, kernel_size, 12,
stride=stride, padding=padding, dilation=dilation, groups=groups,
bias=bias)
def OriginalACDC(in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=False):
return FastStackedConvACDC(in_channels, out_channels, kernel_size, 12,
stride=stride, padding=padding, dilation=dilation, groups=groups,
bias=bias, original=True)
class GenericLowRank(nn.Module):
"""A generic low rank layer implemented with a linear bottleneck, using two
Conv2ds in sequence. Preceded by a depthwise grouped convolution in keeping
with the other low-rank layers here."""
def __init__(self, in_channels, out_channels, kernel_size, rank, stride=1,
padding=0, dilation=1, groups=1, bias=False):
assert groups == 1
super(GenericLowRank, self).__init__()
if kernel_size > 1:
self.grouped = nn.Conv2d(in_channels, in_channels, kernel_size,
stride=stride, padding=padding, dilation=dilation,
groups=in_channels, bias=False)
self.lowrank_contract = nn.Conv2d(in_channels, rank, 1, bias=False)
self.lowrank_expand = nn.Conv2d(rank, out_channels, 1, bias=bias)
else:
self.grouped = None
self.lowrank_contract = nn.Conv2d(in_channels, rank, 1, stride=stride,
dilation=dilation, bias=False)
self.lowrank_expand = nn.Conv2d(rank, out_channels, 1, bias=bias)
def forward(self, x):
if self.grouped is not None:
x = self.grouped(x)
x = self.lowrank_contract(x)
return self.lowrank_expand(x)
class LowRank(nn.Module):
"""A generic low rank layer implemented with a linear bottleneck, using two
Conv2ds in sequence. Preceded by a depthwise grouped convolution in keeping
with the other low-rank layers here."""
def __init__(self, in_channels, out_channels, kernel_size, rank, stride=1,
padding=0, dilation=1, groups=1, bias=False):
assert groups == 1
assert out_channels%in_channels == 0
self.upsample = out_channels//in_channels
super(LowRank, self).__init__()
if kernel_size > 1:
self.grouped = nn.Conv2d(in_channels, in_channels, kernel_size,
stride=stride, padding=padding, dilation=dilation,
groups=in_channels, bias=False)
self.lowrank = nn.Conv2d(self.upsample*in_channels, rank, 1,
bias=bias)
else:
self.grouped = None
self.lowrank = nn.Conv2d(self.upsample*in_channels, rank, 1,
stride=stride, dilation=dilation, bias=bias)
def forward(self, x):
if self.grouped is not None:
x = self.grouped(x)
if self.upsample > 1:
x = x.repeat(1,self.upsample,1,1)
x = F.conv2d(x, self.lowrank.weight, None, self.lowrank.stride,
self.lowrank.padding, self.lowrank.dilation,
self.lowrank.groups)
return F.conv2d(x, self.lowrank.weight.permute(1,0,2,3),
self.lowrank.bias)
# from: https://github.com/kuangliu/pytorch-cifar/blob/master/models/shufflenet.py#L10-L19
class ShuffleBlock(nn.Module):
def __init__(self, groups):
super(ShuffleBlock, self).__init__()
self.groups = groups
def forward(self, x):
        '''Channel shuffle: [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,W] -> [N,C,H,W]'''
N,C,H,W = x.size()
g = self.groups
return x.view(N,g,C//g,H,W).permute(0,2,1,3,4).contiguous().view(N,C,H,W)
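# Minimal sanity sketch (hypothetical helper): with C=4 channels and g=2 groups the
# shuffle interleaves the groups, so channel order [0,1,2,3] becomes [0,2,1,3].
def _demo_channel_shuffle():
    x = torch.arange(4.).view(1, 4, 1, 1)
    return ShuffleBlock(groups=2)(x).view(-1).tolist()  # expect [0.0, 2.0, 1.0, 3.0]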
class LinearShuffleNet(nn.Module):
"""Linear version of the ShuffleNet block, minus the shortcut connection,
as we assume relevant shortcuts already exist in the network having a
substitution. When linear, this can be viewed as a low-rank tensor
decomposition."""
def __init__(self, in_channels, out_channels, kernel_size, shuffle_groups,
stride=1, padding=0, dilation=1, groups=1, bias=False):
assert groups == 1
super(LinearShuffleNet, self).__init__()
# why 4? https://github.com/jaxony/ShuffleNet/blob/master/model.py#L67
bottleneck_channels = out_channels // 4
self.shuffle_gconv1 = nn.Conv2d(in_channels, bottleneck_channels, 1,
groups=shuffle_groups, bias=False)
self.shuffle = ShuffleBlock(shuffle_groups)
self.shuffle_dwconv = nn.Conv2d(bottleneck_channels, bottleneck_channels,
kernel_size, stride=stride, padding=padding, dilation=dilation,
groups=bottleneck_channels, bias=False)
self.shuffle_gconv2 = nn.Conv2d(bottleneck_channels, out_channels, 1,
groups=shuffle_groups, bias=bias)
def forward(self, x):
x = self.shuffle_gconv1(x)
x = self.shuffle(x)
x = self.shuffle_dwconv(x)
return self.shuffle_gconv2(x)
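# Rough sketch (hypothetical helper): the two grouped 1x1 convs plus the depthwise
# 3x3 use far fewer weights than a dense 3x3 conv of the same shape, which is why
# this block works as a drop-in low-rank substitution.
def _demo_shuffle_params():
    dense = nn.Conv2d(64, 128, 3, padding=1, bias=False)   # 73728 weights
    shuffle = LinearShuffleNet(64, 128, 3, 4, padding=1)    # grouped 1x1 + depthwise 3x3 + grouped 1x1
    return (sum(p.numel() for p in dense.parameters()),
            sum(p.numel() for p in shuffle.parameters()))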
def cant_be_shuffled(shuffle_groups, in_channels, out_channels):
    # utility function: True if we can't instantiate a shufflenet block with this many groups
divides_in = in_channels%shuffle_groups == 0
divides_out = out_channels%shuffle_groups == 0
divides_bottleneck = (out_channels//4)%shuffle_groups == 0
return not (divides_in and divides_out and divides_bottleneck)
class DepthwiseSep(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True):
super(DepthwiseSep, self).__init__()
assert groups == 1
if kernel_size > 1:
self.grouped = nn.Conv2d(in_channels, in_channels, kernel_size,
stride=stride, padding=padding, dilation=dilation,
groups=in_channels, bias=False)
self.pointwise = nn.Conv2d(in_channels, out_channels, 1, bias=bias)
else:
self.pointwise = nn.Conv2d(in_channels, out_channels, 1,
stride=stride, padding=padding, dilation=dilation,
bias=bias)
def forward(self, x):
if hasattr(self, 'grouped'):
out = self.grouped(x)
else:
out = x
return self.pointwise(out)
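# Small sketch (hypothetical helper): a depthwise separable substitution uses
# roughly k*k*C + C*O weights instead of k*k*C*O for the dense equivalent.
def _demo_depthwise_sep_params():
    sep = DepthwiseSep(64, 128, 3, padding=1, bias=False)
    # grouped 3x3: 64*3*3 = 576, pointwise 1x1: 64*128 = 8192, total 8768
    # versus 64*128*3*3 = 73728 for a dense 3x3 conv
    return sum(p.numel() for p in sep.parameters())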
class Conv(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=1,
dilation=1, bias=False):
super(Conv, self).__init__()
# Dumb normal conv incorporated into a class
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
stride=stride, padding=padding, bias=bias, dilation=dilation)
def forward(self, x):
return self.conv(x)
def conv_function(convtype):
# if convtype contains an underscore, it must have a hyperparam in it
if "_" in convtype:
convtype, hyperparam = convtype.split("_")
if convtype == 'ACDC':
# then hyperparam controls how many layers in each conv
n_layers = int(round(float(hyperparam)))
def conv(in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=False):
return FastStackedConvACDC(in_channels, out_channels,
kernel_size, n_layers, stride=stride,
padding=padding, dilation=dilation, groups=groups,
bias=bias)
elif convtype == 'Hashed':
# then hyperparam controls relative budget for each layer
budget_scale = float(hyperparam)
def conv(in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=False):
            # Hashed Conv2d using a budget_scale fraction of the original parameters
original_params = out_channels*in_channels*kernel_size*kernel_size // groups
budget = int(original_params*budget_scale)
return HashedConv2d(in_channels, out_channels, kernel_size,
budget, stride=stride, padding=padding,
dilation=dilation, groups=groups, bias=bias)
elif convtype == 'SepHashed':
# then hyperparam controls relative budget for each layer
budget_scale = float(hyperparam)
def conv(in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=False):
original_params = out_channels*in_channels // groups
budget = int(original_params*budget_scale)
if kernel_size > 1: # budget for a grouped convolution
budget += in_channels*kernel_size*kernel_size
return HalfHashedSeparable(in_channels, out_channels, kernel_size,
budget, stride=stride, padding=padding,
dilation=dilation, groups=groups, bias=bias)
elif convtype == 'Generic':
rank_scale = float(hyperparam)
def conv(in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=False):
full_rank = max(in_channels,out_channels)
rank = int(rank_scale*full_rank)
return GenericLowRank(in_channels, out_channels, kernel_size,
rank, stride=stride, padding=padding,
dilation=dilation, groups=groups, bias=bias)
elif convtype == 'LR':
rank_scale = float(hyperparam)
def conv(in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=False):
full_rank = max(in_channels,out_channels)
rank = int(rank_scale*full_rank)
return LowRank(in_channels, out_channels, kernel_size,
rank, stride=stride, padding=padding,
dilation=dilation, groups=groups, bias=bias)
elif convtype == 'TensorTrain':
rank_scale = float(hyperparam)
def conv(in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=False):
return TensorTrain(in_channels, out_channels, kernel_size,
rank_scale, 3, stride=stride, padding=padding,
dilation=dilation, groups=groups, bias=bias)
elif convtype == 'Tucker':
rank_scale = float(hyperparam)
def conv(in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=False):
return Tucker(in_channels, out_channels, kernel_size,
rank_scale, 3, stride=stride, padding=padding,
dilation=dilation, groups=groups, bias=bias)
elif convtype == 'CP':
assert False, "Deprecated"
rank_scale = float(hyperparam)
def conv(in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=False):
return CP(in_channels, out_channels, kernel_size,
rank_scale, stride=stride, padding=padding,
dilation=dilation, groups=groups, bias=bias)
elif convtype == 'Shuffle':
def conv(in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=False):
shuffle_groups = int(hyperparam)
while cant_be_shuffled(shuffle_groups, in_channels, out_channels):
                shuffle_groups -= 1
return LinearShuffleNet(in_channels, out_channels, kernel_size,
shuffle_groups, stride=stride, padding=padding,
dilation=dilation, groups=groups, bias=bias)
else:
if convtype == 'Conv':
conv = Conv
elif convtype =='ACDC':
conv = ACDC
elif convtype =='OriginalACDC':
conv = OriginalACDC
elif convtype == 'HashedDecimate':
conv = HashedDecimate
elif convtype == 'SepHashedDecimate':
conv = SepHashedDecimate
elif convtype == 'Sep':
conv = DepthwiseSep
else:
raise ValueError('Conv "%s" not recognised'%convtype)
return conv
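# Usage sketch (hypothetical helper, mirroring how the network definitions in this
# repo consume the factory): each string picks a substitute convolution, with an
# optional hyperparameter after the underscore. Only a few variants are exercised
# here; ACDC is skipped to avoid depending on the external pytorch_acdc package.
def _demo_conv_function():
    layers = {}
    for name in ['Conv', 'Sep', 'Generic_0.25', 'Shuffle_4']:
        make = conv_function(name)
        layers[name] = make(64, 128, 3, stride=1, padding=1, bias=False)
    return {name: sum(p.numel() for p in layer.parameters())
            for name, layer in layers.items()}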
class BasicBlock(nn.Module):
def __init__(self, in_planes, out_planes, stride, dropRate=0.0, conv=Conv):
super(BasicBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu1 = nn.ReLU(inplace=True)
self.conv1 = conv(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(out_planes)
self.relu2 = nn.ReLU(inplace=True)
self.conv2 = conv(out_planes, out_planes, kernel_size=3, stride=1,
padding=1, bias=False)
#assert self.conv2.grouped.padding[0] == 1
self.droprate = dropRate
self.equalInOut = (in_planes == out_planes)
self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
padding=0, bias=False) or None
def forward(self, x):
if not self.equalInOut:
x = self.relu1(self.bn1(x))
else:
out = self.relu1(self.bn1(x))
out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, training=self.training)
out = self.conv2(out)
return torch.add(x if self.equalInOut else self.convShortcut(x), out)
# modified from torchvision
class Bottleneck(nn.Module):
"""Bottleneck architecture block for ResNet"""
def __init__(self, inplanes, planes, ConvClass, stride=1, downsample=None, expansion=4):
super(Bottleneck, self).__init__()
self.expansion = expansion
pointwise = lambda i,o: ConvClass(i, o, kernel_size=1, padding=0,
bias=False)
self.conv1 = pointwise(inplanes, planes)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = ConvClass(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = pointwise(planes, planes * self.expansion)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def add_residual(self, x, out):
if self.downsample is not None:
residual = self.downsample(x)
else:
residual = x
return out + residual
def forward(self, x):
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
#out = checkpoint(self.add_residual, x, out)
out = self.add_residual(x, out)
out = self.relu(out)
return out
class NetworkBlock(nn.Module):
def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0, conv = Conv):
super(NetworkBlock, self).__init__()
self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate, conv)
def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate, conv):
layers = []
for i in range(nb_layers):
layers.append(block(i == 0 and in_planes or out_planes, out_planes, i == 0 and stride or 1, dropRate, conv))
return nn.Sequential(*layers)
def forward(self, x):
return self.layer(x)
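# Sketch (hypothetical helper): a WRN-style group of BasicBlocks built with one of
# the substitute convolutions from conv_function above.
def _demo_network_block():
    group = NetworkBlock(nb_layers=2, in_planes=16, out_planes=32, block=BasicBlock,
                         stride=2, dropRate=0.0, conv=conv_function('Sep'))
    x = torch.randn(2, 16, 32, 32)
    return group(x).shape  # expect torch.Size([2, 32, 16, 16])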
if __name__ == '__main__':
X = torch.randn(5,16,32,32)
# sanity of generic low-rank layer
generic = GenericLowRank(16, 32, 3, 2)
for n,p in generic.named_parameters():
print(n, p.size(), p.numel())
out = generic(X)
print(out.size())
low = LowRank(16, 32, 3, 2)
for n, p in low.named_parameters():
print(n, p.size(), p.numel())
out = low(X)
print(out.size())
    assert False  # debug stop: remove to run the remaining sanity checks below
# check we don't initialise a grouped conv when not required
layers_to_test = [LowRank(3,32,1,1), GenericLowRank(3,32,1,1),
HalfHashedSeparable(3,32,1,10), TensorTrain(3,32,1,0.5,3),
Tucker(3,32,1,0.5,3), CP(3,32,1,0.5,3), ACDC(3,32,1)]
for layer in layers_to_test:
assert getattr(layer, 'grouped', None) is None
# and we *do* when it is required
layers_to_test = [LowRank(3,32,3,1), GenericLowRank(3,32,3,1),
HalfHashedSeparable(3,32,3,100), TensorTrain(3,32,3,0.5,3),
Tucker(3,32,3,0.5,3), CP(3,32,3,0.5,3), ACDC(3,32,3)]
for layer in layers_to_test:
assert getattr(layer, 'grouped', None) is not None, layer
# sanity of LinearShuffleNet
X = torch.randn(5,16,32,32)
shuffle = LinearShuffleNet(16,32,3,4)
print(shuffle(X).size())
ACRO
ACRO-main/setup.py
"""Python setup script for installing ACRO."""
from pathlib import Path
from setuptools import find_packages, setup
this_directory = Path(__file__).parent
long_description = (this_directory / "README.md").read_text()
setup(
name="acro",
version="0.4.2",
license="MIT",
maintainer="Jim Smith",
maintainer_email="james.smith@uwe.ac.uk",
description="ACRO: Tools for the Automatic Checking of Research Outputs",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/AI-SDC/ACRO",
packages=find_packages(),
package_data={"acro": ["default.yaml"]},
python_requires=">=3.10",
install_requires=["lxml", "numpy", "openpyxl", "pandas", "PyYAML", "statsmodels"],
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Information Analysis",
"Operating System :: OS Independent",
],
keywords=[
"data-privacy",
"data-protection",
"privacy",
"privacy-tools",
"statistical-disclosure-control",
],
)
ACRO
ACRO-main/test/stata.py
#!/usr/bin/env python
"""ACRO Stata Tests."""
# ACRO Tests
import os
import pandas as pd
from acro import ACRO, add_constant
# Instantiate ACRO
acro = ACRO()
# Load test data
path = os.path.join("../data", "test_data.dta")
df = pd.read_stata(path)
df.head()
# Pandas crosstab
table = pd.crosstab(df.year, df.grant_type)
# ACRO crosstab
safe_table = acro.crosstab(df.year, df.grant_type)
# ACRO crosstab with aggregation function
safe_table = acro.crosstab(df.year, df.grant_type, values=df.inc_grants, aggfunc="mean")
# ACRO crosstab with negative values
negative = df.inc_grants.copy()
negative[0:10] = -10
safe_table = acro.crosstab(df.year, df.grant_type, values=negative, aggfunc="mean")
# ACRO pivot_table
table = acro.pivot_table(
df, index=["grant_type"], values=["inc_grants"], aggfunc=["mean", "std"]
)
# ACRO pivot_table with negative values
df.loc[0:10, "inc_grants"] = -10
table = acro.pivot_table(
df, index=["grant_type"], values=["inc_grants"], aggfunc=["mean", "std"]
)
# ACRO OLS
new_df = df[["inc_activity", "inc_grants", "inc_donations", "total_costs"]]
new_df = new_df.dropna()
y = new_df["inc_activity"]
x = new_df[["inc_grants", "inc_donations", "total_costs"]]
x = add_constant(x)
results = acro.ols(y, x)
results.summary()
# ACRO OLSR
results = acro.olsr(
formula="inc_activity ~ inc_grants + inc_donations + total_costs", data=new_df
)
results.summary()
# ACRO Probit
new_df = df[["survivor", "inc_activity", "inc_grants", "inc_donations", "total_costs"]]
new_df = new_df.dropna()
y = new_df["survivor"].astype("category").cat.codes # numeric
y.name = "survivor"
x = new_df[["inc_activity", "inc_grants", "inc_donations", "total_costs"]]
x = add_constant(x)
results = acro.probit(y, x)
results.summary()
# ACRO Logit
results = acro.logit(y, x)
results.summary()
# List current ACRO outputs
acro.print_outputs()
# Remove some ACRO outputs before finalising
acro.remove_output("output_1")
acro.remove_output("output_4")
# Finalise ACRO
output = acro.finalise("test_results.xlsx")
ACRO
ACRO-main/test/test_initial.py
"""This module contains unit tests."""
import json
import os
import numpy as np
import pandas as pd
import pytest
from acro import ACRO, add_constant, record, utils
from acro.record import Records, load_records
# pylint: disable=redefined-outer-name
PATH: str = "RES_PYTEST"
@pytest.fixture
def data() -> pd.DataFrame:
"""Load test data."""
path = os.path.join("data", "test_data.dta")
data = pd.read_stata(path)
return data
@pytest.fixture
def acro() -> ACRO:
"""Initialise ACRO."""
return ACRO(suppress=True)
def test_crosstab_without_suppression(data):
"""Crosstab threshold without automatic suppression."""
acro = ACRO(suppress=False)
_ = acro.crosstab(data.year, data.grant_type)
output = acro.results.get_index(0)
correct_summary: str = "fail; threshold: 6 cells may need suppressing; "
assert output.summary == correct_summary
assert 48 == output.output[0]["R/G"].sum()
def test_crosstab_multiple_aggregate_function(data, acro):
"""Crosstab with multiple agg funcs."""
acro = ACRO(suppress=False)
_ = acro.crosstab(
data.year, data.grant_type, values=data.inc_grants, aggfunc=["mean", "std"]
)
output = acro.results.get_index(0)
correct_summary: str = (
"fail; threshold: 12 cells may need suppressing;"
" p-ratio: 2 cells may need suppressing; "
"nk-rule: 2 cells may need suppressing; "
)
assert (
output.summary == correct_summary
), f"\n{output.summary}\n should be \n{correct_summary}\n"
print(f"{output.output[0]['mean'][ 'R/G'].sum()}")
correctval = 97383496.0
errmsg = f"{output.output[0]['mean']['R/G'].sum()} should be {correctval}"
assert correctval == output.output[0]["mean"]["R/G"].sum(), errmsg
def test_crosstab_threshold(data, acro):
"""Crosstab threshold test."""
_ = acro.crosstab(data.year, data.grant_type)
output = acro.results.get_index(0)
total_nan: int = output.output[0]["R/G"].isnull().sum()
assert total_nan == 6
positions = output.sdc["cells"]["threshold"]
for pos in positions:
row, col = pos
assert np.isnan(output.output[0].iloc[row, col])
acro.add_exception("output_0", "Let me have it")
results: Records = acro.finalise()
correct_summary: str = "fail; threshold: 6 cells suppressed; "
output = results.get_index(0)
assert output.summary == correct_summary
def test_crosstab_multiple(data, acro):
"""Crosstab multiple rule test."""
_ = acro.crosstab(
data.year, data.grant_type, values=data.inc_grants, aggfunc="mean"
)
acro.add_exception("output_0", "Let me have it")
results: Records = acro.finalise()
correct_summary: str = (
"fail; threshold: 6 cells suppressed; p-ratio: 1 cells suppressed; "
"nk-rule: 1 cells suppressed; "
)
output = results.get_index(0)
assert output.summary == correct_summary
def test_negatives(data, acro):
"""Pivot table and Crosstab with negative values."""
data.loc[0:10, "inc_grants"] = -10
_ = acro.crosstab(
data.year, data.grant_type, values=data.inc_grants, aggfunc="mean"
)
_ = acro.pivot_table(
data, index=["grant_type"], values=["inc_grants"], aggfunc=["mean", "std"]
)
acro.add_exception("output_0", "Let me have it")
acro.add_exception("output_1", "I want this")
results: Records = acro.finalise()
correct_summary: str = "review; negative values found"
output_0 = results.get_index(0)
output_1 = results.get_index(1)
assert output_0.summary == correct_summary
assert output_1.summary == correct_summary
def test_pivot_table_without_suppression(data):
"""Pivot table without automatic suppression."""
acro = ACRO(suppress=False)
_ = acro.pivot_table(
data, index=["grant_type"], values=["inc_grants"], aggfunc=["mean", "std"]
)
output_0 = acro.results.get_index(0)
assert 36293992.0 == output_0.output[0]["mean"]["inc_grants"].sum()
assert "pass" == output_0.summary
def test_pivot_table_pass(data, acro):
"""Pivot table pass test."""
_ = acro.pivot_table(
data, index=["grant_type"], values=["inc_grants"], aggfunc=["mean", "std"]
)
results: Records = acro.finalise()
correct_summary: str = "pass"
output_0 = results.get_index(0)
assert output_0.summary == correct_summary
def test_pivot_table_cols(data, acro):
"""Pivot table with columns test."""
_ = acro.pivot_table(
data,
index=["grant_type"],
columns=["year"],
values=["inc_grants"],
aggfunc=["mean", "std"],
)
acro.add_exception("output_0", "Let me have it")
results: Records = acro.finalise()
correct_summary: str = (
"fail; threshold: 14 cells suppressed; "
"p-ratio: 2 cells suppressed; nk-rule: 2 cells suppressed; "
)
output_0 = results.get_index(0)
assert output_0.summary == correct_summary
def test_ols(data, acro):
"""Ordinary Least Squares test."""
new_df = data[["inc_activity", "inc_grants", "inc_donations", "total_costs"]]
new_df = new_df.dropna()
# OLS
endog = new_df.inc_activity
exog = new_df[["inc_grants", "inc_donations", "total_costs"]]
exog = add_constant(exog)
results = acro.ols(endog, exog)
assert results.df_resid == 807
assert results.rsquared == pytest.approx(0.894, 0.001)
# OLSR
results = acro.olsr(
formula="inc_activity ~ inc_grants + inc_donations + total_costs", data=new_df
)
assert results.df_resid == 807
assert results.rsquared == pytest.approx(0.894, 0.001)
# Finalise
results = acro.finalise()
correct_summary: str = "pass; dof=807.0 >= 10"
output_0 = results.get_index(0)
output_1 = results.get_index(1)
assert output_0.summary == correct_summary
assert output_1.summary == correct_summary
def test_probit_logit(data, acro):
"""Probit and Logit tests."""
new_df = data[
["survivor", "inc_activity", "inc_grants", "inc_donations", "total_costs"]
]
new_df = new_df.dropna()
endog = new_df["survivor"].astype("category").cat.codes # numeric
endog.name = "survivor"
exog = new_df[["inc_activity", "inc_grants", "inc_donations", "total_costs"]]
exog = add_constant(exog)
# Probit
results = acro.probit(endog, exog)
assert results.df_resid == 806
assert results.prsquared == pytest.approx(0.208, 0.01)
# Logit
results = acro.logit(endog, exog)
assert results.df_resid == 806
assert results.prsquared == pytest.approx(0.214, 0.01)
# ProbitR
new_df["survivor"] = new_df["survivor"].astype("category").cat.codes
results = acro.probitr(
formula="survivor ~ inc_activity + inc_grants + inc_donations + total_costs",
data=new_df,
)
assert results.df_resid == 806
assert results.prsquared == pytest.approx(0.208, 0.01)
# LogitR
results = acro.logitr(
formula="survivor ~ inc_activity + inc_grants + inc_donations + total_costs",
data=new_df,
)
assert results.df_resid == 806
assert results.prsquared == pytest.approx(0.214, 0.01)
# Finalise
results = acro.finalise()
correct_summary: str = "pass; dof=806.0 >= 10"
output_0 = results.get_index(0)
output_1 = results.get_index(1)
output_2 = results.get_index(2)
output_3 = results.get_index(3)
assert output_0.summary == correct_summary
assert output_1.summary == correct_summary
assert output_2.summary == correct_summary
assert output_3.summary == correct_summary
def test_finalise_excel(data, acro):
"""Finalise excel test."""
_ = acro.crosstab(data.year, data.grant_type)
acro.add_exception("output_0", "Let me have it")
results: Records = acro.finalise(PATH, "xlsx")
output_0 = results.get_index(0)
filename = os.path.normpath(f"{PATH}/results.xlsx")
load_data = pd.read_excel(filename, sheet_name=output_0.uid)
correct_cell: str = "_ = acro.crosstab(data.year, data.grant_type)"
assert load_data.iloc[0, 0] == "Command"
assert load_data.iloc[0, 1] == correct_cell
def test_output_removal(data, acro, monkeypatch):
"""Output removal and print test."""
_ = acro.crosstab(data.year, data.grant_type)
_ = acro.crosstab(data.year, data.grant_type)
_ = acro.crosstab(data.year, data.grant_type)
exceptions = ["I want it", "Let me have it", "Please!"]
monkeypatch.setattr("builtins.input", lambda _: exceptions.pop(0))
results: Records = acro.finalise()
output_0 = results.get("output_0")
output_1 = results.get("output_1")
# remove something that is there
acro.remove_output(output_0.uid)
results = acro.finalise()
correct_summary: str = "fail; threshold: 6 cells suppressed; "
keys = results.get_keys()
assert output_0.uid not in keys
assert output_1.uid in keys
assert output_1.summary == correct_summary
acro.print_outputs()
# remove something that is not there
with pytest.raises(ValueError):
acro.remove_output("123")
def test_load_output():
"""Empty array when loading output."""
with pytest.raises(ValueError):
record.load_output(PATH, [])
def test_finalise_invalid(data, acro):
"""Invalid output format when finalising."""
_ = acro.crosstab(data.year, data.grant_type)
output_0 = acro.results.get_index(0)
output_0.exception = "Let me have it"
with pytest.raises(ValueError):
_ = acro.finalise(PATH, "123")
def test_finalise_json(data, acro):
"""Finalise json test."""
_ = acro.crosstab(data.year, data.grant_type)
acro.add_exception("output_0", "Let me have it")
# write JSON
result: Records = acro.finalise(PATH, "json")
# load JSON
loaded: Records = load_records(PATH)
orig = result.get_index(0)
read = loaded.get_index(0)
print("*****************************")
print(orig)
print("*****************************")
print(read)
print("*****************************")
# check equal
assert orig.uid == read.uid
assert orig.status == read.status
assert orig.output_type == read.output_type
assert orig.properties == read.properties
assert orig.sdc == read.sdc
assert orig.command == read.command
assert orig.summary == read.summary
assert orig.comments == read.comments
assert orig.timestamp == read.timestamp
assert (orig.output[0].reset_index()).equals(read.output[0])
# test reading JSON
with open(os.path.normpath(f"{PATH}/results.json"), encoding="utf-8") as file:
json_data = json.load(file)
results: dict = json_data["results"]
assert results[orig.uid]["files"][0]["name"] == f"{orig.uid}_0.csv"
def test_rename_output(data, acro):
"""Output renaming test."""
_ = acro.crosstab(data.year, data.grant_type)
_ = acro.crosstab(data.year, data.grant_type)
acro.add_exception("output_0", "Let me have it")
acro.add_exception("output_1", "I want this")
results: Records = acro.finalise()
output_0 = results.get_index(0)
orig_name = output_0.uid
new_name = "cross_table"
acro.rename_output(orig_name, new_name)
results = acro.finalise()
assert output_0.uid == new_name
assert orig_name not in results.get_keys()
assert os.path.exists(f"outputs/{new_name}_0.csv")
# rename an output that doesn't exist
with pytest.raises(ValueError):
acro.rename_output("123", "name")
# rename an output to another that already exists
with pytest.raises(ValueError):
acro.rename_output("output_1", "cross_table")
def test_add_comments(data, acro):
"""Adding comments to output test."""
_ = acro.crosstab(data.year, data.grant_type)
acro.add_exception("output_0", "Let me have it")
results: Records = acro.finalise()
output_0 = results.get_index(0)
assert output_0.comments == []
comment = "This is a cross table between year and grant_type"
acro.add_comments(output_0.uid, comment)
assert output_0.comments == [comment]
comment_1 = "6 cells were suppressed"
acro.add_comments(output_0.uid, comment_1)
assert output_0.comments == [comment, comment_1]
# add a comment to something that is not there
with pytest.raises(ValueError):
acro.add_comments("123", "comment")
def test_custom_output(acro):
"""Adding an unsupported output to the results dictionary test."""
filename = "notebooks/XandY.jfif"
file_path = os.path.normpath(filename)
acro.custom_output(filename)
acro.add_exception("output_0", "Let me have it")
results: Records = acro.finalise(path=PATH)
output_0 = results.get_index(0)
assert output_0.output == [file_path]
assert os.path.exists(os.path.normpath(f"{PATH}/XandY.jfif"))
def test_missing(data, acro, monkeypatch):
"""Pivot table and Crosstab with negative values."""
utils.CHECK_MISSING_VALUES = True
data.loc[0:10, "inc_grants"] = np.NaN
_ = acro.crosstab(
data.year, data.grant_type, values=data.inc_grants, aggfunc="mean"
)
_ = acro.pivot_table(
data, index=["grant_type"], values=["inc_grants"], aggfunc=["mean", "std"]
)
exceptions = ["I want it", "Let me have it"]
monkeypatch.setattr("builtins.input", lambda _: exceptions.pop(0))
results: Records = acro.finalise()
correct_summary: str = "review; missing values found"
output_0 = results.get_index(0)
output_1 = results.get_index(1)
assert output_0.summary == correct_summary
assert output_1.summary == correct_summary
assert output_0.exception == "I want it"
assert output_1.exception == "Let me have it"
def test_suppression_error(caplog):
"""Apply suppression type error test."""
table_data = {"col1": [1, 2], "col2": [3, 4]}
mask_data = {"col1": [np.NaN, True], "col2": [True, True]}
table = pd.DataFrame(data=table_data)
masks = {"test": pd.DataFrame(data=mask_data)}
utils.apply_suppression(table, masks)
assert "problem mask test is not binary" in caplog.text
def test_adding_exception(acro):
"""Adding an exception to an output that doesn't exist test."""
with pytest.raises(ValueError):
acro.add_exception("output_0", "Let me have it")
ACRO
ACRO-main/test/test_stata_interface.py
"""This module contains unit tests for the stata interface."""
import os
import pandas as pd
import pytest
from acro import ACRO
from stata.acro_stata_parser import (
apply_stata_expstmt,
apply_stata_ifstmt,
find_brace_contents,
parse_and_run,
parse_table_details,
)
# pylint: disable=redefined-outer-name
@pytest.fixture
def acro() -> ACRO:
"""Initialise ACRO."""
return ACRO()
@pytest.fixture
def data() -> pd.DataFrame:
"""Load test data."""
path = os.path.join("data", "test_data.dta")
data = pd.read_stata(path)
return data
# --- global object and dummy code to replace things in acro.ado
stata_acro = "empty" # pylint:disable=invalid-name
def dummy_acrohandler(
data, command, varlist, exclusion, exp, weights, options
): # pylint:disable=too-many-arguments
"""
    Provides an alternative interface that mimics the code in acro.ado,
    most notably the presence of a global variable called stata_acro.
"""
# global stata_acro
acro_outstr = parse_and_run(
data, command, varlist, exclusion, exp, weights, options
)
return acro_outstr
# --- Helper functions
def test_find_brace_contents():
"""Tests helper function
that extracts contents 'A B C'
of something specified via X(A B C)
on the stata command line.
"""
options = "by(grant_type) contents(mean sd inc_activity) suppress nototals"
res, substr = find_brace_contents("by", options)
assert res
assert substr == "grant_type"
res, substr = find_brace_contents("contents", options)
assert res
assert substr == "mean sd inc_activity"
res, substr = find_brace_contents("foo", options)
assert not res
assert substr == "foo not found"
def test_apply_stata_ifstmt(data):
"""Tests that if statements work for selection."""
ifstring = "year!=2013"
all_list = list(data["year"].unique())
smaller = apply_stata_ifstmt(ifstring, data)
all_list.remove(2013)
assert list(smaller["year"].unique()) == all_list
ifstring2 = "year != 2013 & year <2015"
all_list.remove(2015)
smaller2 = apply_stata_ifstmt(ifstring2, data)
assert list(smaller2["year"].unique()) == all_list
def test_apply_stata_expstmt(data):
"""Tests that in statements work for row selection."""
length = data.shape[0]
# use of f/F and l/L for first and last with specified row range
exp = "f/5"
smaller = apply_stata_expstmt(exp, data)
assert smaller.shape[0] == 5
assert (smaller.iloc[-1].fillna(0).values == data.iloc[4].fillna(0).values).all()
exp = "F/5"
smaller = apply_stata_expstmt(exp, data)
assert smaller.shape[0] == 5
assert (smaller.iloc[-1].fillna(0).values == data.iloc[4].fillna(0).values).all()
exp = "-6/l"
smaller = apply_stata_expstmt(exp, data)
assert smaller.shape[0] == 5
assert (
smaller.iloc[-1].fillna(0).values == data.iloc[length - 2].fillna(0).values
).all()
exp = "-6/L"
smaller = apply_stata_expstmt(exp, data)
assert smaller.shape[0] == 5
assert (
smaller.iloc[-1].fillna(0).values == data.iloc[length - 2].fillna(0).values
).all()
# invalid range should default to end of dataframe
exp = "500/450"
smaller = apply_stata_expstmt(exp, data)
assert smaller.shape[0] == length - 1 - 500
# -----acro management
def test_stata_acro_init():
"""
    Tests creation of an acro object at the start of a session.
    For Stata this is held in a variable stata_acro, which is initialised to the
    string "empty" in the acro.ado file and should then be pointed at a new acro
    instance.
"""
assert isinstance(stata_acro, str)
ret = dummy_acrohandler(
data, command="init", varlist="", exclusion="", exp="", weights="", options=""
)
assert (
ret == "acro analysis session created\n"
), f"wrong string for acro init: {ret}\n"
# assert isinstance(stata_acro,ACRO),f'wrong type for stata_acro:{type(stata_acro)}'
def test_stata_print_outputs(data):
"""Checks print_outputs gets called."""
ret = dummy_acrohandler(
data,
command="print_outputs",
varlist=" inc_activity inc_grants inc_donations total_costs",
exclusion="",
exp="",
weights="",
options="",
)
assert len(ret) == 0, "return string should be empty"
# ----main SDC functionality
def test_simple_table(data):
"""
    Checks that the simple table command works as expected, by comparison with a
    direct call to pd.crosstab(), to make sure the table specification is parsed
    correctly. The acro SDC analysis is tested elsewhere.
"""
correct = pd.crosstab(
index=data["survivor"], columns=data["grant_type"]
).to_string()
ret = dummy_acrohandler(
data,
"table",
"survivor grant_type",
exclusion="",
exp="",
weights="",
options="nototals",
)
ret = ret.replace("NaN", "0")
ret = ret.replace(".0", "")
assert ret.split() == correct.split(), f"got\n{ret}\n expected\n{correct}"
def test_parse_table_details(data):
"""
Series of checks that the varlist and options are parsed correctly
by the helper function.
"""
varlist = ["survivor", "grant_type", "year"]
varnames = data.columns
options = "by(grant_type) contents(mean sd inc_activity) suppress nototals"
details = parse_table_details(varlist, varnames, options)
errstring = f" rows {details['rowvars']} should be ['grant_type','survivor']"
assert details["rowvars"] == ["grant_type", "survivor"], errstring
errstring = f" cols {details['colvars']} should be ['year','grant_type']"
assert details["colvars"] == ["year", "grant_type"], errstring
errstring = f" aggfunctions {details['aggfuncs']} should be ['mean','sd']"
assert details["aggfuncs"] == ["mean", "sd"], errstring
errstring = f" values {details['values']} should be ['inc_activity']"
assert details["values"] == ["inc_activity"], errstring
assert not details["totals"], "totals should be False"
assert details["suppress"], "suppress should be True"
def test_stata_probit(data):
"""Checks probit gets called correctly."""
ret = dummy_acrohandler(
data,
command="probit",
varlist=" survivor inc_activity inc_grants inc_donations total_costs",
exclusion="",
exp="",
weights="",
options="",
)
ret = ret.replace("\n", ",")
tokens = ret.split(",")
idx = tokens.index(" Df Residuals: ")
residuals = int(tokens[idx + 1])
assert residuals == 806, f"{residuals} should be 806"
idx = tokens.index(" Pseudo R-squ.: ")
rsquared = float(tokens[idx + 1])
assert rsquared == pytest.approx(0.208, 0.01)
def test_stata_linregress(data):
"""Checks linear regression called correctly."""
ret = dummy_acrohandler(
data,
command="regress",
varlist=" inc_activity inc_grants inc_donations total_costs",
exclusion="",
exp="",
weights="",
options="",
)
ret = ret.replace("\n", ",")
tokens = ret.split(",")
idx = tokens.index("Df Residuals: ")
residuals = int(tokens[idx + 1])
assert residuals == 807, f"{residuals} should be 807"
idx = tokens.index(" R-squared: ")
rsquared = float(tokens[idx + 1])
assert rsquared == pytest.approx(0.894, 0.001)
def test_unsupported_formatting_options(data):
"""Checks that user gets warning if they try to format table."""
format_string = "acro does not currently support table formatting commands."
correct = pd.crosstab(
index=data["survivor"], columns=data["grant_type"]
).to_string()
for bad_option in [
"cellwidth",
"csepwidth",
"stubwidth",
"scsepwidth",
"center",
"left",
]:
ret = dummy_acrohandler(
data,
"table",
"survivor grant_type",
exclusion="",
exp="",
weights="",
options=f"{bad_option} nototals",
)
rets = ret.split("\n", 1)
assert len(rets) == 2, "table should have warning prepended"
errmsg = f"first line {rets[0]} should be {format_string}"
assert rets[0] == format_string, errmsg
ret = rets[1]
ret = ret.replace("NaN", "0")
ret = ret.replace(".0", "")
assert ret.split() == correct.split(), f"got\n{ret}\n expected\n{correct}"
def test_stata_finalise(monkeypatch):
"""Checks finalise gets called correctly."""
monkeypatch.setattr("builtins.input", lambda _: "Let me have it")
ret = dummy_acrohandler(
data,
command="finalise",
varlist="",
exclusion="",
exp="",
weights="",
options="",
)
correct = "outputs and stata_out.json written\n"
assert ret == correct, f"returned string {ret} should be {correct}\n"
ACRO
ACRO-main/test/__init__.py (empty file)
ACRO
ACRO-main/docs/source/conf.py
# Configuration file for the Sphinx documentation builder.
#
# -- Path setup --------------------------------------------------------------
import os
import sys
sys.path.insert(0, os.path.abspath("../../"))
from acro.version import __version__
# -- Project information -----------------------------------------------------
project = "ACRO"
copyright = "2023, ACRO Project Team"
author = "ACRO Project Team"
release = __version__
# -- General configuration ---------------------------------------------------
extensions = [
"numpydoc",
"sphinx-prompt",
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.imgconverter",
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
"sphinx_autopackagesummary",
"sphinx_issues",
"sphinx_rtd_theme",
]
exclude_patterns = []
html_static_path = ["_static"]
# -- Options for HTML output -------------------------------------------------
html_theme = "sphinx_rtd_theme"
html_theme_options = {"navigation_depth": 2}
# -- -------------------------------------------------------------------------
numpydoc_class_members_toctree = False
ACRO
ACRO-main/acro/record.py
"""ACRO: Output storage and serialization."""
import datetime
import hashlib
import json
import logging
import os
import shutil
from pathlib import Path
from typing import Any
import pandas as pd
from pandas import DataFrame
from .version import __version__
logger = logging.getLogger("acro:records")
def load_outcome(outcome: dict) -> DataFrame:
"""Returns a DataFrame from an outcome dictionary.
Parameters
----------
outcome : dict
The outcome to load as a DataFrame.
"""
return pd.DataFrame.from_dict(outcome)
def load_output(path: str, output: list[str]) -> list[str] | list[DataFrame]:
"""Returns a loaded output.
Parameters
----------
path : str
The path to the output folder (with results.json).
output : list[str]
The output to load.
Returns
-------
list[str] | list[DataFrame]
The loaded output field.
"""
if len(output) < 1:
raise ValueError("error loading output")
loaded: list[DataFrame] = []
for filename in output:
_, ext = os.path.splitext(filename)
if ext == ".csv":
filename = os.path.normpath(f"{path}/{filename}")
loaded.append(pd.read_csv(filename))
if len(loaded) < 1: # output is path(s) to custom file(s)
return output
return loaded
class Record: # pylint: disable=too-many-instance-attributes,too-few-public-methods
"""Stores data related to a single output record.
Attributes
----------
uid : str
Unique identifier.
status : str
SDC status: {"pass", "fail", "review"}
output_type : str
Type of output, e.g., "regression"
properties : dict
Dictionary containing structured output data.
sdc : dict
Dictionary containing SDC results.
command : str
String representation of the operation performed.
summary : str
String summarising the ACRO checks.
outcome : DataFrame
DataFrame describing the details of ACRO checks.
output : Any
List of output DataFrames.
comments : list[str]
List of strings entered by the user to add comments to the output.
exception : str
Description of why an exception to fail/review should be granted.
timestamp : str
Time the record was created in ISO format.
"""
def __init__( # pylint: disable=too-many-arguments
self,
uid: str,
status: str,
output_type: str,
properties: dict,
sdc: dict,
command: str,
summary: str,
outcome: DataFrame,
output: list[str] | list[DataFrame],
comments: list[str] | None = None,
) -> None:
"""Constructs a new output record.
Parameters
----------
uid : str
Unique identifier.
status : str
SDC status: {"pass", "fail", "review"}
output_type : str
Type of output, e.g., "regression"
properties : dict
Dictionary containing structured output data.
sdc : dict
Dictionary containing SDC results.
command : str
String representation of the operation performed.
summary : str
String summarising the ACRO checks.
outcome : DataFrame
DataFrame describing the details of ACRO checks.
output : list[str] | list[DataFrame]
List of output DataFrames.
comments : list[str] | None, default None
List of strings entered by the user to add comments to the output.
"""
self.uid: str = uid
self.status: str = status
self.output_type: str = output_type
self.properties: dict = properties
self.sdc: dict = sdc
self.command: str = command
self.summary: str = summary
self.outcome: DataFrame = outcome
self.output: Any = output
self.comments: list[str] = [] if comments is None else comments
self.exception: str = ""
now = datetime.datetime.now()
self.timestamp: str = now.isoformat()
def serialize_output(self, path: str = "outputs") -> list[str]:
"""Serializes outputs.
Parameters
----------
path : str, default 'outputs'
            Name of the folder to which outputs are to be written.
Returns
-------
list[str]
List of filepaths of the written outputs.
"""
output: list[str] = []
# check if the outputs directory was already created
try: # pragma: no cover
os.makedirs(path)
logger.debug("Directory %s created successfully", path)
except FileExistsError:
logger.debug("Directory %s already exists", path)
# save each output DataFrame to a different csv
if all(isinstance(obj, DataFrame) for obj in self.output):
for i, data in enumerate(self.output):
filename = f"{self.uid}_{i}.csv"
output.append(filename)
filename = os.path.normpath(f"{path}/{filename}")
with open(filename, mode="w", newline="", encoding="utf-8") as file:
file.write(data.to_csv())
# move custom files to the output folder
if self.output_type == "custom":
for filename in self.output:
if os.path.exists(filename):
shutil.copy(filename, path)
output.append(Path(filename).name)
return output
def __str__(self) -> str:
"""Returns a string representation of a record.
Returns
-------
str
The record.
"""
return (
f"uid: {self.uid}\n"
f"status: {self.status}\n"
f"type: {self.output_type}\n"
f"properties: {self.properties}\n"
f"sdc: {self.sdc}\n"
f"command: {self.command}\n"
f"summary: {self.summary}\n"
f"outcome: {self.outcome}\n"
f"output: {self.output}\n"
f"timestamp: {self.timestamp}\n"
f"comments: {self.comments}\n"
f"exception: {self.exception}\n"
)
class Records:
"""Stores data related to a collection of output records."""
def __init__(self) -> None:
"""Constructs a new object for storing multiple records."""
self.results: dict[str, Record] = {}
self.output_id: int = 0
def add( # pylint: disable=too-many-arguments
self,
status: str,
output_type: str,
properties: dict,
sdc: dict,
command: str,
summary: str,
outcome: DataFrame,
output: list[str] | list[DataFrame],
comments: list[str] | None = None,
) -> None:
"""Adds an output to the results.
Parameters
----------
status : str
SDC status: {"pass", "fail", "review"}
output_type : str
Type of output, e.g., "regression"
properties : dict
Dictionary containing structured output data.
sdc : dict
Dictionary containing SDC results.
command : str
String representation of the operation performed.
summary : str
String summarising the ACRO checks.
outcome : DataFrame
DataFrame describing the details of ACRO checks.
        output : list[str] | list[DataFrame]
List of output DataFrames.
comments : list[str] | None, default None
List of strings entered by the user to add comments to the output.
"""
new = Record(
uid=f"output_{self.output_id}",
status=status,
output_type=output_type,
properties=properties,
sdc=sdc,
command=command,
summary=summary,
outcome=outcome,
output=output,
comments=comments,
)
self.results[new.uid] = new
self.output_id += 1
logger.info("add(): %s", new.uid)
def remove(self, key: str) -> None:
"""Removes an output from the results.
Parameters
----------
key : str
Key specifying which output to remove, e.g., 'output_0'.
"""
if key not in self.results:
raise ValueError(f"unable to remove {key}, key not found")
del self.results[key]
logger.info("remove(): %s removed", key)
def get(self, key: str) -> Record:
"""Returns a specified output from the results.
Parameters
----------
key : str
Key specifying which output to return, e.g., 'output_0'.
Returns
-------
Record
The requested output.
"""
logger.debug("get(): %s ", key)
return self.results[key]
def get_keys(self) -> list[str]:
"""Returns the list of available output keys.
Returns
-------
list[str]
List of output names.
"""
logger.debug("get_keys()")
return list(self.results.keys())
def get_index(self, index: int) -> Record:
"""Returns the output at the specified position.
Parameters
----------
index : int
Position of the output to return.
Returns
-------
Record
The requested output.
"""
logger.debug("get_index(): %s", index)
key = list(self.results.keys())[index]
return self.results[key]
def add_custom(self, filename: str, comment: str | None = None) -> None:
"""Adds an unsupported output to the results dictionary.
Parameters
----------
filename : str
The name of the file that will be added to the list of the outputs.
comment : str | None, default None
An optional comment.
"""
output = Record(
uid=f"output_{self.output_id}",
status="review",
output_type="custom",
properties={},
sdc={},
command="custom",
summary="review",
outcome=DataFrame(),
output=[os.path.normpath(filename)],
comments=None if comment is None else [comment],
)
self.results[output.uid] = output
logger.info("add_custom(): %s", output.uid)
def rename(self, old: str, new: str) -> None:
"""Rename an output.
Parameters
----------
old : str
The old name of the output.
new : str
The new name of the output.
"""
if old not in self.results:
raise ValueError(f"unable to rename {old}, key not found")
if new in self.results:
raise ValueError(f"unable to rename, {new} already exists")
self.results[new] = self.results[old]
self.results[new].uid = new
del self.results[old]
logger.info("rename_output(): %s renamed to %s", old, new)
def add_comments(self, output: str, comment: str) -> None:
"""Adds a comment to an output.
Parameters
----------
output : str
The name of the output.
comment : str
The comment.
"""
if output not in self.results:
raise ValueError(f"unable to find {output}, key not found")
self.results[output].comments.append(comment)
logger.info("a comment was added to %s", output)
def add_exception(self, output: str, reason: str) -> None:
"""Adds an exception request to an output.
Parameters
----------
output : str
The name of the output.
reason : str
The reason the output should be released.
"""
if output not in self.results:
raise ValueError(f"unable to add exception: {output} not found")
self.results[output].exception = reason
logger.info("exception request was added to %s", output)
def print(self) -> str:
"""Prints the current results.
Returns
-------
str
String representation of all outputs.
"""
logger.debug("print()")
outputs: str = ""
for _, record in self.results.items():
outputs += str(record) + "\n"
print(outputs)
return outputs
def validate_outputs(self) -> None:
"""Prompts researcher to complete any required fields."""
for _, record in self.results.items():
if record.status != "pass" and record.exception == "":
logger.info(
"\n%s\n"
"The status of the record above is: %s.\n"
"Please explain why an exception should be granted.\n",
str(record),
record.status,
)
record.exception = input("")
def finalise(self, path: str, ext: str) -> None:
"""Creates a results file for checking.
Parameters
----------
path : str
Name of a folder to save outputs.
ext : str
Extension of the results file. Valid extensions: {json, xlsx}.
"""
logger.debug("finalise()")
self.validate_outputs()
if ext == "json":
self.finalise_json(path)
elif ext == "xlsx":
self.finalise_excel(path)
else:
raise ValueError("Invalid file extension. Options: {json, xlsx}")
self.write_checksums(path)
logger.info("outputs written to: %s", path)
def finalise_json(self, path: str) -> None:
"""Writes outputs to a JSON file.
Parameters
----------
path : str
Name of a folder to save outputs.
"""
outputs: dict = {}
for key, val in self.results.items():
outputs[key] = {
"uid": val.uid,
"status": val.status,
"type": val.output_type,
"properties": val.properties,
"files": [],
"outcome": json.loads(val.outcome.to_json()),
"command": val.command,
"summary": val.summary,
"timestamp": val.timestamp,
"comments": val.comments,
"exception": val.exception,
}
files: list[str] = val.serialize_output(path)
for file in files:
outputs[key]["files"].append({"name": file, "sdc": val.sdc})
results: dict = {"version": __version__, "results": outputs}
filename: str = os.path.normpath(f"{path}/results.json")
with open(filename, "w", newline="", encoding="utf-8") as handle:
json.dump(results, handle, indent=4, sort_keys=False)
def finalise_excel(self, path: str) -> None:
"""Writes outputs to an excel spreadsheet.
Parameters
----------
path : str
Name of a folder to save outputs.
"""
filename: str = os.path.normpath(f"{path}/results.xlsx")
try: # check if the directory was already created
os.makedirs(path, exist_ok=True)
logger.debug("Directory %s created successfully", path)
except FileExistsError: # pragma: no cover
logger.debug("Directory %s already exists", path)
with pd.ExcelWriter( # pylint: disable=abstract-class-instantiated
filename, engine="openpyxl"
) as writer:
# description sheet
sheet = []
summary = []
command = []
for output_id, output in self.results.items():
if output.output_type == "custom":
continue # avoid writing custom outputs
sheet.append(output_id)
command.append(output.command)
summary.append(output.summary)
tmp_df = pd.DataFrame(
{"Sheet": sheet, "Command": command, "Summary": summary}
)
tmp_df.to_excel(writer, sheet_name="description", index=False, startrow=0)
# individual sheets
for output_id, output in self.results.items():
if output.output_type == "custom":
continue # avoid writing custom outputs
# command and summary
start = 0
tmp_df = pd.DataFrame(
[output.command, output.summary], index=["Command", "Summary"]
)
tmp_df.to_excel(writer, sheet_name=output_id, startrow=start)
# outcome
if output.outcome is not None:
output.outcome.to_excel(writer, sheet_name=output_id, startrow=4)
# output
for table in output.output:
start = 1 + writer.sheets[output_id].max_row
table.to_excel(writer, sheet_name=output_id, startrow=start)
def write_checksums(self, path: str) -> None:
"""Writes checksums for each file to checksums folder.
Parameters
----------
path : str
Name of a folder to save outputs.
"""
checksums: dict[str, str] = {}
for name in os.listdir(path):
filename = os.path.join(path, name)
if os.path.isfile(filename):
with open(filename, "rb") as file:
read = file.read()
checksums[name] = hashlib.sha256(read).hexdigest()
checksums_dir: str = os.path.normpath(f"{path}/checksums")
os.makedirs(checksums_dir, exist_ok=True)
for name, sha256 in checksums.items():
filename = os.path.join(checksums_dir, name + ".txt")
with open(filename, "w", encoding="utf-8") as file:
file.write(sha256)
def load_records(path: str) -> Records:
"""Loads outputs from a JSON file.
Parameters
----------
path : str
Name of an output folder containing results.json.
Returns
-------
Records
The loaded records.
"""
records = Records()
filename = os.path.normpath(f"{path}/results.json")
with open(filename, newline="", encoding="utf-8") as handle:
data = json.load(handle)
if data["version"] != __version__: # pragma: no cover
raise ValueError("error loading output")
for key, val in data["results"].items():
files: list[dict] = val["files"]
filenames: list = []
sdcs: list = []
for file in files:
filenames.append(file["name"])
sdcs.append(file["sdc"])
records.results[key] = Record(
uid=val["uid"],
status=val["status"],
output_type=val["type"],
properties=val["properties"],
sdc=sdcs[0],
command=val["command"],
summary=val["summary"],
outcome=load_outcome(val["outcome"]),
output=load_output(path, filenames),
comments=val["comments"],
)
records.results[key].exception = val["exception"]
records.results[key].timestamp = val["timestamp"]
return records
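# Minimal round-trip sketch (hypothetical helper and path, not part of the ACRO
# API): build a Records collection with a single passing table, write it as JSON
# and read it back. Calling this writes files under the given folder.
def _demo_records_roundtrip(path="RES_DEMO"):
    records = Records()
    records.add(
        status="pass",
        output_type="table",
        properties={},
        sdc={"summary": {}, "cells": {}},
        command="demo",
        summary="pass",
        outcome=DataFrame(),
        output=[DataFrame({"a": [1, 2]})],
    )
    records.finalise(path, "json")
    return load_records(path).get_keys()  # expect ["output_0"]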
ACRO
ACRO-main/acro/utils.py
"""ACRO: Utility Functions."""
import logging
from collections.abc import Callable
from inspect import FrameInfo, getframeinfo
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
from statsmodels.iolib.table import SimpleTable
logger = logging.getLogger("acro")
AGGFUNC: dict[str, Callable] = {
"mean": np.mean,
"median": np.median,
"sum": np.sum,
"std": np.std,
"freq": np.size,
}
# aggregation function parameters
THRESHOLD: int = 10
SAFE_PRATIO_P: float = 0.1
SAFE_NK_N: int = 2
SAFE_NK_K: float = 0.9
CHECK_MISSING_VALUES: bool = False
def get_command(default: str, stack_list: list[FrameInfo]) -> str:
"""Returns the calling source line as a string.
Parameters
----------
default : str
Default string to return if unable to extract the stack.
    stack_list : list[FrameInfo]
A list of frame records for the caller's stack. The first entry in the
returned list represents the caller; the last entry represents the
outermost call on the stack.
Returns
-------
str
The calling source line.
"""
command: str = default
if len(stack_list) > 1:
code = getframeinfo(stack_list[1][0]).code_context
if code is not None:
command = "\n".join(code).strip()
logger.debug("command: %s", command)
return command
def get_summary_dataframes(results: list[SimpleTable]) -> list[DataFrame]:
"""Converts a list of SimpleTable objects to a list of DataFrame objects.
Parameters
----------
results : list[SimpleTable]
Results from fitting statsmodel.
Returns
-------
list[DataFrame]
List of DataFrame objects.
"""
tables: list[DataFrame] = []
for table in results:
table_df = pd.read_html(table.as_html(), header=0, index_col=0)[0]
tables.append(table_df)
return tables
def agg_threshold(vals: Series) -> bool:
"""Aggregation function that returns whether the number of contributors is
below a threshold.
Parameters
----------
vals : Series
Series to calculate the p percent value.
Returns
-------
bool
Whether the threshold rule is violated.
"""
return vals.count() < THRESHOLD
def agg_negative(vals: Series) -> bool:
"""Aggregation function that returns whether any values are negative.
Parameters
----------
vals : Series
Series to check for negative values.
Returns
-------
bool
Whether a negative value was found.
"""
return vals.min() < 0
def agg_missing(vals: Series) -> bool:
"""Aggregation function that returns whether any values are missing.
Parameters
----------
vals : Series
Series to check for missing values.
Returns
-------
bool
Whether a missing value was found.
"""
return vals.isna().sum() != 0
def agg_p_percent(vals: Series) -> bool:
"""Aggregation function that returns whether the p percent rule is violated.
    That is, the uncertainty (as a fraction) with which the second highest
    respondent can estimate the highest value. The values are sorted in
    descending order and p = (total - highest - second highest) / highest is
    computed. If the total is not positive, p is set to 1 and the rule is not
    flagged.
Parameters
----------
vals : Series
Series to calculate the p percent value.
Returns
-------
bool
whether the p percent rule is violated.
"""
sorted_vals = vals.sort_values(ascending=False)
total: float = sorted_vals.sum()
sub_total = total - sorted_vals.iloc[0] - sorted_vals.iloc[1]
p_val: float = sub_total / sorted_vals.iloc[0] if total > 0 else 1
return p_val < SAFE_PRATIO_P
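# Worked illustration (hypothetical data): for contributions [50, 30, 10, 5, 5],
# p = (100 - 50 - 30) / 50 = 0.4 >= SAFE_PRATIO_P, so the rule is not violated;
# for a dominant contributor [90, 6, 2, 1, 1], p = (100 - 90 - 6) / 90 ~= 0.044
# and the cell is flagged.
def _demo_p_percent():
    return agg_p_percent(Series([50, 30, 10, 5, 5])), agg_p_percent(Series([90, 6, 2, 1, 1]))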
def agg_nk(vals: Series) -> bool:
"""Aggregation function that returns whether the top n items account for
more than k percent of the total.
Parameters
----------
vals : Series
Series to calculate the nk value.
Returns
-------
bool
Whether the nk rule is violated.
"""
total: float = vals.sum()
if total > 0:
sorted_vals = vals.sort_values(ascending=False)
n_total = sorted_vals.iloc[0:SAFE_NK_N].sum()
return (n_total / total) > SAFE_NK_K
return False
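# Worked illustration (hypothetical data): with SAFE_NK_N = 2 and SAFE_NK_K = 0.9,
# the top two of [80, 15, 3, 2] carry 95% of the total and are flagged, while the
# top two of [40, 30, 20, 10] carry only 70% and pass.
def _demo_nk_rule():
    return agg_nk(Series([80, 15, 3, 2])), agg_nk(Series([40, 30, 20, 10]))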
def apply_suppression(
table: DataFrame, masks: dict[str, DataFrame]
) -> tuple[DataFrame, DataFrame]:
"""Applies suppression to a table.
Parameters
----------
table : DataFrame
Table to apply suppression.
masks : dict[str, DataFrame]
Dictionary of tables specifying suppression masks for application.
Returns
-------
DataFrame
Table to output with any suppression applied.
DataFrame
Table with outcomes of suppression checks.
"""
logger.debug("apply_suppression()")
safe_df = table.copy()
outcome_df = DataFrame().reindex_like(table)
outcome_df.fillna("", inplace=True)
# don't apply suppression if negatives are present
if "negative" in masks:
mask = masks["negative"]
outcome_df[mask.values] = "negative"
# don't apply suppression if missing values are present
elif "missing" in masks:
mask = masks["missing"]
outcome_df[mask.values] = "missing"
# apply suppression masks
else:
for name, mask in masks.items():
try:
safe_df[mask.values] = np.NaN
tmp_df = DataFrame().reindex_like(outcome_df)
tmp_df.fillna("", inplace=True)
tmp_df[mask.values] = name + "; "
outcome_df += tmp_df
except TypeError:
logger.warning("problem mask %s is not binary", name)
outcome_df = outcome_df.replace({"": "ok"})
logger.info("outcome_df:\n%s", outcome_df)
return safe_df, outcome_df
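# A small hedged sketch (added for illustration, not part of the original
# module) of apply_suppression: a single threshold mask blanks the flagged
# cell and records the reason in the outcome table. The values are invented.
def _example_apply_suppression() -> None:
    table = DataFrame({"a": [10.0, 2.0]}, index=["x", "y"])
    masks = {"threshold": DataFrame({"a": [False, True]}, index=["x", "y"])}
    safe, outcome = apply_suppression(table, masks)
    assert np.isnan(safe.loc["y", "a"])  # the flagged cell is suppressed
    assert outcome.loc["y", "a"] == "threshold; "
    assert outcome.loc["x", "a"] == "ok"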
def get_table_sdc(masks: dict[str, DataFrame], suppress: bool) -> dict:
"""Returns the SDC dictionary using the suppression masks.
Parameters
----------
masks : dict[str, DataFrame]
Dictionary of tables specifying suppression masks for application.
suppress : bool
Whether suppression has been applied.
"""
# summary of cells to be suppressed
sdc: dict = {"summary": {"suppressed": suppress}, "cells": {}}
sdc["summary"]["negative"] = 0
sdc["summary"]["missing"] = 0
sdc["summary"]["threshold"] = 0
sdc["summary"]["p-ratio"] = 0
sdc["summary"]["nk-rule"] = 0
for name, mask in masks.items():
sdc["summary"][name] = int(mask.to_numpy().sum())
# positions of cells to be suppressed
sdc["cells"]["negative"] = []
sdc["cells"]["missing"] = []
sdc["cells"]["threshold"] = []
sdc["cells"]["p-ratio"] = []
sdc["cells"]["nk-rule"] = []
for name, mask in masks.items():
true_positions = np.column_stack(np.where(mask.values))
for pos in true_positions:
row_index, col_index = pos
sdc["cells"][name].append([int(row_index), int(col_index)])
return sdc
def get_summary(sdc: dict) -> tuple[str, str]:
"""Returns the status and summary of the suppression masks.
Parameters
----------
sdc : dict
Properties of the SDC checks.
Returns
-------
str
Status: {"review", "fail", "pass"}.
str
Summary of the suppression masks.
"""
status: str = "pass"
summary: str = ""
sdc_summary = sdc["summary"]
sup: str = "suppressed" if sdc_summary["suppressed"] else "may need suppressing"
if sdc_summary["negative"] > 0:
summary += "negative values found"
status = "review"
elif sdc_summary["missing"] > 0:
summary += "missing values found"
status = "review"
else:
if sdc_summary["threshold"] > 0:
summary += f"threshold: {sdc_summary['threshold']} cells {sup}; "
status = "fail"
if sdc_summary["p-ratio"] > 0:
summary += f"p-ratio: {sdc_summary['p-ratio']} cells {sup}; "
status = "fail"
if sdc_summary["nk-rule"] > 0:
summary += f"nk-rule: {sdc_summary['nk-rule']} cells {sup}; "
status = "fail"
if summary != "":
summary = f"{status}; {summary}"
else:
summary = status
logger.info("get_summary(): %s", summary)
return status, summary
def get_aggfunc(aggfunc: str | None) -> Callable | None:
"""Checks whether an aggregation function is allowed and returns the
appropriate function.
Parameters
----------
aggfunc : str | None
Name of the aggregation function to apply.
Returns
-------
Callable | None
The aggregation function to apply.
"""
logger.debug("get_aggfunc()")
func = None
if aggfunc is not None:
if not isinstance(aggfunc, str) or aggfunc not in AGGFUNC:
raise ValueError(f"aggfunc {aggfunc} must be: {', '.join(AGGFUNC.keys())}")
func = AGGFUNC[aggfunc]
logger.debug("aggfunc: %s", func)
return func
def get_aggfuncs(
aggfuncs: str | list[str] | None,
) -> Callable | list[Callable] | None:
"""Checks whether a list of aggregation functions is allowed and returns
the appropriate functions.
Parameters
----------
aggfuncs : str | list[str] | None
List of names of the aggregation functions to apply.
Returns
-------
Callable | list[Callable] | None
The aggregation functions to apply.
"""
logger.debug("get_aggfuncs()")
if aggfuncs is None:
logger.debug("aggfuncs: None")
return None
if isinstance(aggfuncs, str):
function = get_aggfunc(aggfuncs)
logger.debug("aggfuncs: %s", function)
return function
if isinstance(aggfuncs, list):
functions: list[Callable] = []
for function_name in aggfuncs:
function = get_aggfunc(function_name)
if function is not None:
functions.append(function)
logger.debug("aggfuncs: %s", functions)
if len(functions) < 1:
raise ValueError(f"invalid aggfuncs: {aggfuncs}")
return functions
raise ValueError("aggfuncs must be: either str or list[str]")
| 10,320
| 27.991573
| 87
|
py
|
ACRO
|
ACRO-main/acro/acro.py
|
"""ACRO: Automatic Checking of Research Outputs."""
import json
import logging
import os
import pathlib
import warnings
from collections.abc import Callable
from inspect import stack
import pandas as pd
import statsmodels.api as sm
import statsmodels.formula.api as smf
import yaml
from pandas import DataFrame
from statsmodels.discrete.discrete_model import BinaryResultsWrapper
from statsmodels.iolib.table import SimpleTable
from statsmodels.regression.linear_model import RegressionResultsWrapper
from . import utils
from .record import Records
from .version import __version__
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("acro")
warnings.simplefilter(action="ignore", category=FutureWarning)
class ACRO:
"""ACRO: Automatic Checking of Research Outputs.
Attributes
----------
config : dict
Safe parameters and their values.
results : Records
The current outputs including the results of checks.
output_id : int
The next identifier to be assigned to an output.
Examples
--------
>>> acro = ACRO()
>>> results = acro.ols(y, x)
>>> results.summary()
>>> acro.finalise("MYFOLDER", "json")
"""
def __init__(self, config: str = "default", suppress: bool = False) -> None:
"""Constructs a new ACRO object and reads parameters from config.
Parameters
----------
config : str
Name of a yaml configuration file with safe parameters.
suppress : bool, default False
Whether to automatically apply suppression.
"""
self.config: dict = {}
self.results: Records = Records()
self.suppress: bool = suppress
path = pathlib.Path(__file__).with_name(config + ".yaml")
logger.debug("path: %s", path)
with open(path, encoding="utf-8") as handle:
self.config = yaml.load(handle, Loader=yaml.loader.SafeLoader)
logger.info("version: %s", __version__)
logger.info("config: %s", self.config)
logger.info("automatic suppression: %s", self.suppress)
# set globals needed for aggregation functions
utils.THRESHOLD = self.config["safe_threshold"]
utils.SAFE_PRATIO_P = self.config["safe_pratio_p"]
utils.SAFE_NK_N = self.config["safe_nk_n"]
utils.SAFE_NK_K = self.config["safe_nk_k"]
utils.CHECK_MISSING_VALUES = self.config["check_missing_values"]
def finalise(self, path: str = "outputs", ext="json") -> Records:
"""Creates a results file for checking.
Parameters
----------
path : str
Name of a folder to save outputs.
ext : str
Extension of the results file. Valid extensions: {json, xlsx}.
Returns
-------
Records
Object storing the outputs.
"""
self.results.finalise(path, ext)
config_filename: str = os.path.normpath(f"{path}/config.json")
with open(config_filename, "w", newline="", encoding="utf-8") as file:
json.dump(self.config, file, indent=4, sort_keys=False)
return self.results
def remove_output(self, key: str) -> None:
"""Removes an output from the results.
Parameters
----------
key : str
Key specifying which output to remove, e.g., 'output_0'.
"""
self.results.remove(key)
def print_outputs(self) -> str:
"""Prints the current results dictionary.
Returns
-------
str
String representation of all outputs.
"""
return self.results.print()
def custom_output(self, filename: str, comment: str = "") -> None:
"""Adds an unsupported output to the results dictionary.
Parameters
----------
filename : str
The name of the file that will be added to the list of the outputs.
comment : str
An optional comment.
"""
self.results.add_custom(filename, comment)
def crosstab( # pylint: disable=too-many-arguments,too-many-locals
self,
index,
columns,
values=None,
rownames=None,
colnames=None,
aggfunc=None,
margins: bool = False,
margins_name: str = "All",
dropna: bool = True,
normalize=False,
) -> DataFrame:
"""Compute a simple cross tabulation of two (or more) factors. By
default, computes a frequency table of the factors unless an array of
values and an aggregation function are passed.
Parameters
----------
index : array-like, Series, or list of arrays/Series
Values to group by in the rows.
columns : array-like, Series, or list of arrays/Series
Values to group by in the columns.
values : array-like, optional
Array of values to aggregate according to the factors.
Requires `aggfunc` be specified.
rownames : sequence, default None
If passed, must match number of row arrays passed.
colnames : sequence, default None
If passed, must match number of column arrays passed.
aggfunc : str, optional
If specified, requires `values` be specified as well.
margins : bool, default False
Add row/column margins (subtotals).
margins_name : str, default 'All'
Name of the row/column that will contain the totals
when margins is True.
dropna : bool, default True
Do not include columns whose entries are all NaN.
normalize : bool, {'all', 'index', 'columns'}, or {0,1}, default False
Normalize by dividing all values by the sum of values.
- If passed 'all' or `True`, will normalize over all values.
- If passed 'index' will normalize over each row.
- If passed 'columns' will normalize over each column.
- If margins is `True`, will also normalize margin values.
Returns
-------
DataFrame
Cross tabulation of the data.
"""
logger.debug("crosstab()")
command: str = utils.get_command("crosstab()", stack())
# convert [list of] string to [list of] function
aggfunc = utils.get_aggfuncs(aggfunc)
# requested table
table: DataFrame = pd.crosstab( # type: ignore
index,
columns,
values,
rownames,
colnames,
aggfunc,
margins,
margins_name,
dropna,
normalize,
)
# suppression masks to apply based on the following checks
masks: dict[str, DataFrame] = {}
if aggfunc is not None:
# create lists with single entry for when there is only one aggfunc
freq_funcs: list[Callable] = [utils.AGGFUNC["freq"]]
neg_funcs: list[Callable] = [utils.agg_negative]
pperc_funcs: list[Callable] = [utils.agg_p_percent]
nk_funcs: list[Callable] = [utils.agg_nk]
missing_funcs: list[Callable] = [utils.agg_missing]
# then expand them to deal with extra columns as needed
if isinstance(aggfunc, list):
num = len(aggfunc)
freq_funcs.extend([utils.AGGFUNC["freq"] for i in range(1, num)])
neg_funcs.extend([utils.agg_negative for i in range(1, num)])
pperc_funcs.extend([utils.agg_p_percent for i in range(1, num)])
nk_funcs.extend([utils.agg_nk for i in range(1, num)])
missing_funcs.extend([utils.agg_missing for i in range(1, num)])
            # threshold check - doesn't matter what we pass for values
t_values = pd.crosstab( # type: ignore
index,
columns,
values=index,
rownames=rownames,
colnames=colnames,
aggfunc=freq_funcs,
margins=margins,
margins_name=margins_name,
dropna=dropna,
normalize=normalize,
)
t_values = t_values < utils.THRESHOLD
masks["threshold"] = t_values
# check for negative values -- currently unsupported
negative = pd.crosstab( # type: ignore
index, columns, values, aggfunc=neg_funcs, margins=margins
)
if negative.to_numpy().sum() > 0:
masks["negative"] = negative
# p-percent check
masks["p-ratio"] = pd.crosstab( # type: ignore
index, columns, values, aggfunc=pperc_funcs, margins=margins
)
# nk values check
masks["nk-rule"] = pd.crosstab( # type: ignore
index, columns, values, aggfunc=nk_funcs, margins=margins
)
# check for missing values -- currently unsupported
if utils.CHECK_MISSING_VALUES:
masks["missing"] = pd.crosstab( # type: ignore
index, columns, values, aggfunc=missing_funcs, margins=margins
)
else:
            # threshold check - doesn't matter what we pass for values
t_values = pd.crosstab( # type: ignore
index,
columns,
values=None,
rownames=rownames,
colnames=colnames,
aggfunc=None,
margins=margins,
margins_name=margins_name,
dropna=dropna,
normalize=normalize,
)
t_values = t_values < utils.THRESHOLD
masks["threshold"] = t_values
# pd.crosstab returns nan for an empty cell
for name, mask in masks.items():
mask.fillna(value=1, inplace=True)
mask = mask.astype(int)
mask.replace({0: False, 1: True}, inplace=True)
masks[name] = mask
# build the sdc dictionary
sdc: dict = utils.get_table_sdc(masks, self.suppress)
# get the status and summary
status, summary = utils.get_summary(sdc)
# apply the suppression
safe_table, outcome = utils.apply_suppression(table, masks)
if self.suppress:
table = safe_table
# record output
self.results.add(
status=status,
output_type="table",
properties={"method": "crosstab"},
sdc=sdc,
command=command,
summary=summary,
outcome=outcome,
output=[table],
)
return table
def pivot_table( # pylint: disable=too-many-arguments,too-many-locals
self,
data: DataFrame,
values=None,
index=None,
columns=None,
aggfunc="mean",
fill_value=None,
margins: bool = False,
dropna: bool = True,
margins_name: str = "All",
observed: bool = False,
sort: bool = True,
) -> DataFrame:
"""Create a spreadsheet-style pivot table as a DataFrame.
The levels in the pivot table will be stored in MultiIndex objects
(hierarchical indexes) on the index and columns of the result
DataFrame.
Parameters
----------
data : DataFrame
The DataFrame to operate on.
values : column, optional
Column to aggregate, optional.
index : column, Grouper, array, or list of the previous
If an array is passed, it must be the same length as the data. The
list can contain any of the other types (except list). Keys to
            group by on the pivot table index. If an array is passed, it is
            used in the same manner as column values.
columns : column, Grouper, array, or list of the previous
If an array is passed, it must be the same length as the data. The
list can contain any of the other types (except list). Keys to
            group by on the pivot table column. If an array is passed, it is
            used in the same manner as column values.
aggfunc : str | list[str], default 'mean'
If list of strings passed, the resulting pivot table will have
hierarchical columns whose top level are the function names
(inferred from the function objects themselves).
fill_value : scalar, default None
Value to replace missing values with (in the resulting pivot table,
after aggregation).
margins : bool, default False
Add all row / columns (e.g. for subtotal / grand totals).
dropna : bool, default True
Do not include columns whose entries are all NaN.
margins_name : str, default 'All'
Name of the row / column that will contain the totals when margins
is True.
observed : bool, default False
This only applies if any of the groupers are Categoricals. If True:
only show observed values for categorical groupers. If False: show
all values for categorical groupers.
sort : bool, default True
Specifies if the result should be sorted.
Returns
-------
DataFrame
Cross tabulation of the data.
"""
logger.debug("pivot_table()")
command: str = utils.get_command("pivot_table()", stack())
aggfunc = utils.get_aggfuncs(aggfunc) # convert string(s) to function(s)
n_agg: int = 1 if not isinstance(aggfunc, list) else len(aggfunc)
# requested table
table: DataFrame = pd.pivot_table( # pylint: disable=too-many-function-args
data,
values,
index,
columns,
aggfunc,
fill_value,
margins,
dropna,
margins_name,
observed,
sort,
)
# suppression masks to apply based on the following checks
masks: dict[str, DataFrame] = {}
# threshold check
agg = [utils.agg_threshold] * n_agg if n_agg > 1 else utils.agg_threshold
t_values = pd.pivot_table( # type: ignore
data, values, index, columns, aggfunc=agg
)
masks["threshold"] = t_values
if aggfunc is not None:
# check for negative values -- currently unsupported
agg = [utils.agg_negative] * n_agg if n_agg > 1 else utils.agg_negative
negative = pd.pivot_table( # type: ignore
data, values, index, columns, aggfunc=agg
)
if negative.to_numpy().sum() > 0:
masks["negative"] = negative
# p-percent check
agg = [utils.agg_p_percent] * n_agg if n_agg > 1 else utils.agg_p_percent
masks["p-ratio"] = pd.pivot_table( # type: ignore
data, values, index, columns, aggfunc=agg
)
# nk values check
agg = [utils.agg_nk] * n_agg if n_agg > 1 else utils.agg_nk
masks["nk-rule"] = pd.pivot_table( # type: ignore
data, values, index, columns, aggfunc=agg
)
# check for missing values -- currently unsupported
if utils.CHECK_MISSING_VALUES:
agg = [utils.agg_missing] * n_agg if n_agg > 1 else utils.agg_missing
masks["missing"] = pd.pivot_table( # type: ignore
data, values, index, columns, aggfunc=agg
)
# build the sdc dictionary
sdc: dict = utils.get_table_sdc(masks, self.suppress)
# get the status and summary
status, summary = utils.get_summary(sdc)
# apply the suppression
safe_table, outcome = utils.apply_suppression(table, masks)
if self.suppress:
table = safe_table
# record output
self.results.add(
status=status,
output_type="table",
properties={"method": "pivot_table"},
sdc=sdc,
command=command,
summary=summary,
outcome=outcome,
output=[table],
)
return table
def __check_model_dof(self, name: str, model) -> tuple[str, str, float]:
"""Check model DOF.
Parameters
----------
name : str
The name of the model.
model
A statsmodels model.
Returns
-------
str
Status: {"review", "fail", "pass"}.
str
Summary of the check.
float
The degrees of freedom.
"""
status = "fail"
dof: int = model.df_resid
threshold: int = self.config["safe_dof_threshold"]
if dof < threshold:
summary = f"fail; dof={dof} < {threshold}"
warnings.warn(f"Unsafe {name}: {summary}", stacklevel=8)
else:
status = "pass"
summary = f"pass; dof={dof} >= {threshold}"
logger.info("%s() outcome: %s", name, summary)
return status, summary, float(dof)
def ols( # pylint: disable=too-many-locals
self, endog, exog=None, missing="none", hasconst=None, **kwargs
) -> RegressionResultsWrapper:
"""Fits Ordinary Least Squares Regression.
Parameters
----------
endog : array_like
A 1-d endogenous response variable. The dependent variable.
exog : array_like
A nobs x k array where `nobs` is the number of observations and `k`
is the number of regressors. An intercept is not included by
default and should be added by the user.
missing : str
Available options are 'none', 'drop', and 'raise'. If 'none', no
nan checking is done. If 'drop', any observations with nans are
dropped. If 'raise', an error is raised. Default is 'none'.
hasconst : None or bool
Indicates whether the RHS includes a user-supplied constant. If
True, a constant is not checked for and k_constant is set to 1 and
all result statistics are calculated as if a constant is present.
If False, a constant is not checked for and k_constant is set to 0.
**kwargs
Extra arguments that are used to set model properties when using
the formula interface.
Returns
-------
RegressionResultsWrapper
Results.
"""
logger.debug("ols()")
command: str = utils.get_command("ols()", stack())
model = sm.OLS(endog, exog=exog, missing=missing, hasconst=hasconst, **kwargs)
results = model.fit()
status, summary, dof = self.__check_model_dof("ols", model)
tables: list[SimpleTable] = results.summary().tables
self.results.add(
status=status,
output_type="regression",
properties={"method": "ols", "dof": dof},
sdc={},
command=command,
summary=summary,
outcome=DataFrame(),
output=utils.get_summary_dataframes(tables),
)
return results
def olsr( # pylint: disable=too-many-locals,keyword-arg-before-vararg
self, formula, data, subset=None, drop_cols=None, *args, **kwargs
) -> RegressionResultsWrapper:
"""Fits Ordinary Least Squares Regression from a formula and dataframe.
Parameters
----------
formula : str or generic Formula object
The formula specifying the model.
data : array_like
The data for the model. See Notes.
subset : array_like
An array-like object of booleans, integers, or index values that
indicate the subset of df to use in the model. Assumes df is a
`pandas.DataFrame`.
drop_cols : array_like
Columns to drop from the design matrix. Cannot be used to
drop terms involving categoricals.
*args
Additional positional argument that are passed to the model.
**kwargs
These are passed to the model with one exception. The
``eval_env`` keyword is passed to patsy. It can be either a
:class:`patsy:patsy.EvalEnvironment` object or an integer
indicating the depth of the namespace to use. For example, the
default ``eval_env=0`` uses the calling namespace. If you wish
to use a "clean" environment set ``eval_env=-1``.
Returns
-------
RegressionResultsWrapper
Results.
Notes
-----
        data must define __getitem__ with the keys in the formula terms,
        e.g., a numpy structured or rec array, a dictionary, or a pandas
        DataFrame. args and kwargs are passed on to the model instantiation.
"""
logger.debug("olsr()")
command: str = utils.get_command("olsr()", stack())
model = smf.ols(
formula=formula,
data=data,
subset=subset,
drop_cols=drop_cols,
*args,
**kwargs,
)
results = model.fit()
status, summary, dof = self.__check_model_dof("olsr", model)
tables: list[SimpleTable] = results.summary().tables
self.results.add(
status=status,
output_type="regression",
properties={"method": "olsr", "dof": dof},
sdc={},
command=command,
summary=summary,
outcome=DataFrame(),
output=utils.get_summary_dataframes(tables),
)
return results
def logit( # pylint: disable=too-many-arguments,too-many-locals
self,
endog,
exog,
missing: str | None = None,
check_rank: bool = True,
) -> BinaryResultsWrapper:
"""Fits Logit model.
Parameters
----------
endog : array_like
A 1-d endogenous response variable. The dependent variable.
exog : array_like
A nobs x k array where nobs is the number of observations and k is
the number of regressors. An intercept is not included by default
and should be added by the user.
missing : str | None
Available options are ‘none’, ‘drop’, and ‘raise’. If ‘none’, no
nan checking is done. If ‘drop’, any observations with nans are
dropped. If ‘raise’, an error is raised. Default is ‘none’.
check_rank : bool
Check exog rank to determine model degrees of freedom. Default is
True. Setting to False reduces model initialization time when
exog.shape[1] is large.
Returns
-------
BinaryResultsWrapper
Results.
"""
logger.debug("logit()")
command: str = utils.get_command("logit()", stack())
model = sm.Logit(endog, exog, missing=missing, check_rank=check_rank)
results = model.fit()
status, summary, dof = self.__check_model_dof("logit", model)
tables: list[SimpleTable] = results.summary().tables
self.results.add(
status=status,
output_type="regression",
properties={"method": "logit", "dof": dof},
sdc={},
command=command,
summary=summary,
outcome=DataFrame(),
output=utils.get_summary_dataframes(tables),
)
return results
def logitr( # pylint: disable=too-many-locals,keyword-arg-before-vararg
self, formula, data, subset=None, drop_cols=None, *args, **kwargs
) -> RegressionResultsWrapper:
"""Fits Logit model from a formula and dataframe.
Parameters
----------
formula : str or generic Formula object
The formula specifying the model.
data : array_like
The data for the model. See Notes.
subset : array_like
An array-like object of booleans, integers, or index values that
indicate the subset of df to use in the model. Assumes df is a
`pandas.DataFrame`.
drop_cols : array_like
Columns to drop from the design matrix. Cannot be used to
drop terms involving categoricals.
*args
Additional positional argument that are passed to the model.
**kwargs
These are passed to the model with one exception. The
``eval_env`` keyword is passed to patsy. It can be either a
:class:`patsy:patsy.EvalEnvironment` object or an integer
indicating the depth of the namespace to use. For example, the
default ``eval_env=0`` uses the calling namespace. If you wish
to use a "clean" environment set ``eval_env=-1``.
Returns
-------
RegressionResultsWrapper
Results.
Notes
-----
        data must define __getitem__ with the keys in the formula terms,
        e.g., a numpy structured or rec array, a dictionary, or a pandas
        DataFrame. args and kwargs are passed on to the model instantiation.
"""
logger.debug("logitr()")
command: str = utils.get_command("logitr()", stack())
model = smf.logit(
formula=formula,
data=data,
subset=subset,
drop_cols=drop_cols,
*args,
**kwargs,
)
results = model.fit()
status, summary, dof = self.__check_model_dof("logitr", model)
tables: list[SimpleTable] = results.summary().tables
self.results.add(
status=status,
output_type="regression",
properties={"method": "logitr", "dof": dof},
sdc={},
command=command,
summary=summary,
outcome=DataFrame(),
output=utils.get_summary_dataframes(tables),
)
return results
def probit( # pylint: disable=too-many-arguments,too-many-locals
self,
endog,
exog,
missing: str | None = None,
check_rank: bool = True,
) -> BinaryResultsWrapper:
"""Fits Probit model.
Parameters
----------
endog : array_like
A 1-d endogenous response variable. The dependent variable.
exog : array_like
A nobs x k array where nobs is the number of observations and k is
the number of regressors. An intercept is not included by default
and should be added by the user.
missing : str | None
Available options are ‘none’, ‘drop’, and ‘raise’. If ‘none’, no
nan checking is done. If ‘drop’, any observations with nans are
dropped. If ‘raise’, an error is raised. Default is ‘none’.
check_rank : bool
Check exog rank to determine model degrees of freedom. Default is
True. Setting to False reduces model initialization time when
exog.shape[1] is large.
Returns
-------
BinaryResultsWrapper
Results.
"""
logger.debug("probit()")
command: str = utils.get_command("probit()", stack())
model = sm.Probit(endog, exog, missing=missing, check_rank=check_rank)
results = model.fit()
status, summary, dof = self.__check_model_dof("probit", model)
tables: list[SimpleTable] = results.summary().tables
self.results.add(
status=status,
output_type="regression",
properties={"method": "probit", "dof": dof},
sdc={},
command=command,
summary=summary,
outcome=DataFrame(),
output=utils.get_summary_dataframes(tables),
)
return results
def probitr( # pylint: disable=too-many-locals,keyword-arg-before-vararg
self, formula, data, subset=None, drop_cols=None, *args, **kwargs
) -> RegressionResultsWrapper:
"""Fits Probit model from a formula and dataframe.
Parameters
----------
formula : str or generic Formula object
The formula specifying the model.
data : array_like
The data for the model. See Notes.
subset : array_like
An array-like object of booleans, integers, or index values that
indicate the subset of df to use in the model. Assumes df is a
`pandas.DataFrame`.
drop_cols : array_like
Columns to drop from the design matrix. Cannot be used to
drop terms involving categoricals.
*args
Additional positional argument that are passed to the model.
**kwargs
These are passed to the model with one exception. The
``eval_env`` keyword is passed to patsy. It can be either a
:class:`patsy:patsy.EvalEnvironment` object or an integer
indicating the depth of the namespace to use. For example, the
default ``eval_env=0`` uses the calling namespace. If you wish
to use a "clean" environment set ``eval_env=-1``.
Returns
-------
RegressionResultsWrapper
Results.
Notes
-----
        data must define __getitem__ with the keys in the formula terms,
        e.g., a numpy structured or rec array, a dictionary, or a pandas
        DataFrame. args and kwargs are passed on to the model instantiation.
"""
logger.debug("probitr()")
command: str = utils.get_command("probitr()", stack())
model = smf.probit(
formula=formula,
data=data,
subset=subset,
drop_cols=drop_cols,
*args,
**kwargs,
)
results = model.fit()
status, summary, dof = self.__check_model_dof("probitr", model)
tables: list[SimpleTable] = results.summary().tables
self.results.add(
status=status,
output_type="regression",
properties={"method": "probitr", "dof": dof},
sdc={},
command=command,
summary=summary,
outcome=DataFrame(),
output=utils.get_summary_dataframes(tables),
)
return results
def rename_output(self, old: str, new: str) -> None:
"""Rename an output.
Parameters
----------
old : str
The old name of the output.
new : str
The new name of the output.
"""
self.results.rename(old, new)
def add_comments(self, output: str, comment: str) -> None:
"""Adds a comment to an output.
Parameters
----------
output : str
The name of the output.
comment : str
The comment.
"""
self.results.add_comments(output, comment)
def add_exception(self, output: str, reason: str) -> None:
"""Adds an exception request to an output.
Parameters
----------
output : str
The name of the output.
reason : str
The comment.
"""
self.results.add_exception(output, reason)
def add_constant(data, prepend: bool = True, has_constant: str = "skip"):
"""Add a column of ones to an array.
Parameters
----------
data : array_like
A column-ordered design matrix.
prepend : bool
If true, the constant is in the first column. Else the constant is
appended (last column).
has_constant : str {'raise', 'add', 'skip'}
Behavior if data already has a constant. The default will return
data without adding another constant. If 'raise', will raise an
error if any column has a constant value. Using 'add' will add a
column of 1s if a constant column is present.
Returns
-------
array_like
The original values with a constant (column of ones) as the first
or last column. Returned value type depends on input type.
Notes
-----
When the input is a pandas Series or DataFrame, the added column's name
is 'const'.
"""
return sm.add_constant(data, prepend=prepend, has_constant=has_constant)
| 32,438
| 36.243398
| 86
|
py
|
ACRO
|
ACRO-main/acro/version.py
|
"""ACRO version number."""
__version__ = "0.4.2"
| 49
| 15.666667
| 26
|
py
|
ACRO
|
ACRO-main/acro/__init__.py
|
"""ACRO."""
from .acro import *
| 32
| 10
| 19
|
py
|
ACRO
|
ACRO-main/stata/acro_stata_parser.py
|
# file with commands to manage the stata-acro interface
import re
import pandas as pd
from acro import ACRO, add_constant
def apply_stata_ifstmt(raw: str, df: pd.DataFrame) -> pd.DataFrame:
if len(raw) == 0:
return df
else:
        # add parentheses around each clause - keeping any in the original
raw = "( " + raw + ")"
raw = raw.replace("&", ") & (")
raw = raw.replace("|", ") | (")
        # put spaces around operators to ease parsing; match two-character
        # operators (>=, <=, ==, !=) before single ones so they are not split
        raw = re.sub(r"(>=|<=|==|!=|>|<)", r" \1 ", raw)
# replace variable names with df["varname"]
for vname in df.columns:
raw = raw.replace(vname, 'df["' + vname + '"]')
# print(raw)
# apply exclusion
df2 = df[eval(raw)]
return df2
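# Hedged usage sketch (added for illustration, not part of the original
# module): the if-clause is rewritten into a pandas boolean expression and
# used to filter rows. The dataframe and column names below are invented.
def _example_apply_stata_ifstmt() -> pd.DataFrame:
    df = pd.DataFrame({"age": [25, 40, 60], "sex": [0, 1, 1]})
    # "age > 30 & sex == 1" becomes df[(df["age"] > 30) & (df["sex"] == 1)],
    # keeping only the rows with age 40 and 60
    return apply_stata_ifstmt("age > 30 & sex == 1", df)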
def apply_stata_expstmt(raw: str, df: pd.DataFrame) -> pd.DataFrame:
# stata allows f and F for first item and l/L for last
last = len(df) - 1
token = raw.split("/")
# first index
if token[0] == "f" or token[0] == "F":
start = 0
else:
start = int(token[0])
if start < 0:
start = last + 1 + start
# last
if "/" not in raw:
end = last
else:
if token[1] == "l" or token[1] == "L":
token[1] = last
end = int(token[1])
if end < 0:
end = last + 1 + end
# enforce start <=end
if start > end:
end = last
return df.iloc[start:end]
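# Hedged usage sketch (added for illustration, not part of the original
# module): Stata "in" ranges such as "f/3" (from the first observation) or
# "5/l" (up to the last observation) are mapped onto an iloc slice.
def _example_apply_stata_expstmt() -> pd.DataFrame:
    df = pd.DataFrame({"x": range(10)})
    # "f/3" gives df.iloc[0:3], i.e. the first three rows
    return apply_stata_expstmt("f/3", df)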
def find_brace_contents(word: str, raw: str) -> (bool, str):
idx = raw.find(word)
if idx == -1:
return False, f"{word} not found"
idx += len(word) + 1
substr = ""
    while idx < len(raw) and raw[idx] != ")":
substr += raw[idx]
idx += 1
if idx == len(raw):
return False, "phrase nor completed"
else:
return True, substr
def parse_table_details(varlist: list, varnames: list, options: str) -> dict:
"""Function to parse stata-13 style table calls
Note this is not for latest version of stata, syntax here:
https://www.stata.com/manuals13/rtable.pdf
>> table rowvar [colvar [supercolvar] [if] [in] [weight] [, options].
"""
details = {"errmsg": ""}
details["rowvars"] = [varlist.pop(0)]
details["colvars"] = list(reversed(varlist))
# by() contents are super-rows
found, superrows = find_brace_contents("by", options)
if found and len(superrows) > 0:
extras = superrows.split()
for word in extras:
if word not in varnames:
details[
"errmsg"
] = f"Error: word {word} in by-list is not a variables name"
return details
if word not in details["rowvars"]:
details["rowvars"].insert(0, word)
# contents can be variable names or aggregation functions
details["aggfuncs"], details["values"] = [], []
found, content = find_brace_contents("contents", options)
if found and len(content) > 0:
contents = content.split()
for word in contents:
if word in varnames:
if word not in details["values"]:
details["values"].append(word)
else:
if word not in details["aggfuncs"]:
details["aggfuncs"].append(word)
# default values
details["totals"] = False if "nototals" in options else True
details["suppress"] = False if "nosuppress" in options else True
return details
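# Hedged usage sketch (added for illustration, not part of the original
# module): parsing a Stata-13 style call such as
#     table survivor region, contents(mean income) by(year)
# The variable and option names are invented for demonstration.
def _example_parse_table_details() -> dict:
    varlist = ["survivor", "region"]
    varnames = ["survivor", "region", "income", "year"]
    options = "contents(mean income) by(year)"
    # expected result: rowvars ["year", "survivor"], colvars ["region"],
    # aggfuncs ["mean"], values ["income"], totals True, suppress True
    return parse_table_details(varlist, varnames, options)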
def parse_and_run(
df: pd.DataFrame,
command: str,
varlist: str,
exclusion: str,
exp: str,
weights: str,
options: str,
) -> pd.DataFrame:
"""
Takes a dataframe and the parsed stata command line.
Runs the appropriate command on a pre-existing ACRO object stata_acro
Returns the result as a formatted string.
"""
# TODO de-abbreviate according to
# https://www.stata.com/manuals13/u11.pdf#u11.1.3ifexp
global stata_acro
varlist = varlist.split()
# print(f' split varlist is {varlist}')
# data reduction
if len(exclusion) > 0:
df = apply_stata_ifstmt(exclusion, df)
if len(exp) > 0:
df = apply_stata_expstmt(exp, df)
# now look at the commands
# session management first
if command == "init":
# initialise the acro object
stata_acro = ACRO()
return "acro analysis session created\n"
elif command == "finalise":
stata_acro.finalise("stata_out", "json")
return "outputs and stata_out.json written\n"
elif command == "print_outputs":
stata_acro.print_outputs()
return ""
# now statistical commands
elif command == "table":
varnames = df.columns
details = parse_table_details(varlist, varnames, options)
if len(details["errmsg"]) > 0:
return details["errmsg"]
else:
aggfuncs = list(map(lambda x: x.replace("sd", "std"), details["aggfuncs"]))
rows, cols = [], []
for row in details["rowvars"]:
rows.append(df[row])
for col in details["colvars"]:
cols.append(df[col])
if len(aggfuncs) > 0 and len(details["values"]) > 0:
safe_output = stata_acro.crosstab(
index=rows,
columns=cols,
aggfunc=aggfuncs,
values=details["values"],
# suppress=details['suppress'],
margins=details["totals"],
margins_name="Total",
)
else:
safe_output = stata_acro.crosstab(
index=rows,
columns=cols,
# suppress=details['suppress'],
margins=details["totals"],
margins_name="Total",
)
options_str = ""
formatting = [
"cellwidth",
"csepwidth",
"stubwidth",
"scsepwidth",
"center",
"left",
]
if any(word in options for word in formatting):
options_str = (
"acro does not currently support table formatting commands.\n "
)
return options_str + safe_output.to_string() + "\n"
elif command == "regress":
depvar = varlist[0]
indep_vars = varlist[1:]
new_df = df[varlist].dropna()
y = new_df[depvar]
x = new_df[indep_vars]
x = add_constant(x)
results = stata_acro.ols(y, x)
res_str = results.summary().as_csv()
return res_str
elif command == "probit":
depvar = varlist[0]
indep_vars = varlist[1:]
new_df = df[varlist].dropna()
y = new_df[depvar].astype("category").cat.codes # numeric
y.name = depvar
x = new_df[indep_vars]
x = add_constant(x)
results = stata_acro.probit(y, x)
res_str = results.summary().as_csv()
return res_str
else:
return f"acro command not recognised: {command}"
| 7,197
| 30.432314
| 87
|
py
|
ACRO
|
ACRO-main/notebooks/test-nursery.py
|
"""
ACRO Tests
Copyright : Maha Albashir, Richard Preen, Jim Smith 2023.
"""
# import libraries
import os
import numpy as np
import pandas as pd
from scipy.io.arff import loadarff
from acro import ACRO, add_constant
# Instantiate ACRO by making an acro object
print(
"\n Creating an acro object().\n"
"The TRE's risk appetite is read from default.yml\n"
"and shown to the researcher and output checker"
)
acro = ACRO()
# Load test data
# The dataset used in this notebook is the nursery dataset from OpenML.
# - The dataset can be read directly from OpenML using the code commented in the next cell.
# - In this version, it can be read directly from the local machine
# if it has been downloaded.
# - The code below reads the data from a folder called "data"
# which we assume is at the same level as the folder where you are working.
# - The path might need to be changed if the data has been downloaded and stored elsewhere.
# - for example use:
# path = os.path.join("data", "nursery.arff")
# if the data is in a sub-folder of your work folder
# commented out version to load from web
# from sklearn.datasets import fetch_openml
# data = fetch_openml(data_id=26, as_frame=True)
# df = data.data
# df["recommend"] = data.target
# Version to load data from local directory
path = os.path.join("../data", "nursery.arff")
data = loadarff(path)
df = pd.DataFrame(data[0])
df = df.select_dtypes([object])
df = df.stack().str.decode("utf-8").unstack()
df.rename(columns={"class": "recommend"}, inplace=True)
print("\n Data loaded, these are the first five rows")
print(df.head())
# Convert 'more than 3' children to random between 4 and 10
# Change the children column from categorical to numeric
# in order to be able to test some of the ACRO functions that require a numeric feature
print("\nChanging number of children to integer type")
df["children"].replace(to_replace={"more": "4"}, inplace=True)
df["children"] = pd.to_numeric(df["children"])
df["children"] = df.apply(
lambda row: row["children"]
if row["children"] in (1, 2, 3)
else np.random.randint(4, 10),
axis=1,
)
# Examples of producing tabular output
# We rely on the industry-standard package **pandas** for tabulating data.
# In the next few examples we show:
# - first, how a researcher would normally make a call in pandas,
# saving the results in a variable that they can view on screen (or save to file?)
# - then how the call is identical in SACRO, except that:
# - "pd" is replaced by "acro"
# - the researcher immediately sees TRE output checking recommendations.
print(
"\nThe first set of examples show acro wrappers around "
" standard tabulation routines from the pandas package."
)
# Pandas crosstab
# This is an example of crosstab using pandas.
# We first make the call, then the second line prints the output to screen.
print("\nCalling crosstab of recommendation by parents using pandas")
table = pd.crosstab(df.recommend, df.parents)
print(table)
# ACRO crosstab
# This is an example of crosstab using ACRO.
# The INFO lines show the researcher what will be reported to the output checkers.
# Then the (suppressed as necessary) table is shown via the print command as before.
print("\nNow the same crosstab call using the ACRO interface")
safe_table = acro.crosstab(df.recommend, df.parents)
print("\nand this is the researchers output")
print(safe_table)
# ACRO crosstab with aggregation function
# Mean() in this case
# Then how Max and Min are not allowed by the code
print(
"\nIllustration of crosstab using an aggregation function " "- mean in this case."
)
safe_table = acro.crosstab(df.recommend, df.parents, values=df.children, aggfunc="mean")
print("\nand this is the researchers output")
print(safe_table)
print(
"\nThis is what happens if you try to get max values for a cell."
"\nSo that this script runs on one go, we've caught the exception "
"thrown by ACRO."
)
try:
safe_table = acro.crosstab(
df.recommend, df.parents, values=df.children, aggfunc="max"
)
except ValueError as e:
print("ValueError:")
print(e)
# ACRO pivot_table
# This is an example of pivot table using ACRO.
# - Some researchers may prefer this to using crosstab.
# - Again the call syntax is identical to the pandas "pd.pivot_table"
# - in this case the output is non-disclosive
print("\nIllustration of using the acro version of pandas pivot table")
table = acro.pivot_table(
df, index=["parents"], values=["children"], aggfunc=["mean", "std"]
)
print("\nand this is the researchers output")
print(table)
# Regression examples using ACRO
# Again there is an industry-standard package in python, this time called **statsmodels**.
# - The examples below illustrate the use of the ACRO wrapper standard statsmodel functions
# - Note that statsmodels can be called using an 'R-like' format
# (using an 'r' suffix on the command names)
# - most statsmodels functions return a "results object",
# which has a "summary" function that produces printable/saveable outputs
print(
"\nThe next set of examples illustrate acro wrappers "
"around functions from the statsmodels package"
)
# Start by manipulating the nursery data to get two numeric variables
# - The 'recommend' column is converted to an integer scale
df["recommend"].replace(
to_replace={
"not_recom": "0",
"recommend": "1",
"very_recom": "2",
"priority": "3",
"spec_prior": "4",
},
inplace=True,
)
df["recommend"] = pd.to_numeric(df["recommend"])
new_df = df[["recommend", "children"]]
new_df = new_df.dropna()
# ACRO OLS
# This is an example of ordinary least squares regression using ACRO.
# - Above, the recommend column was converted from categorical to numeric.
# - Now we perform a linear regression between recommend and children.
# - This version includes a constant (intercept)
# - This is just to show how the regression is done using ACRO.
# - **No correlation is expected to be seen by using these variables**
y = new_df["recommend"]
x = new_df["children"]
x = add_constant(x)
print("\nOrdinary Least Squares Regression")
results = acro.ols(y, x)
print("\nand this is the researchers output")
print(results.summary())
# ACRO OLSR
# This is an example of ordinary least squares regression using the 'R-like' statsmodels api,
# i.e. from a formula and dataframe using ACRO
print("\nAnd same, but passing a formula instead of two arrays")
results = acro.olsr(formula="recommend ~ children", data=new_df)
print("\nand this is the researchers output")
print(results.summary())
# ACRO Probit
# This is an example of probit regression using ACRO
# We use a different combination of variables from the original dataset.
new_df = df[["finance", "children"]]
new_df = new_df.dropna()
y = new_df["finance"].astype("category").cat.codes # numeric
y.name = "finance"
x = new_df["children"]
x = add_constant(x)
print("\n Example of a probit regression")
results = acro.probit(y, x)
print("\nand this is the researchers output")
print(results.summary())
# ACRO Logit
# This is an example of logistic regression using ACRO and the statsmodels logit function
print("\n Example of a logit regression")
results = acro.logit(y, x)
print("\nand this is the researchers output")
print(results.summary())
# ACRO functionality to let users manage their outputs
#
# 1: List current ACRO outputs
# This is an example of using the print_outputs function to list all the outputs created so far
print("\nNow illustrating how users can manage their outputs")
print(
"\nStart by listing the outputs in the acro memory."
"For each output the key line is the one starting 'Summary'"
)
acro.print_outputs()
# 2: Remove some ACRO outputs before finalising
# This is an example of deleting some of the ACRO outputs.
# The name of the output to be removed should be passed to the function remove_output.
# - Currently, all output names contain a timestamp;
# that is the time when the output was created.
# - The output name can be taken from the outputs listed by the print_outputs function,
# - or by listing the results and choosing the specific output that needs to be removed
print("\nNow removing two disclosive outputs")
acro.remove_output("output_1")
acro.remove_output("output_4")
# 3: Rename ACRO outputs before finalising
# This is an example of renaming the outputs to provide a more descriptive name.
# The timestamp associated with the output name will not get overwritten
print("\nUsers can rename output files to something more informative")
acro.rename_output("output_2", "pivot_table")
# 4: Add a comment to output
# This is an example to add a comment to outputs.
# It can be used to provide a description
# or to pass additional information to the output checkers.
print("\nUsers can add comments which the output checkers will see.")
acro.add_comments("output_0", "Please let me have this table!")
acro.add_comments("output_0", "6 cells were suppressed in this table")
# 5: Add an unsupported output to the list of outputs
# This is an example to add an unsupported outputs (such as images) to the list of outputs
print("\nUsers can add files produced by an analysis aCRO doesn't cover")
acro.custom_output(
"XandY.jfif", "This output is an image showing the relationship between X and Y"
)
# 6 (the big one) Finalise ACRO
# This is an example of the function _finalise()_
# which the users must call at the end of each session.
# - It takes each output and saves it to a CSV file.
# - It also saves the SDC analysis for each output to a json file or Excel file
# (depending on the extension of the name of the file provided as an input to the function)
print(
"\nUsers MUST call finalise to send their outputs to the checkers"
" If they don't, the SDC analysis, and their outputs, are lost."
)
output = acro.finalise("RES_TEST", "json")
| 9,954
| 32.518519
| 95
|
py
|
Dcm2Bids
|
Dcm2Bids-master/setup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
description = """Reorganising NIfTI files from dcm2niix into the Brain Imaging Data Structure"""
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except(IOError, ImportError):
long_description = open('README.md').read()
import glob
from setuptools import setup
DISTNAME = "dcm2bids"
DESCRIPTION = description
VERSION = "0.4.0.1"
AUTHOR = "Johan Carlin"
AUTHOR_EMAIL = ""
URL = "https://github.com/jooh/Dcm2Bids"
DOWNLOAD_URL = URL + "/archive/" + VERSION + ".tar.gz"
if __name__ == "__main__":
setup(
name=DISTNAME,
version=VERSION,
description=description,
long_description=long_description,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
url=URL,
download_url=DOWNLOAD_URL,
packages=['dcm2bids'],
scripts=glob.glob('scripts/dcm2bids*'),
install_requires=['future'],
)
| 1,007
| 24.2
| 96
|
py
|
Dcm2Bids
|
Dcm2Bids-master/dcm2bids/structure.py
|
# -*- coding: utf-8 -*-
import os
class Participant(object):
"""
"""
def __init__(self, name, session=None):
self._name = name
self._session = session
@property
def name(self):
return "sub-{}".format(self._name)
@property
def session(self):
if self._session is None:
return None
else:
return "ses-{}".format(self._session)
@session.setter
def session(self, session):
self._session = session
@property
def directory(self):
if self.hasSession():
return os.path.join(self.name, self.session)
else:
return self.name
@property
def prefix(self):
if self.hasSession():
return "{}_{}".format(self.name, self.session)
else:
return self.name
def hasSession(self):
return self.session is not None
class Acquisition(object):
"""
"""
def __init__(self, base, dataType, suffix, customLabels=""):
self.base = base
self.dataType = dataType
self._suffix = suffix
self.customLabels = customLabels
@property
def suffix(self):
suffix = ""
if self.customLabels:
suffix += "{}_".format(self.customLabels)
suffix += self._suffix
return suffix
| 1,347
| 19.738462
| 64
|
py
|
Dcm2Bids
|
Dcm2Bids-master/dcm2bids/dcm2niix.py
|
# -*- coding: utf-8 -*-
import glob
import os
from subprocess import call
from collections import OrderedDict
import re
from .utils import clean
def sidecar2meta(carfile):
"""extract series number and potential
suffixes (reflecting e.g. separate images for each echo) from the dcm2niix
sidecar file name"""
hits = re.search(
"_series(?P<series>\d{3})(?P<suffix>\w*).json", os.path.split(carfile)[1]
)
return {"seriesnum": int(hits.group("series")), "suffix": hits.group("suffix")}
class Dcm2niix(object):
"""
"""
def __init__(
self,
dicom_dir,
participant=None,
output="dcm2niix-example",
outputdir=os.path.join(os.getcwd(), "tmp_dcm2bids"),
):
self.dicomDir = dicom_dir
self.participant = participant
self.output = output
self.outputdir = outputdir
if not os.path.exists(self.outputdir):
os.makedirs(self.outputdir)
self.options = "-b y -ba y -z y -f '%f_%p_%t_series%3s'"
self.sidecars = []
@property
def outputDir(self):
if self.participant is None:
return os.path.join(self.outputdir, self.output)
else:
return os.path.join(self.outputdir, self.participant.prefix)
def run(self):
clean(self.outputDir)
self.execute()
carfiles = glob.glob(os.path.join(self.outputDir, "*.json"))
carfiles.sort()
self.sidecars = OrderedDict(
(thiscar, sidecar2meta(thiscar)) for thiscar in carfiles
)
return 0
def execute(self):
with open(os.path.join(self.outputDir, "dcm2niix.log"), "a") as file_handle:
for directory in self.dicomDir:
commandStr = "dcm2niix {} -o {} {}"
command = commandStr.format(self.options, self.outputDir, directory)
call(command, shell=True, stdout=file_handle)
| 1,932
| 28.738462
| 84
|
py
|
Dcm2Bids
|
Dcm2Bids-master/dcm2bids/dcm2bids.py
|
# -*- coding: utf-8 -*-
import glob
import os
import datetime
import logging
from collections import OrderedDict
from .dcm2niix import Dcm2niix
from .sidecarparser import Sidecarparser
from .structure import Participant
from .utils import (
load_json,
make_directory_tree,
splitext_,
save_json,
write_txt,
read_participants,
write_participants,
)
from subprocess import call
class Dcm2bids(object):
"""
"""
def __init__(
self,
dicom_dir,
config,
clobber,
participant,
session=None,
selectseries=None,
outputdir=os.getcwd(),
loglevel="INFO",
anonymizer=None,
):
self.dicom_dir = dicom_dir
self.config = load_json(config)
self.clobber = clobber
self.extension = ".nii.gz"
self.participant = Participant(participant, session)
self.selectseries = selectseries
self.outputdir = outputdir
self.dicomdir = os.path.join(outputdir, "tmp_dcm2bids")
self.anonymizer = anonymizer
if not os.path.exists(self.outputdir):
# need exist_ok to prevent race conditions in parallel execution
os.makedirs(self.outputdir, exist_ok=True)
self.derivdir = os.path.join(outputdir, "derivatives")
logging.basicConfig(
format="%(asctime)s %(message)s",
datefmt="%Y/%m/%d %H:%M",
filemode="a",
filename=os.path.join(self.outputdir, "dcm2bids.log"),
)
self.logger = logging.getLogger("dcm2bids")
self.logger.setLevel(loglevel.upper())
self.logger.info("--- dcm2bids start ---")
self.logger.info("participant: %s", participant)
self.logger.info("session: %s", session)
[
self.logger.info("dicom_dir: %s", os.path.realpath(thisdir))
for thisdir in dicom_dir
]
self.logger.info("config: %s", os.path.realpath(config))
self.logger.info("outputdir: %s", os.path.realpath(self.outputdir))
@property
def session(self):
return self.participant.session
@session.setter
def session(self, value):
self.participant.session = value
def run(self):
# convert dicoms to temporary dir
self.logger.info("running dcm2niix DICOM to NIFTI conversion")
dcm2niix = Dcm2niix(self.dicom_dir, self.participant, outputdir=self.dicomdir)
dcm2niix.run()
self.logger.info("parsing sidecars")
parser = Sidecarparser(
dcm2niix.sidecars, self.config["descriptions"], self.selectseries
)
self.logger.info("moving acquisitions into BIDS output directory")
for acq in parser.acquisitions:
self._move(acq)
self.logger.info("updating standard study files")
if parser.acquisitions:
self._updatestudyfiles()
self.logger.info("--- dcm2bids finished without errors ---")
return 0
def _move(self, acquisition):
targetDir = os.path.join(
self.outputdir, self.participant.directory, acquisition.dataType
)
filename = "{}_{}".format(self.participant.prefix, acquisition.suffix)
targetBase = os.path.join(targetDir, filename)
# need to test for both because dcm2niix sometimes refuses to compress
if os.path.isfile(targetBase + ".nii.gz") or os.path.isfile(
targetBase + ".nii"
):
if self.clobber:
print("'{}' overwrites".format(filename))
for f in glob.glob(targetBase + ".*"):
os.remove(f)
for f in glob.glob(acquisition.base + ".*"):
_, ext = splitext_(f)
os.rename(f, targetBase + ext)
else:
print("'{}' already exists".format(filename))
return
# if we make it this far, we can copy away
make_directory_tree(targetDir)
for f in glob.glob(acquisition.base + ".*"):
_, ext = splitext_(f)
if self.anonymizer and acquisition.dataType == "anat" and ".nii" in ext:
# it's an anat scan - try the anonymizer
command = " ".join([self.anonymizer, f, "--outfile", targetBase + ext])
self.logger.info(
"anonymizing anatomical with %s: %s",
self.anonymizer,
targetBase + ext,
)
call(command, shell=True)
else:
# just move
os.rename(f, targetBase + ext)
def _updatestudyfiles(self):
if not os.path.exists(self.derivdir):
os.makedirs(self.derivdir)
# participant table
partfile = os.path.join(self.outputdir, "participants.tsv")
participants = read_participants(partfile)
if not participants or not any(
[part["participant_id"] == self.participant.name for part in participants]
):
participants.append(
OrderedDict(zip(("participant_id",), (self.participant.name,)))
)
write_participants(partfile, participants)
# dataset description
descfile = os.path.join(self.outputdir, "dataset_description.json")
if not os.path.exists(descfile):
save_json(
{
"Name": "",
"BIDSVersion": "1.1.0",
"License": "",
"Authors": [""],
"Acknowledgments": "",
"HowToAcknowledge": "",
"Funding": "",
"ReferencesAndLinks": [""],
"DatasetDOI": "",
},
descfile,
)
# readme/change files
readmefile = os.path.join(self.outputdir, "README")
if not os.path.exists(readmefile):
write_txt(readmefile)
changefile = os.path.join(self.outputdir, "CHANGES")
if not os.path.exists(changefile):
write_txt(
changefile,
[
"Revision history for BIDS dataset.",
"",
"0.01 " + datetime.date.today().strftime("%Y-%m-%d"),
"",
" - Initialised study directory",
],
)
# ignore file for bids-validator
ignorefile = os.path.join(self.outputdir, ".bidsignore")
if not os.path.exists(ignorefile):
write_txt(ignorefile, ["tmp_dcm2bids/*", "dcm2bids.log"])
| 6,650
| 33.82199
| 87
|
py
|
Dcm2Bids
|
Dcm2Bids-master/dcm2bids/utils.py
|
# -*- coding: utf-8 -*-
import json
import os
import shutil
import csv
from collections import OrderedDict
import sys
def load_json(filename):
with open(filename, "r") as f:
data = json.load(f, strict=False)
return data
def save_json(data, filename):
with open(filename, "w") as f:
json.dump(data, f, indent=4)
def write_txt(filename, lines=[]):
with open(filename, "a") as f:
for row in lines:
f.write("%s\n" % row)
def write_participants(filename, participants):
with open(filename, "w") as f:
writer = csv.DictWriter(f, delimiter="\t", fieldnames=participants[0].keys())
writer.writeheader()
writer.writerows(participants)
def read_participants(filename):
if not os.path.exists(filename):
return []
with open(filename, "r") as f:
reader = csv.reader(f, delimiter="\t")
# Check for python version and use the right syntax.
if sys.version_info[0] < 3:
header = reader.next()
else:
header = next(reader)
return [OrderedDict(zip(header, row)) for row in reader]
def make_directory_tree(directory):
if not os.path.exists(directory):
os.makedirs(directory)
def clean(directory):
make_directory_tree(directory)
if not os.listdir(directory) == []:
shutil.rmtree(directory)
make_directory_tree(directory)
else:
make_directory_tree(directory)
def splitext_(path):
for ext in [".nii.gz"]:
if path.endswith(ext):
return path[: -len(ext)], path[-len(ext) :]
return os.path.splitext(path)
| 1,632
| 23.014706
| 85
|
py
|
Dcm2Bids
|
Dcm2Bids-master/dcm2bids/__init__.py
|
# -*- coding: utf-8 -*-
__version__ = "0.4.0.1"
| 48
| 15.333333
| 23
|
py
|
Dcm2Bids
|
Dcm2Bids-master/dcm2bids/sidecarparser.py
|
# -*- coding: utf-8 -*-
import itertools
import os
from collections import defaultdict, OrderedDict
from future.utils import iteritems
from .structure import Acquisition
from .utils import load_json, save_json, splitext_
import logging
class Sidecarparser(object):
def __init__(self, sidecars, descriptions, selectseries=None):
self.sidecars = sidecars
self.descriptions = descriptions
self.selectseries = selectseries
self.logger = logging.getLogger("dcm2bids")
self.graph = self._generateGraph()
self.acquisitions = self._generateAcquisitions()
self.findRuns()
def _generateGraph(self):
graph = OrderedDict((_, []) for _ in self.sidecars)
for sidecar, index in itertools.product(
self.sidecars, range(len(self.descriptions))
):
if (
self.selectseries
and not self.sidecars[sidecar]["seriesnum"] in self.selectseries
):
continue
self.sidecars[sidecar]["header"] = load_json(sidecar)
self._sidecar = self.sidecars[sidecar]
if self._respect(self.descriptions[index]["criteria"]):
self.logger.debug("Description %d matches sidecar %s\n", index, sidecar)
graph[sidecar].append(index)
if "customHeader" in self.descriptions[index]:
self.sidecars[sidecar]["header"].update(
self.descriptions[index]["customHeader"]
)
save_json(self.sidecars[sidecar]["header"], sidecar)
# attempt to correct nifti header if a field has been
# changed that affects it
if "RepetitionTime" in self.descriptions[index]["customHeader"]:
self.logger.info(
"RepetitionTime field is changed to %.2f in "
"description, updating nifti header to match...",
self.descriptions[index]["customHeader"]["RepetitionTime"],
)
try:
import nibabel
# load up the nifti and re-write the header
# see also:
# https://groups.google.com/d/msg/bids-discussion/jPVb-4Ah29A/fB52S8ExBgAJ
niftipath = splitext_(sidecar)[0] + ".nii"
if not os.path.exists(niftipath):
niftipath += ".gz"
ni = nibabel.load(niftipath)
ni.header["pixdim"][4] = self.descriptions[index][
"customHeader"
]["RepetitionTime"]
# ugly but necessary because the above fix may not
# always work (all method because numpy does not
# work reliably with python builtin function)
assert (ni.header["xyzt_units"] == 10).all(), (
"sequences with non-standard xyzt_units field "
"are currently unsupported"
)
ni.to_filename(niftipath)
self.logger.info("updated header successfully.")
except ImportError:
self.logger.warning(
"nibabel is unavailable, unable to "
"fix nifti-sidecar mismatch"
)
except:
raise
return graph
def _generateAcquisitions(self):
rsl = []
print("")
for sidecar, match_descs in iteritems(self.graph):
base = splitext_(sidecar)[0]
basename = os.path.basename(sidecar)
if len(match_descs) == 1:
print("'{}' satisfies one description".format(basename))
acq = self._acquisition(base, self.descriptions[match_descs[0]])
rsl.append(acq)
elif len(match_descs) == 0:
print("'{}' satisfies no description - skipping".format(basename))
else:
print("'{}' satisfies several descriptions - skipping".format(basename))
return rsl
def findRuns(self):
def list_duplicates(seq):
"""
http://stackoverflow.com/a/5419576
"""
tally = defaultdict(list)
for i, item in enumerate(seq):
tally[item].append(i)
return ((key, locs) for key, locs in tally.items() if len(locs) > 1)
suffixes = [_.suffix for _ in self.acquisitions]
for suffix, dup in sorted(list_duplicates(suffixes)):
print("'{}': has several runs".format(suffix))
for run, acq_index in enumerate(dup):
runStr = "run-{:02d}".format(run + 1)
acq = self.acquisitions[acq_index]
if acq.customLabels:
acq.customLabels += "_" + runStr
else:
acq.customLabels = runStr
print("")
def _acquisition(self, base, desc):
acq = Acquisition(base, desc["dataType"], desc["suffix"])
if "customLabels" in desc:
acq.customLabels = desc["customLabels"]
if "customHeader" in desc and "TaskName" in desc["customHeader"]:
if acq.customLabels:
acq.customLabels += "_task-" + desc["customHeader"]["TaskName"]
else:
acq.customLabels = "task-" + desc["customHeader"]["TaskName"]
return acq
def _respect(self, criteria):
isEqual = "equal" in criteria
isIn = "in" in criteria
isSuff = "suffix" in criteria
isSeries = "seriesnum" in criteria
# Check if there is some criteria
if not any([isEqual, isIn, isSuff, isSeries]):
return False
if isEqual:
rsl_equal = self._isEqual(criteria["equal"])
else:
rsl_equal = True
if isIn:
rsl_in = self._isIn(criteria["in"])
else:
rsl_in = True
if isSuff:
rsl_suff = self._isFilenameSuffix(criteria["suffix"])
else:
rsl_suff = True
if isSeries:
rsl_series = self._isFilenameSeries(criteria["seriesnum"])
else:
rsl_series = True
return all([rsl_equal, rsl_in, rsl_suff, rsl_series])
def _isEqual(self, criteria):
rsl = []
for tag, query in iteritems(criteria):
rsl.append(query == self.get_value(tag))
return all(rsl)
def _isIn(self, criteria):
rsl = []
for tag, query in iteritems(criteria):
if isinstance(query, list):
for q in query:
rsl.append(q in self.get_value(tag))
else:
rsl.append(query in self.get_value(tag))
return all(rsl)
def _isFilenameSuffix(self, criteria):
if isinstance(criteria, list):
return any(self._isFilenameSuffix(crit) for crit in criteria)
return self._sidecar["suffix"] == criteria
def _isFilenameSeries(self, criteria):
if isinstance(criteria, list):
return any(self._isFilenameSeries(crit) for crit in criteria)
return self._sidecar["seriesnum"] == criteria
def get_value(self, tag):
if tag in self._sidecar["header"]:
return self._sidecar["header"][tag]
else:
return ""
| 7,753
| 38.969072
| 102
|
py
|
multimodal-vae-public
|
multimodal-vae-public-master/vision/sample.py
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import numpy as np
from PIL import Image
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import transforms
from torchvision.utils import save_image
from train import load_checkpoint
from datasets import obscure_image
from datasets import add_watermark
# this is the same loader used in datasets.py
image_transform = transforms.Compose([transforms.Resize(64),
transforms.CenterCrop(64),
transforms.ToTensor()])
if __name__ == "__main__":
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('model_path', type=str, help='path to trained model file')
parser.add_argument('--condition-file', type=str,
help='if specified, condition on this image.')
parser.add_argument('--condition-type', type=str,
help='image|gray|edge|mask|obscured|watermark')
parser.add_argument('--n-samples', type=int, default=1,
help='Number of images and texts to sample [default: 1]')
parser.add_argument('--cuda', action='store_true', default=False,
help='enables CUDA training')
args = parser.parse_args()
args.cuda = args.cuda and torch.cuda.is_available()
if args.condition_type:
assert args.condition_type in ['image', 'gray', 'edge', 'mask', 'obscured', 'watermark']
if not os.path.isdir('./samples'):
os.makedirs('./samples')
model = load_checkpoint(args.model_path, use_cuda=args.cuda)
model.eval()
if args.cuda:
model.cuda()
if args.condition_file and args.condition_type:
image = Image.open(args.condition_file)
if args.condition_type == 'image':
image = image.convert('RGB')
image = image_transform(image).unsqueeze(0)
save_image(image, './samples/sample_image.png')
if args.cuda:
image = image.cuda()
image = Variable(image, volatile=True)
            mu, logvar = model.get_params(image=image)
elif args.condition_type == 'gray':
image = image.convert('L')
image = image_transform(image).unsqueeze(0)
save_image(image, './samples/sample_gray.png')
if args.cuda:
image = image.cuda()
image = Variable(image, volatile=True)
            mu, logvar = model.get_params(gray=image)
elif args.condition_type == 'edge':
image = image.convert('L')
image = image_transform(image).unsqueeze(0)
save_image(image, './samples/sample_edge.png')
if args.cuda:
image = image.cuda()
image = Variable(image, volatile=True)
            mu, logvar = model.get_params(edge=image)
elif args.condition_type == 'mask':
image = image.convert('L')
image = 1 - image_transform(image).unsqueeze(0)
save_image(image, './samples/sample_mask.png')
if args.cuda:
image = image.cuda()
image = Variable(image, volatile=True)
            mu, logvar = model.get_params(mask=image)
elif args.condition_type == 'obscured':
image = image.convert('RGB')
image = obscure_image(image)
image = image_transform(image).unsqueeze(0)
save_image(image, './samples/sample_obscured.png')
if args.cuda:
image = image.cuda()
image = Variable(image, volatile=True)
            mu, logvar = model.get_params(obscured=image)
elif args.condition_type == 'watermark':
image = image.convert('RGB')
image = add_watermark(image)
image = image_transform(image).unsqueeze(0)
save_image(image, './samples/sample_watermark.png')
if args.cuda:
image = image.cuda()
image = Variable(image, volatile=True)
            mu, logvar = model.get_params(watermark=image)
std = logvar.mul(0.5).exp_()
    else: # sample from the standard Gaussian prior
mu = Variable(torch.Tensor([0]))
std = Variable(torch.Tensor([1]))
if args.cuda:
mu = mu.cuda()
std = std.cuda()
    # sample from a standard gaussian
sample = Variable(torch.randn(args.n_samples, model.n_latents))
if args.cuda:
sample = sample.cuda()
# sample from particular gaussian by multiplying + adding
mu = mu.expand_as(sample)
std = std.expand_as(sample)
sample = sample.mul(std).add_(mu)
# generate image and text
image_recon = F.sigmoid(model.image_decoder(sample)).cpu().data
gray_recon = F.sigmoid(model.gray_decoder(sample)).cpu().data
edge_recon = F.sigmoid(model.edge_decoder(sample)).cpu().data
mask_recon = F.sigmoid(model.mask_decoder(sample)).cpu().data
obscured_recon = F.sigmoid(model.obscured_decoder(sample)).cpu().data
watermark_recon = F.sigmoid(model.watermark_decoder(sample)).cpu().data
# save image samples to filesystem
save_image(image_recon, './samples/sample_image.png')
save_image(gray_recon, './samples/sample_gray.png')
save_image(edge_recon, './samples/sample_edge.png')
save_image(mask_recon, './samples/sample_mask.png')
save_image(obscured_recon, './samples/sample_obscured.png')
save_image(watermark_recon, './samples/sample_watermark.png')
| 5,676
| 40.437956
| 96
|
py
|
multimodal-vae-public
|
multimodal-vae-public-master/vision/setup.py
|
"""Grayscale, edge detection, and facial landmarks are pre-computed
prior to training. Obscuring and watermarks are done in-place in
datasets.py.
>>> python setup.py grayscale ./data/images ./data/grayscale
>>> python setup.py edge ./data/images ./data/edge
>>> python setup.py mask ./data/images ./data/mask
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import cv2
import dlib
import random
import numpy as np
from PIL import Image
from PIL import ImageFilter
from skimage import feature
from imutils import face_utils
from collections import OrderedDict
# define a dictionary that maps the indexes of the facial
# landmarks to specific face regions
FACIAL_LANDMARKS_IDXS = OrderedDict([
("mouth", (48, 68)),
("right_eyebrow", (17, 22)),
("left_eyebrow", (22, 27)),
("right_eye", (36, 42)),
("left_eye", (42, 48)),
("nose", (27, 35)),
("jaw", (0, 17))
])
def build_grayscale_dataset(in_dir, out_dir):
"""Generate a dataset of grayscale images.
@param in_dir: string
input directory of images.
@param out_dir: string
output directory of images.
"""
image_paths = os.listdir(in_dir)
n_images = len(image_paths)
for i, image_path in enumerate(image_paths):
print('Building grayscale dataset: [%d/%d] images.' % (i + 1, n_images))
image_full_path = os.path.join(in_dir, image_path)
image = Image.open(image_full_path)
image = image.convert('RGB').convert('L')
image.save(os.path.join(out_dir, image_path))
def build_edge_dataset(in_dir, out_dir, sigma=3):
"""Generate a dataset of (canny) edge-detected images.
@param in_dir: string
input directory of images.
@param out_dir: string
output directory of images.
@param sigma: float (default: 3)
smoothness for edge detection.
"""
image_paths = os.listdir(in_dir)
n_images = len(image_paths)
for i, image_path in enumerate(image_paths):
print('Building edge-detected dataset: [%d/%d] images.' % (i + 1, n_images))
image_full_path = os.path.join(in_dir, image_path)
image = Image.open(image_full_path).convert('L')
image_npy = np.asarray(image).astype(np.float) / 255.
image_npy = feature.canny(image_npy, sigma=sigma)
image_npy = image_npy.astype(np.uint8) * 255
image = Image.fromarray(image_npy)
image.save(os.path.join(out_dir, image_path))
def build_mask_dataset(in_dir, out_dir, model_path):
"""Generate a dataset of segmentation masks from images.
@param in_dir: string
input directory of images.
@param out_dir: string
output directory of images.
@param model_path: string
path to HOG model for facial features.
"""
# initialize dlib's face detector (HOG-based) and then create
# the facial landmark predictor
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(model_path)
image_paths = os.listdir(in_dir)
n_images = len(image_paths)
for i, image_path in enumerate(image_paths):
print('Building face-mask dataset: [%d/%d] images.' % (i + 1, n_images))
image_full_path = os.path.join(in_dir, image_path)
image = cv2.imread(image_full_path)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# detect faces in the grayscale image
rects = detector(gray, 1)
try:
rect = rects[0] # we are only going to use the first one
# determine the facial landmarks for the face region, then
# convert the landmark (x, y)-coordinates to a NumPy array
shape = predictor(gray, rect)
shape = face_utils.shape_to_np(shape)
output = visualize_facial_landmarks(image, shape)
cv2.imwrite(os.path.join(out_dir, image_path), output)
except:
# if for some reason no bounding box is found, send blank.
output = np.ones_like(image) * 255
cv2.imwrite(os.path.join(out_dir, image_path), output)
def visualize_facial_landmarks(image, shape, colors=None):
# create two copies of the input image -- one for the
# overlay and one for the final output image
overlay = np.ones_like(image) * 255
# loop over the facial landmark regions individually
for (i, name) in enumerate(FACIAL_LANDMARKS_IDXS.keys()):
# grab the (x, y)-coordinates associated with the
# face landmark
(j, k) = FACIAL_LANDMARKS_IDXS[name]
pts = shape[j:k]
# check if are supposed to draw the jawline
if name == "jaw":
# since the jawline is a non-enclosed facial region,
# just draw lines between the (x, y)-coordinates
for l in range(1, len(pts)):
ptA = tuple(pts[l - 1])
ptB = tuple(pts[l])
cv2.line(overlay, ptA, ptB, (0, 0, 0), 2)
# otherwise, compute the convex hull of the facial
# landmark coordinates points and display it
else:
hull = cv2.convexHull(pts)
cv2.drawContours(overlay, [hull], -1, (0, 0, 0), -1)
return overlay
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('type', type=str,
                        help='grayscale|edge|mask')
parser.add_argument('in_dir', type=str, help='where images are located')
parser.add_argument('out_dir', type=str, help='where images are to be saved')
args = parser.parse_args()
if args.type == 'grayscale':
build_grayscale_dataset(args.in_dir, args.out_dir)
elif args.type == 'edge':
build_edge_dataset(args.in_dir, args.out_dir, sigma=2)
elif args.type == 'mask':
build_mask_dataset(args.in_dir, args.out_dir,
'./data/shape_predictor_68_face_landmarks.dat')
| 6,049
| 35.666667
| 84
|
py
|
multimodal-vae-public
|
multimodal-vae-public-master/vision/model.py
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import sys
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
class MVAE(nn.Module):
def __init__(self, n_latents=250, use_cuda=False):
super(MVAE, self).__init__()
# define q(z|x_i) for i = 1...6
self.image_encoder = ImageEncoder(n_latents, 3)
self.gray_encoder = ImageEncoder(n_latents, 1)
self.edge_encoder = ImageEncoder(n_latents, 1)
self.mask_encoder = ImageEncoder(n_latents, 1)
self.obscured_encoder = ImageEncoder(n_latents, 3)
self.watermark_encoder = ImageEncoder(n_latents, 3)
# define p(x_i|z) for i = 1...6
self.image_decoder = ImageDecoder(n_latents, 3)
self.gray_decoder = ImageDecoder(n_latents, 1)
self.edge_decoder = ImageDecoder(n_latents, 1)
self.mask_decoder = ImageDecoder(n_latents, 1)
self.obscured_decoder = ImageDecoder(n_latents, 3)
self.watermark_decoder = ImageDecoder(n_latents, 3)
# define q(z|x) = q(z|x_1)...q(z|x_6)
self.experts = ProductOfExperts()
self.n_latents = n_latents
        self.use_cuda = use_cuda
def reparametrize(self, mu, logvar):
if self.training:
std = logvar.mul(0.5).exp_()
eps = Variable(std.data.new(std.size()).normal_())
return eps.mul(std).add_(mu)
else: # return mean during inference
return mu
def forward(self, image=None, gray=None, edge=None, mask=None,
obscured=None, watermark=None):
mu, logvar = self.get_params(image=image, gray=gray, edge=edge, mask=mask,
obscured=obscured, watermark=watermark)
# reparametrization trick to sample
z = self.reparametrize(mu, logvar)
# reconstruct inputs based on sample
image_recon = self.image_decoder(z)
gray_recon = self.gray_decoder(z)
edge_recon = self.edge_decoder(z)
mask_recon = self.mask_decoder(z)
obscured_recon = self.obscured_decoder(z)
watermark_recon = self.watermark_decoder(z)
        return (image_recon, gray_recon, edge_recon, mask_recon,
                obscured_recon, watermark_recon, mu, logvar)
def get_params(self, image=None, gray=None, edge=None,
mask=None, obscured=None, watermark=None):
# define universal expert
batch_size = get_batch_size(image, gray, edge, mask, obscured, watermark)
use_cuda = next(self.parameters()).is_cuda # check if CUDA
# initialize the universal prior expert
mu, logvar = prior_expert((1, batch_size, self.n_latents),
use_cuda=use_cuda)
if image is not None:
image_mu, image_logvar = self.image_encoder(image)
mu = torch.cat((mu, image_mu.unsqueeze(0)), dim=0)
logvar = torch.cat((logvar, image_logvar.unsqueeze(0)), dim=0)
if gray is not None:
gray_mu, gray_logvar = self.gray_encoder(gray)
mu = torch.cat((mu, gray_mu.unsqueeze(0)), dim=0)
logvar = torch.cat((logvar, gray_logvar.unsqueeze(0)), dim=0)
if edge is not None:
edge_mu, edge_logvar = self.edge_encoder(edge)
mu = torch.cat((mu, edge_mu.unsqueeze(0)), dim=0)
logvar = torch.cat((logvar, edge_logvar.unsqueeze(0)), dim=0)
if mask is not None:
mask_mu, mask_logvar = self.mask_encoder(mask)
mu = torch.cat((mu, mask_mu.unsqueeze(0)), dim=0)
logvar = torch.cat((logvar, mask_logvar.unsqueeze(0)), dim=0)
if obscured is not None:
obscured_mu, obscured_logvar = self.obscured_encoder(obscured)
mu = torch.cat((mu, obscured_mu.unsqueeze(0)), dim=0)
logvar = torch.cat((logvar, obscured_logvar.unsqueeze(0)), dim=0)
if watermark is not None:
watermark_mu, watermark_logvar = self.watermark_encoder(watermark)
mu = torch.cat((mu, watermark_mu.unsqueeze(0)), dim=0)
logvar = torch.cat((logvar, watermark_logvar.unsqueeze(0)), dim=0)
# product of experts to combine gaussians
mu, logvar = self.experts(mu, logvar)
return mu, logvar
def get_batch_size(*args):
for arg in args:
if arg is not None:
return arg.size(0)
class ImageEncoder(nn.Module):
"""Parametrizes q(z|x).
We will use this for every q(z|x_i) for all i.
@param n_latents: integer
number of latent dimensions
@param n_channels: integer [default: 3]
number of input channels
"""
def __init__(self, n_latents, n_channels=3):
super(ImageEncoder, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(n_channels, 32, 4, 2, 1, bias=False),
Swish(),
nn.Conv2d(32, 64, 4, 2, 1, bias=False),
nn.BatchNorm2d(64),
Swish(),
nn.Conv2d(64, 128, 4, 2, 1, bias=False),
nn.BatchNorm2d(128),
Swish(),
nn.Conv2d(128, 256, 4, 1, 0, bias=False),
nn.BatchNorm2d(256),
Swish())
self.classifier = nn.Sequential(
nn.Linear(256 * 5 * 5, 512),
Swish(),
nn.Dropout(p=0.1),
nn.Linear(512, n_latents * 2))
self.n_latents = n_latents
def forward(self, x):
n_latents = self.n_latents
x = self.features(x)
x = x.view(-1, 256 * 5 * 5)
x = self.classifier(x)
return x[:, :n_latents], x[:, n_latents:]
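# Hedged shape sketch (not part of the original file): with the strides and
# paddings above, a 64x64 input shrinks 64 -> 32 -> 16 -> 8 -> 5 spatially, so
# the flattened feature vector has 256 * 5 * 5 entries and each head emits
# n_latents values. The batch size and n_latents below are illustrative.
def _image_encoder_shape_check(n_latents=250, n_channels=3):
    encoder = ImageEncoder(n_latents, n_channels)
    x = Variable(torch.randn(2, n_channels, 64, 64))
    mu, logvar = encoder(x)
    assert mu.size() == (2, n_latents) and logvar.size() == (2, n_latents)
    return mu, logvar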
class ImageDecoder(nn.Module):
"""Parametrizes p(x|z).
We will use this for every p(x_i|z) for all i.
@param n_latents: integer
number of latent dimensions
@param n_channels: integer [default: 3]
number of input channels
"""
def __init__(self, n_latents, n_channels=3):
super(ImageDecoder, self).__init__()
self.upsample = nn.Sequential(
nn.Linear(n_latents, 256 * 5 * 5),
Swish())
self.hallucinate = nn.Sequential(
nn.ConvTranspose2d(256, 128, 4, 1, 0, bias=False),
nn.BatchNorm2d(128),
Swish(),
nn.ConvTranspose2d(128, 64, 4, 2, 1, bias=False),
nn.BatchNorm2d(64),
Swish(),
nn.ConvTranspose2d(64, 32, 4, 2, 1, bias=False),
nn.BatchNorm2d(32),
Swish(),
nn.ConvTranspose2d(32, n_channels, 4, 2, 1, bias=False))
def forward(self, z):
# the input will be a vector of size |n_latents|
z = self.upsample(z)
z = z.view(-1, 256, 5, 5)
z = self.hallucinate(z)
return z # no sigmoid!
class ProductOfExperts(nn.Module):
"""Return parameters for product of independent experts.
See https://arxiv.org/pdf/1410.7827.pdf for equations.
@param mu: M x D for M experts
@param logvar: M x D for M experts
"""
def forward(self, mu, logvar, eps=1e-8):
var = torch.exp(logvar) + eps
T = 1 / (var + eps) # precision of i-th Gaussian expert at point x
pd_mu = torch.sum(mu * T, dim=0) / torch.sum(T, dim=0)
pd_var = 1 / torch.sum(T, dim=0)
pd_logvar = torch.log(pd_var + eps)
return pd_mu, pd_logvar
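# Hedged numeric sketch (not part of the original file): combining two
# unit-variance experts halves the variance and precision-weights the means,
# matching the product-of-Gaussians equations referenced above. The values
# below are illustrative.
def _product_of_experts_example():
    poe = ProductOfExperts()
    mu = Variable(torch.Tensor([[0.0, 2.0], [2.0, 2.0]]))  # M=2 experts, D=2
    logvar = Variable(torch.zeros(2, 2))                    # both experts have variance 1
    pd_mu, pd_logvar = poe(mu, logvar)
    # expected: pd_mu is roughly [1.0, 2.0] and exp(pd_logvar) is roughly 0.5
    return pd_mu, pd_logvar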
class Swish(nn.Module):
"""https://arxiv.org/abs/1710.05941"""
def forward(self, x):
return x * F.sigmoid(x)
def prior_expert(size, use_cuda=False):
"""Universal prior expert. Here we use a spherical
Gaussian: N(0, 1).
@param size: integer
dimensionality of Gaussian
@param use_cuda: boolean [default: False]
cast CUDA on variables
"""
mu = Variable(torch.zeros(size))
logvar = Variable(torch.log(torch.ones(size)))
if use_cuda:
mu, logvar = mu.cuda(), logvar.cuda()
return mu, logvar
| 8,131
| 36.13242
| 82
|
py
|
multimodal-vae-public
|
multimodal-vae-public-master/vision/datasets.py
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import random
import numpy as np
from copy import deepcopy
from PIL import Image
import torch
from torch.utils.data.dataset import Dataset
from torchvision import transforms
N_MODALITIES = 6
VALID_PARTITIONS = {'train': 0, 'val': 1, 'test': 2}
class CelebVision(Dataset):
"""Define dataset of images of celebrities with a series of
transformations applied to it.
The user needs to have pre-defined the Anno and Eval folder from
http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html
@param partition: string
train|val|test [default: train]
See VALID_PARTITIONS global variable.
@param data_dir: string
path to root of dataset images [default: ./data]
"""
def __init__(self, partition='train', data_dir='./data'):
super(CelebVision, self).__init__()
self.partition = partition
self.data_dir = data_dir
assert partition in VALID_PARTITIONS.keys()
# load a list of images for the user-chosen partition
self.image_paths = load_eval_partition(partition, data_dir=data_dir)
self.size = int(len(self.image_paths))
# resize image to 64 x 64
self.image_transform = transforms.Compose([transforms.Resize(64),
transforms.CenterCrop(64),
transforms.ToTensor()])
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
image_path = os.path.join(self.data_dir, 'img_align_celeba',
self.image_paths[index])
gray_path = os.path.join(self.data_dir, 'img_align_celeba_grayscale',
self.image_paths[index])
edge_path = os.path.join(self.data_dir, 'img_align_celeba_edge',
self.image_paths[index])
mask_path = os.path.join(self.data_dir, 'img_align_celeba_mask',
self.image_paths[index])
# open PIL Image -- these are fixed versions of image that we save
image = Image.open(image_path).convert('RGB')
gray_image = Image.open(gray_path).convert('L')
edge_image = Image.open(edge_path).convert('L')
mask_image = Image.open(mask_path).convert('L')
# add blocked to image
obscured_image = Image.open(image_path).convert('RGB')
obscured_image = obscure_image(obscured_image)
# add watermark to image
watermark_image = Image.open(image_path).convert('RGB')
        watermark_image = add_watermark(watermark_image,
                                        watermark_path='./watermark.png')
image = self.image_transform(image)
        gray_image = self.image_transform(gray_image)
edge_image = self.image_transform(edge_image)
mask_image = self.image_transform(mask_image)
obscured_image = self.image_transform(obscured_image)
watermark_image = self.image_transform(watermark_image)
# masks are normally white with black lines but we want to
        # be consistent with edges and MNIST-stuff, so we make the background
# black and the lines white.
mask_image = 1 - mask_image
# return everything as a bundle
        return (image, gray_image, edge_image,
                mask_image, obscured_image, watermark_image)
def __len__(self):
return self.size
def obscure_image(image):
"""Block image vertically in half with black pixels.
@param image: np.array
color image
@return: np.array
color image with vertically blocked pixels
"""
image_npy = deepcopy(np.asarray(image))
    # we obscure half of the image because it should be easier to complete
    # a face from a vertical half than from a horizontal half
center_h = image_npy.shape[1] // 2
image_npy[:, center_h + 1:, :] = 0
image = Image.fromarray(image_npy)
return image
def add_watermark(image, watermark_path='./watermark.png'):
"""Overlay image of watermark on color image.
@param image: np.array
color image
@param watermark_path: string
path to fixed watermark image
[default: ./watermark.png]
@return: np.array
color image with overlayed watermark
"""
watermark = Image.open(watermark_path)
nw, nh = image.size[0], image.size[1]
watermark = watermark.resize((nw, nh), Image.BICUBIC)
image.paste(watermark, (0, 0), watermark)
return image
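# Hedged usage sketch (not part of the original file): both corruptions operate
# on PIL images before the torchvision transform is applied. The image path and
# watermark path below are placeholders.
def _corruption_example(image_path='./data/img_align_celeba/000001.jpg'):
    image = Image.open(image_path).convert('RGB')
    blocked = obscure_image(image)                     # half of the image set to black
    marked = add_watermark(image.copy(), watermark_path='./watermark.png')
    return blocked, marked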
| 4,896
| 36.669231
| 78
|
py
|
multimodal-vae-public
|
multimodal-vae-public-master/vision/train.py
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import shutil
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision.utils import save_image
from model import MVAE
from tqdm import tqdm
import datasets
from datasets import N_MODALITIES
def elbo_loss(recon_image, image, recon_gray, gray, recon_edge, edge, recon_mask, mask,
              recon_obscured, obscured, recon_watermark, watermark, mu, logvar, annealing_factor=1.):
BCE = 0
if recon_image is not None and image is not None:
recon_image, image = recon_image.view(-1, 3 * 64 * 64), image.view(-1, 3 * 64 * 64)
image_BCE = torch.sum(binary_cross_entropy_with_logits(recon_image, image), dim=1)
BCE += image_BCE
if recon_gray is not None and gray is not None:
recon_gray, gray = recon_gray.view(-1, 1 * 64 * 64), gray.view(-1, 1 * 64 * 64)
gray_BCE = torch.sum(binary_cross_entropy_with_logits(recon_gray, gray), dim=1)
BCE += gray_BCE
if recon_edge is not None and edge is not None:
recon_edge, edge = recon_edge.view(-1, 1 * 64 * 64), edge.view(-1, 1 * 64 * 64)
edge_BCE = torch.sum(binary_cross_entropy_with_logits(recon_edge, edge), dim=1)
BCE += edge_BCE
if recon_mask is not None and mask is not None:
recon_mask, mask = recon_mask.view(-1, 1 * 64 * 64), mask.view(-1, 1 * 64 * 64)
mask_BCE = torch.sum(binary_cross_entropy_with_logits(recon_mask, mask), dim=1)
BCE += mask_BCE
if recon_obscured is not None and obscured is not None:
recon_obscured, obscured = recon_obscured.view(-1, 3 * 64 * 64), obscured.view(-1, 3 * 64 * 64)
obscured_BCE = torch.sum(binary_cross_entropy_with_logits(recon_obscured, obscured), dim=1)
BCE += obscured_BCE
if recon_watermark is not None and watermark is not None:
recon_watermark, watermark = recon_watermark.view(-1, 3 * 64 * 64), watermark.view(-1, 3 * 64 * 64)
watermark_BCE = torch.sum(binary_cross_entropy_with_logits(recon_watermark, watermark), dim=1)
BCE += watermark_BCE
# Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
# https://arxiv.org/abs/1312.6114
KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=1)
# NOTE: we use lambda_i = 1 for all i since each modality is roughly equal
ELBO = torch.mean(BCE / float(N_MODALITIES) + annealing_factor * KLD)
return ELBO
def binary_cross_entropy_with_logits(input, target):
"""Sigmoid Activation + Binary Cross Entropy
@param input: torch.Tensor (size N)
@param target: torch.Tensor (size N)
@return loss: torch.Tensor (size N)
"""
if not (target.size() == input.size()):
raise ValueError("Target size ({}) must be the same as input size ({})".format(
target.size(), input.size()))
return (torch.clamp(input, 0) - input * target
+ torch.log(1 + torch.exp(-torch.abs(input))))
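# Hedged numeric sketch (not part of the original file): the expression above is
# the numerically stable form of -[t*log(sigmoid(x)) + (1-t)*log(1-sigmoid(x))];
# the check below compares the two on well-conditioned, made-up inputs.
def _bce_with_logits_check():
    x = torch.Tensor([[-2.0, 0.5, 3.0]])
    t = torch.Tensor([[0.0, 1.0, 1.0]])
    stable = binary_cross_entropy_with_logits(x, t)
    naive = -(t * torch.log(torch.sigmoid(x)) + (1 - t) * torch.log(1 - torch.sigmoid(x)))
    assert (stable - naive).abs().max() < 1e-5
    return stable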
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def save_checkpoint(state, is_best, folder='./', filename='checkpoint.pth.tar'):
if not os.path.isdir(folder):
os.mkdir(folder)
torch.save(state, os.path.join(folder, filename))
if is_best:
shutil.copyfile(os.path.join(folder, filename),
os.path.join(folder, 'model_best.pth.tar'))
def load_checkpoint(file_path, use_cuda=False):
checkpoint = torch.load(file_path) if use_cuda else \
torch.load(file_path, map_location=lambda storage, location: storage)
model = MVAE(checkpoint['n_latents'])
model.load_state_dict(checkpoint['state_dict'])
return model
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--n-latents', type=int, default=250,
help='size of the latent embedding (default: 250)')
parser.add_argument('--batch-size', type=int, default=50, metavar='N',
help='input batch size for training (default: 50)')
parser.add_argument('--epochs', type=int, default=100, metavar='N',
help='number of epochs to train (default: 100)')
parser.add_argument('--annealing-epochs', type=int, default=20, metavar='N',
help='number of epochs to anneal KL for [default: 20]')
parser.add_argument('--lr', type=float, default=1e-4, metavar='LR',
help='learning rate (default: 1e-4)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--cuda', action='store_true', default=False,
help='enables CUDA training')
args = parser.parse_args()
args.cuda = args.cuda and torch.cuda.is_available()
new_directories = ['./results', './results/image', './results/gray', './results/edge',
'./results/mask', './results/obscured', './results/watermark']
for new_dir in new_directories:
if not os.path.isdir(new_dir):
os.makedirs(new_dir)
train_loader = torch.utils.data.DataLoader(
datasets.CelebVision(partition='train', data_dir='./data'),
batch_size=args.batch_size, shuffle=True)
N_mini_batches = len(train_loader)
test_loader = torch.utils.data.DataLoader(
datasets.CelebVision(partition='val', data_dir='./data'),
batch_size=args.batch_size, shuffle=False)
model = MVAE(args.n_latents, use_cuda=args.cuda)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
if args.cuda:
model.cuda()
def train(epoch):
model.train()
train_loss_meter = AverageMeter()
        for batch_idx, (image, gray_image, edge_image, mask_image,
                        obscured_image, watermark_image) in enumerate(train_loader):
if epoch < args.annealing_epochs:
# compute the KL annealing factor for the current mini-batch in the current epoch
annealing_factor = (float(batch_idx + (epoch - 1) * N_mini_batches + 1) /
float(args.annealing_epochs * N_mini_batches))
else:
# by default the KL annealing factor is unity
annealing_factor = 1.0
if args.cuda:
image = image.cuda()
gray_image = gray_image.cuda()
edge_image = edge_image.cuda()
mask_image = mask_image.cuda()
obscured_image = obscured_image.cuda()
watermark_image = watermark_image.cuda()
image = Variable(image)
gray_image = Variable(gray_image)
edge_image = Variable(edge_image)
mask_image = Variable(mask_image)
obscured_image = Variable(obscured_image)
watermark_image = Variable(watermark_image)
batch_size = image.size(0)
# refresh the optimizer
optimizer.zero_grad()
# compute reconstructions using all the modalities
(joint_recon_image, joint_recon_gray, joint_recon_edge,
joint_recon_mask, joint_recon_obscured, joint_recon_watermark,
joint_mu, joint_logvar) = model(image, gray_image, edge_image,
mask_image, obscured_image, watermark_image)
# compute reconstructions using each of the individual modalities
(image_recon_image, image_recon_gray, image_recon_edge,
image_recon_mask, image_recon_obscured, image_recon_watermark,
image_mu, image_logvar) = model(image=image)
(gray_recon_image, gray_recon_gray, gray_recon_edge,
gray_recon_mask, gray_recon_obscured, gray_recon_watermark,
gray_mu, gray_logvar) = model(gray=gray_image)
(edge_recon_image, edge_recon_gray, edge_recon_edge,
edge_recon_mask, edge_recon_obscured, edge_recon_watermark,
edge_mu, edge_logvar) = model(edge=edge_image)
(mask_recon_image, mask_recon_gray, mask_recon_edge,
mask_recon_mask, mask_recon_obscured, mask_recon_watermark,
mask_mu, mask_logvar) = model(mask=mask_image)
(obscured_recon_image, obscured_recon_gray, obscured_recon_edge,
obscured_recon_mask, obscured_recon_obscured, obscured_recon_watermark,
obscured_mu, obscured_logvar) = model(obscured=obscured_image)
(watermark_recon_image, watermark_recon_gray, watermark_recon_edge,
watermark_recon_mask, watermark_recon_obscured, watermark_recon_watermark,
watermark_mu, watermark_logvar) = model(watermark=watermark_image)
# compute joint loss
joint_train_loss = elbo_loss(joint_recon_image, image,
joint_recon_gray, gray_image,
joint_recon_edge, edge_image,
joint_recon_mask, mask_image,
joint_recon_obscured, obscured_image,
joint_recon_watermark, watermark_image,
joint_mu, joint_logvar,
annealing_factor=annealing_factor)
# compute loss with unimodal inputs
image_train_loss = elbo_loss(image_recon_image, image,
image_recon_gray, gray_image,
image_recon_edge, edge_image,
image_recon_mask, mask_image,
image_recon_obscured, obscured_image,
image_recon_watermark, watermark_image,
image_mu, image_logvar,
annealing_factor=annealing_factor)
gray_train_loss = elbo_loss(gray_recon_image, image,
gray_recon_gray, gray_image,
gray_recon_edge, edge_image,
gray_recon_mask, mask_image,
gray_recon_obscured, obscured_image,
gray_recon_watermark, watermark_image,
                                        gray_mu, gray_logvar,
annealing_factor=annealing_factor)
edge_train_loss = elbo_loss(edge_recon_image, image,
edge_recon_gray, gray_image,
edge_recon_edge, edge_image,
edge_recon_mask, mask_image,
edge_recon_obscured, obscured_image,
edge_recon_watermark, watermark_image,
edge_mu, edge_logvar,
annealing_factor=annealing_factor)
mask_train_loss = elbo_loss(mask_recon_image, image,
mask_recon_gray, gray_image,
mask_recon_edge, edge_image,
mask_recon_mask, mask_image,
mask_recon_obscured, obscured_image,
mask_recon_watermark, watermark_image,
mask_mu, mask_logvar,
annealing_factor=annealing_factor)
obscured_train_loss = elbo_loss(obscured_recon_image, image,
obscured_recon_gray, gray_image,
obscured_recon_edge, edge_image,
obscured_recon_mask, mask_image,
obscured_recon_obscured, obscured_image,
obscured_recon_watermark, watermark_image,
obscured_mu, obscured_logvar,
annealing_factor=annealing_factor)
watermark_train_loss = elbo_loss(watermark_recon_image, image,
watermark_recon_gray, gray_image,
watermark_recon_edge, edge_image,
watermark_recon_mask, mask_image,
watermark_recon_obscured, obscured_image,
watermark_recon_watermark, watermark_image,
watermark_mu, watermark_logvar,
annealing_factor=annealing_factor)
train_loss = joint_train_loss + image_train_loss + gray_train_loss \
+ edge_train_loss + mask_train_loss + obscured_train_loss \
+ watermark_train_loss
train_loss_meter.update(train_loss.data[0], len(image))
# compute and take gradient step
train_loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tAnnealing Factor: {:.3f}'.format(
epoch, batch_idx * len(image), len(train_loader.dataset),
100. * batch_idx / len(train_loader), train_loss_meter.avg, annealing_factor))
print('====> Epoch: {}\tLoss: {:.4f}'.format(epoch, train_loss_meter.avg))
def test(epoch):
model.eval()
test_loss = 0
pbar = tqdm(total=len(test_loader))
for batch_idx, (image, gray_image, edge_image, mask_image,
obscured_image, watermark_image) in enumerate(test_loader):
if args.cuda:
image = image.cuda()
gray_image = gray_image.cuda()
edge_image = edge_image.cuda()
mask_image = mask_image.cuda()
obscured_image = obscured_image.cuda()
watermark_image = watermark_image.cuda()
image = Variable(image)
gray_image = Variable(gray_image)
edge_image = Variable(edge_image)
mask_image = Variable(mask_image)
obscured_image = Variable(obscured_image)
watermark_image = Variable(watermark_image)
batch_size = image.size(0)
# for ease, only compute the joint loss
(joint_recon_image, joint_recon_gray, joint_recon_edge,
joint_recon_mask, joint_recon_obscured, joint_recon_watermark,
             joint_mu, joint_logvar) = model(image, gray_image, edge_image,
                                             mask_image, obscured_image, watermark_image)
            test_loss += elbo_loss(joint_recon_image, image,
joint_recon_gray, gray_image,
joint_recon_edge, edge_image,
joint_recon_mask, mask_image,
joint_recon_obscured, obscured_image,
joint_recon_watermark, watermark_image,
joint_mu, joint_logvar).data[0]
if batch_idx == 0:
# from time to time, plot the reconstructions to see how well the model is learning
n = min(batch_size, 8)
image_comparison = torch.cat(
[image[:n],
F.sigmoid(joint_recon_image).view(args.batch_size, 3, 64, 64)[:n]])
gray_comparison = torch.cat([
gray_image[:n],
F.sigmoid(joint_recon_gray).view(args.batch_size, 1, 64, 64)[:n]])
edge_comparison = torch.cat([
edge_image[:n],
F.sigmoid(joint_recon_edge).view(args.batch_size, 1, 64, 64)[:n]])
mask_comparison = torch.cat([
mask_image[:n],
F.sigmoid(joint_recon_mask).view(args.batch_size, 1, 64, 64)[:n]])
obscured_comparison = torch.cat([
obscured_image[:n],
F.sigmoid(joint_recon_obscured).view(args.batch_size, 3, 64, 64)[:n]])
watermark_comparison = torch.cat([
watermark_image[:n],
F.sigmoid(joint_recon_watermark).view(args.batch_size, 3, 64, 64)[:n]])
# save these reconstructions
save_image(image_comparison.data.cpu(),
'./results/image/reconstruction_%d.png' % epoch, nrow=n)
save_image(gray_comparison.data.cpu(),
'./results/gray/reconstruction_%d.png' % epoch, nrow=n)
save_image(edge_comparison.data.cpu(),
'./results/edge/reconstruction_%d.png' % epoch, nrow=n)
save_image(mask_comparison.data.cpu(),
'./results/mask/reconstruction_%d.png' % epoch, nrow=n)
save_image(obscured_comparison.data.cpu(),
'./results/obscured/reconstruction_%d.png' % epoch, nrow=n)
save_image(watermark_comparison.data.cpu(),
'./results/watermark/reconstruction_%d.png' % epoch, nrow=n)
pbar.update()
pbar.close()
test_loss /= len(test_loader)
print('====> Test Loss: {:.4f}'.format(test_loss))
return test_loss
best_loss = sys.maxint
for epoch in range(1, args.epochs + 1):
train(epoch)
loss = test(epoch)
is_best = loss < best_loss
best_loss = min(loss, best_loss)
# save the best model and current model
save_checkpoint({
'state_dict': model.state_dict(),
'best_loss': best_loss,
'n_latents': args.n_latents,
'optimizer' : optimizer.state_dict(),
}, is_best, folder='./trained_models')
| 19,025
| 47.659847
| 107
|
py
|
multimodal-vae-public
|
multimodal-vae-public-master/mnist/sample.py
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import sys
import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import datasets, transforms
from torchvision.utils import save_image
from train import load_checkpoint
def fetch_mnist_image(label):
"""Return a random image from the MNIST dataset with label.
@param label: integer
        an integer from 0 to 9
@return: torch.autograd.Variable
MNIST image
"""
mnist_dataset = datasets.MNIST('./data', train=False, download=True,
transform=transforms.ToTensor())
images = mnist_dataset.test_data.numpy()
labels = mnist_dataset.test_labels.numpy()
images = images[labels == label]
image = images[np.random.choice(np.arange(images.shape[0]))]
image = torch.from_numpy(image).float()
image = image.unsqueeze(0)
return Variable(image, volatile=True)
def fetch_mnist_text(label):
"""Randomly generate a number from 0 to 9.
@param label: integer
        an integer from 0 to 9
@return: torch.autograd.Variable
Variable wrapped around an integer.
"""
text = torch.LongTensor([label])
return Variable(text, volatile=True)
if __name__ == "__main__":
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('model_path', type=str, help='path to trained model file')
parser.add_argument('--n-samples', type=int, default=64,
help='Number of images and texts to sample [default: 64]')
# condition sampling on a particular images
parser.add_argument('--condition-on-image', type=int, default=None,
help='If True, generate text conditioned on an image.')
# condition sampling on a particular text
parser.add_argument('--condition-on-text', type=int, default=None,
help='If True, generate images conditioned on a text.')
parser.add_argument('--cuda', action='store_true', default=False,
help='enables CUDA training')
args = parser.parse_args()
args.cuda = args.cuda and torch.cuda.is_available()
model = load_checkpoint(args.model_path, use_cuda=args.cuda)
model.eval()
if args.cuda:
model.cuda()
# mode 1: unconditional generation
if not args.condition_on_image and not args.condition_on_text:
mu = Variable(torch.Tensor([0]))
std = Variable(torch.Tensor([1]))
if args.cuda:
mu = mu.cuda()
std = std.cuda()
# mode 2: generate conditioned on image
elif args.condition_on_image and not args.condition_on_text:
image = fetch_mnist_image(args.condition_on_image)
if args.cuda:
image = image.cuda()
mu, logvar = model.infer(image=image)
std = logvar.mul(0.5).exp_()
# mode 3: generate conditioned on text
elif args.condition_on_text and not args.condition_on_image:
text = fetch_mnist_text(args.condition_on_text)
if args.cuda:
text = text.cuda()
mu, logvar = model.infer(text=text)
std = logvar.mul(0.5).exp_()
# mode 4: generate conditioned on image and text
elif args.condition_on_text and args.condition_on_image:
image = fetch_mnist_image(args.condition_on_image)
text = fetch_mnist_text(args.condition_on_text)
if args.cuda:
image = image.cuda()
text = text.cuda()
mu, logvar = model.infer(image=image, text=text)
std = logvar.mul(0.5).exp_()
    # sample from a standard gaussian
sample = Variable(torch.randn(args.n_samples, model.n_latents))
if args.cuda:
sample = sample.cuda()
# sample from particular gaussian by multiplying + adding
mu = mu.expand_as(sample)
std = std.expand_as(sample)
sample = sample.mul(std).add_(mu)
# generate image and text
img_recon = F.sigmoid(model.image_decoder(sample)).cpu().data
txt_recon = F.log_softmax(model.text_decoder(sample), dim=1).cpu().data
# save image samples to filesystem
save_image(img_recon.view(args.n_samples, 1, 28, 28),
'./sample_image.png')
# save text samples to filesystem
with open('./sample_text.txt', 'w') as fp:
txt_recon_np = txt_recon.numpy()
txt_recon_np = np.argmax(txt_recon_np, axis=1).tolist()
for i, item in enumerate(txt_recon_np):
fp.write('Text (%d): %s\n' % (i, item))
| 4,692
| 37.154472
| 82
|
py
|
multimodal-vae-public
|
multimodal-vae-public-master/mnist/model.py
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
from torch.nn.parameter import Parameter
class MVAE(nn.Module):
"""Multimodal Variational Autoencoder.
@param n_latents: integer
number of latent dimensions
"""
def __init__(self, n_latents):
super(MVAE, self).__init__()
self.image_encoder = ImageEncoder(n_latents)
self.image_decoder = ImageDecoder(n_latents)
self.text_encoder = TextEncoder(n_latents)
self.text_decoder = TextDecoder(n_latents)
self.experts = ProductOfExperts()
self.n_latents = n_latents
def reparametrize(self, mu, logvar):
if self.training:
std = logvar.mul(0.5).exp_()
eps = Variable(std.data.new(std.size()).normal_())
return eps.mul(std).add_(mu)
else:
return mu
def forward(self, image=None, text=None):
mu, logvar = self.infer(image, text)
# reparametrization trick to sample
z = self.reparametrize(mu, logvar)
# reconstruct inputs based on that gaussian
img_recon = self.image_decoder(z)
txt_recon = self.text_decoder(z)
return img_recon, txt_recon, mu, logvar
def infer(self, image=None, text=None):
batch_size = image.size(0) if image is not None else text.size(0)
use_cuda = next(self.parameters()).is_cuda # check if CUDA
# initialize the universal prior expert
mu, logvar = prior_expert((1, batch_size, self.n_latents),
use_cuda=use_cuda)
if image is not None:
img_mu, img_logvar = self.image_encoder(image)
mu = torch.cat((mu, img_mu.unsqueeze(0)), dim=0)
logvar = torch.cat((logvar, img_logvar.unsqueeze(0)), dim=0)
if text is not None:
txt_mu, txt_logvar = self.text_encoder(text)
mu = torch.cat((mu, txt_mu.unsqueeze(0)), dim=0)
logvar = torch.cat((logvar, txt_logvar.unsqueeze(0)), dim=0)
# product of experts to combine gaussians
mu, logvar = self.experts(mu, logvar)
return mu, logvar
class ImageEncoder(nn.Module):
"""Parametrizes q(z|x).
@param n_latents: integer
number of latent dimensions
"""
def __init__(self, n_latents):
super(ImageEncoder, self).__init__()
self.fc1 = nn.Linear(784, 512)
self.fc2 = nn.Linear(512, 512)
self.fc31 = nn.Linear(512, n_latents)
self.fc32 = nn.Linear(512, n_latents)
self.swish = Swish()
def forward(self, x):
h = self.swish(self.fc1(x.view(-1, 784)))
h = self.swish(self.fc2(h))
return self.fc31(h), self.fc32(h)
class ImageDecoder(nn.Module):
"""Parametrizes p(x|z).
@param n_latents: integer
number of latent dimensions
"""
def __init__(self, n_latents):
super(ImageDecoder, self).__init__()
self.fc1 = nn.Linear(n_latents, 512)
self.fc2 = nn.Linear(512, 512)
self.fc3 = nn.Linear(512, 512)
self.fc4 = nn.Linear(512, 784)
self.swish = Swish()
def forward(self, z):
h = self.swish(self.fc1(z))
h = self.swish(self.fc2(h))
h = self.swish(self.fc3(h))
return self.fc4(h) # NOTE: no sigmoid here. See train.py
class TextEncoder(nn.Module):
"""Parametrizes q(z|y).
@param n_latents: integer
number of latent dimensions
"""
def __init__(self, n_latents):
super(TextEncoder, self).__init__()
self.fc1 = nn.Embedding(10, 512)
self.fc2 = nn.Linear(512, 512)
self.fc31 = nn.Linear(512, n_latents)
self.fc32 = nn.Linear(512, n_latents)
self.swish = Swish()
def forward(self, x):
h = self.swish(self.fc1(x))
h = self.swish(self.fc2(h))
return self.fc31(h), self.fc32(h)
class TextDecoder(nn.Module):
"""Parametrizes p(y|z).
@param n_latents: integer
number of latent dimensions
"""
def __init__(self, n_latents):
super(TextDecoder, self).__init__()
self.fc1 = nn.Linear(n_latents, 512)
self.fc2 = nn.Linear(512, 512)
self.fc3 = nn.Linear(512, 512)
self.fc4 = nn.Linear(512, 10)
self.swish = Swish()
def forward(self, z):
h = self.swish(self.fc1(z))
h = self.swish(self.fc2(h))
h = self.swish(self.fc3(h))
return self.fc4(h) # NOTE: no softmax here. See train.py
class ProductOfExperts(nn.Module):
"""Return parameters for product of independent experts.
See https://arxiv.org/pdf/1410.7827.pdf for equations.
@param mu: M x D for M experts
@param logvar: M x D for M experts
"""
def forward(self, mu, logvar, eps=1e-8):
var = torch.exp(logvar) + eps
# precision of i-th Gaussian expert at point x
T = 1. / (var + eps)
pd_mu = torch.sum(mu * T, dim=0) / torch.sum(T, dim=0)
pd_var = 1. / torch.sum(T, dim=0)
pd_logvar = torch.log(pd_var + eps)
return pd_mu, pd_logvar
class Swish(nn.Module):
"""https://arxiv.org/abs/1710.05941"""
def forward(self, x):
return x * F.sigmoid(x)
def prior_expert(size, use_cuda=False):
"""Universal prior expert. Here we use a spherical
Gaussian: N(0, 1).
@param size: integer
dimensionality of Gaussian
@param use_cuda: boolean [default: False]
cast CUDA on variables
"""
mu = Variable(torch.zeros(size))
logvar = Variable(torch.zeros(size))
if use_cuda:
mu, logvar = mu.cuda(), logvar.cuda()
return mu, logvar
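# Hedged usage sketch (not part of the original file): infer() fuses whichever
# modalities are provided with the universal prior expert; at least one of
# image/text must be given since the batch size is read from it. The batch
# size and n_latents below are illustrative.
def _mvae_infer_example(n_latents=64):
    model = MVAE(n_latents)
    image = Variable(torch.rand(4, 1, 28, 28))
    text = Variable(torch.LongTensor([1, 2, 3, 4]))
    mu_image, _ = model.infer(image=image)              # image expert + prior
    mu_joint, _ = model.infer(image=image, text=text)   # both experts + prior
    return mu_image, mu_joint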
| 5,973
| 31.11828
| 73
|
py
|
multimodal-vae-public
|
multimodal-vae-public-master/mnist/train.py
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import shutil
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import transforms
from torchvision.datasets import MNIST
from model import MVAE
def elbo_loss(recon_image, image, recon_text, text, mu, logvar,
lambda_image=1.0, lambda_text=1.0, annealing_factor=1):
"""Bimodal ELBO loss function.
@param recon_image: torch.Tensor
reconstructed image
@param image: torch.Tensor
input image
@param recon_text: torch.Tensor
reconstructed text probabilities
@param text: torch.Tensor
input text (one-hot)
@param mu: torch.Tensor
mean of latent distribution
@param logvar: torch.Tensor
log-variance of latent distribution
@param lambda_image: float [default: 1.0]
weight for image BCE
@param lambda_text: float [default: 1.0]
weight for text BCE
@param annealing_factor: integer [default: 1]
multiplier for KL divergence term
@return ELBO: torch.Tensor
evidence lower bound
"""
image_bce, text_bce = 0, 0 # default params
if recon_image is not None and image is not None:
image_bce = torch.sum(binary_cross_entropy_with_logits(
recon_image.view(-1, 1 * 28 * 28),
image.view(-1, 1 * 28 * 28)), dim=1)
if recon_text is not None and text is not None:
text_bce = torch.sum(cross_entropy(recon_text, text), dim=1)
# Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
# https://arxiv.org/abs/1312.6114
KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=1)
ELBO = torch.mean(lambda_image * image_bce + lambda_text * text_bce
+ annealing_factor * KLD)
return ELBO
def binary_cross_entropy_with_logits(input, target):
"""Sigmoid Activation + Binary Cross Entropy
@param input: torch.Tensor (size N)
@param target: torch.Tensor (size N)
@return loss: torch.Tensor (size N)
"""
if not (target.size() == input.size()):
raise ValueError("Target size ({}) must be the same as input size ({})".format(
target.size(), input.size()))
return (torch.clamp(input, 0) - input * target
+ torch.log(1 + torch.exp(-torch.abs(input))))
def cross_entropy(input, target, eps=1e-6):
"""k-Class Cross Entropy (Log Softmax + Log Loss)
@param input: torch.Tensor (size N x K)
@param target: torch.Tensor (size N x K)
@param eps: error to add (default: 1e-6)
@return loss: torch.Tensor (size N)
"""
if not (target.size(0) == input.size(0)):
raise ValueError(
"Target size ({}) must be the same as input size ({})".format(
target.size(0), input.size(0)))
log_input = F.log_softmax(input + eps, dim=1)
y_onehot = Variable(log_input.data.new(log_input.size()).zero_())
y_onehot = y_onehot.scatter(1, target.unsqueeze(1), 1)
loss = y_onehot * log_input
return -loss
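# Hedged sketch (not part of the original file): cross_entropy above returns an
# N x K matrix whose row-sum is the negative log-likelihood of the target class
# under a softmax over the logits. The logits below are made up.
def _cross_entropy_check():
    logits = Variable(torch.Tensor([[2.0, 0.0, -1.0]]))
    target = Variable(torch.LongTensor([0]))
    per_class = cross_entropy(logits, target)   # shape 1 x 3, zeros off the target column
    nll = torch.sum(per_class, dim=1)           # per-sample negative log-likelihood
    return nll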
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def save_checkpoint(state, is_best, folder='./', filename='checkpoint.pth.tar'):
if not os.path.isdir(folder):
os.mkdir(folder)
torch.save(state, os.path.join(folder, filename))
if is_best:
shutil.copyfile(os.path.join(folder, filename),
os.path.join(folder, 'model_best.pth.tar'))
def load_checkpoint(file_path, use_cuda=False):
checkpoint = torch.load(file_path) if use_cuda else \
torch.load(file_path, map_location=lambda storage, location: storage)
model = MVAE(checkpoint['n_latents'])
model.load_state_dict(checkpoint['state_dict'])
return model
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--n-latents', type=int, default=64,
help='size of the latent embedding [default: 64]')
parser.add_argument('--batch-size', type=int, default=100, metavar='N',
help='input batch size for training [default: 100]')
parser.add_argument('--epochs', type=int, default=500, metavar='N',
help='number of epochs to train [default: 500]')
parser.add_argument('--annealing-epochs', type=int, default=200, metavar='N',
help='number of epochs to anneal KL for [default: 200]')
parser.add_argument('--lr', type=float, default=1e-3, metavar='LR',
help='learning rate [default: 1e-3]')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status [default: 10]')
parser.add_argument('--lambda-image', type=float, default=1.,
                        help='multiplier for image reconstruction [default: 1]')
parser.add_argument('--lambda-text', type=float, default=10.,
                        help='multiplier for text reconstruction [default: 10]')
parser.add_argument('--cuda', action='store_true', default=False,
help='enables CUDA training [default: False]')
args = parser.parse_args()
args.cuda = args.cuda and torch.cuda.is_available()
if not os.path.isdir('./trained_models'):
os.makedirs('./trained_models')
train_loader = torch.utils.data.DataLoader(
MNIST('./data', train=True, download=True, transform=transforms.ToTensor()),
batch_size=args.batch_size, shuffle=True)
N_mini_batches = len(train_loader)
test_loader = torch.utils.data.DataLoader(
MNIST('./data', train=False, download=True, transform=transforms.ToTensor()),
batch_size=args.batch_size, shuffle=False)
model = MVAE(args.n_latents)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
if args.cuda:
model.cuda()
def train(epoch):
model.train()
train_loss_meter = AverageMeter()
# NOTE: is_paired is 1 if the example is paired
for batch_idx, (image, text) in enumerate(train_loader):
if epoch < args.annealing_epochs:
# compute the KL annealing factor for the current mini-batch in the current epoch
annealing_factor = (float(batch_idx + (epoch - 1) * N_mini_batches + 1) /
float(args.annealing_epochs * N_mini_batches))
else:
# by default the KL annealing factor is unity
annealing_factor = 1.0
if args.cuda:
image = image.cuda()
text = text.cuda()
image = Variable(image)
text = Variable(text)
batch_size = len(image)
# refresh the optimizer
optimizer.zero_grad()
# pass data through model
recon_image_1, recon_text_1, mu_1, logvar_1 = model(image, text)
recon_image_2, recon_text_2, mu_2, logvar_2 = model(image)
recon_image_3, recon_text_3, mu_3, logvar_3 = model(text=text)
# compute ELBO for each data combo
joint_loss = elbo_loss(recon_image_1, image, recon_text_1, text, mu_1, logvar_1,
lambda_image=args.lambda_image, lambda_text=args.lambda_text,
annealing_factor=annealing_factor)
image_loss = elbo_loss(recon_image_2, image, None, None, mu_2, logvar_2,
lambda_image=args.lambda_image, lambda_text=args.lambda_text,
annealing_factor=annealing_factor)
text_loss = elbo_loss(None, None, recon_text_3, text, mu_3, logvar_3,
lambda_image=args.lambda_image, lambda_text=args.lambda_text,
annealing_factor=annealing_factor)
train_loss = joint_loss + image_loss + text_loss
train_loss_meter.update(train_loss.data[0], batch_size)
# compute gradients and take step
train_loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tAnnealing-Factor: {:.3f}'.format(
epoch, batch_idx * len(image), len(train_loader.dataset),
100. * batch_idx / len(train_loader), train_loss_meter.avg, annealing_factor))
print('====> Epoch: {}\tLoss: {:.4f}'.format(epoch, train_loss_meter.avg))
def test(epoch):
model.eval()
test_loss_meter = AverageMeter()
for batch_idx, (image, text) in enumerate(test_loader):
if args.cuda:
image = image.cuda()
text = text.cuda()
image = Variable(image, volatile=True)
text = Variable(text, volatile=True)
batch_size = len(image)
recon_image_1, recon_text_1, mu_1, logvar_1 = model(image, text)
recon_image_2, recon_text_2, mu_2, logvar_2 = model(image)
recon_image_3, recon_text_3, mu_3, logvar_3 = model(text=text)
joint_loss = elbo_loss(recon_image_1, image, recon_text_1, text, mu_1, logvar_1)
image_loss = elbo_loss(recon_image_2, image, None, None, mu_2, logvar_2)
text_loss = elbo_loss(None, None, recon_text_3, text, mu_3, logvar_3)
test_loss = joint_loss + image_loss + text_loss
test_loss_meter.update(test_loss.data[0], batch_size)
print('====> Test Loss: {:.4f}'.format(test_loss_meter.avg))
return test_loss_meter.avg
best_loss = sys.maxint
for epoch in range(1, args.epochs + 1):
train(epoch)
test_loss = test(epoch)
is_best = test_loss < best_loss
best_loss = min(test_loss, best_loss)
# save the best model and current model
save_checkpoint({
'state_dict': model.state_dict(),
'best_loss': best_loss,
'n_latents': args.n_latents,
'optimizer' : optimizer.state_dict(),
}, is_best, folder='./trained_models')
| 10,817
| 39.215613
| 105
|
py
|
multimodal-vae-public
|
multimodal-vae-public-master/fashionmnist/sample.py
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import sys
import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import transforms
from torchvision.utils import save_image
from train import load_checkpoint
from datasets import FashionMNIST
from model import LABEL_IX_TO_STRING
def fetch_fashionmnist_image(label):
"""Return a random image from the FashionMNIST dataset with label.
@param label: integer
        an integer from 0 to 9
@return: torch.autograd.Variable
FashionMNIST image
"""
dataset = FashionMNIST('./data', train=False, download=True,
transform=transforms.ToTensor())
images = dataset.test_data.numpy()
labels = dataset.test_labels.numpy()
images = images[labels == label]
image = images[np.random.choice(np.arange(images.shape[0]))]
image = torch.from_numpy(image).float()
image = image.unsqueeze(0)
return Variable(image, volatile=True)
def fetch_fashionmnist_text(label):
"""Randomly generate a number from 0 to 9.
@param label: integer
        an integer from 0 to 9
@return: torch.autograd.Variable
Variable wrapped around an integer.
"""
text = torch.LongTensor([label])
return Variable(text, volatile=True)
if __name__ == "__main__":
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('model_path', type=str, help='path to trained model file')
parser.add_argument('--n-samples', type=int, default=64,
help='Number of images and texts to sample [default: 64]')
# condition sampling on a particular images
parser.add_argument('--condition-on-image', type=int, default=None,
help='If True, generate text conditioned on an image.')
# condition sampling on a particular text
parser.add_argument('--condition-on-text', type=int, default=None,
help='If True, generate images conditioned on a text.')
parser.add_argument('--cuda', action='store_true', default=False,
help='enables CUDA training [default: False]')
args = parser.parse_args()
args.cuda = args.cuda and torch.cuda.is_available()
model = load_checkpoint(args.model_path, use_cuda=args.cuda)
model.eval()
if args.cuda:
model.cuda()
# mode 1: unconditional generation
if not args.condition_on_image and not args.condition_on_text:
mu = Variable(torch.Tensor([0]))
std = Variable(torch.Tensor([1]))
if args.cuda:
mu = mu.cuda()
std = std.cuda()
# mode 2: generate conditioned on image
elif args.condition_on_image and not args.condition_on_text:
image = fetch_fashionmnist_image(args.condition_on_image)
if args.cuda:
image = image.cuda()
        mu, logvar = model.infer(image=image)
std = logvar.mul(0.5).exp_()
# mode 3: generate conditioned on text
elif args.condition_on_text and not args.condition_on_image:
text = fetch_fashionmnist_text(args.condition_on_text)
if args.cuda:
text = text.cuda()
        mu, logvar = model.infer(text=text)
std = logvar.mul(0.5).exp_()
# mode 4: generate conditioned on image and text
elif args.condition_on_text and args.condition_on_image:
image = fetch_fashionmnist_image(args.condition_on_image)
text = fetch_fashionmnist_text(args.condition_on_text)
if args.cuda:
image = image.cuda()
text = text.cuda()
        mu, logvar = model.infer(image=image, text=text)
std = logvar.mul(0.5).exp_()
# sample from uniform gaussian
sample = Variable(torch.randn(args.n_samples, model.n_latents))
if args.cuda:
sample = sample.cuda()
# sample from particular gaussian by multiplying + adding
mu = mu.expand_as(sample)
std = std.expand_as(sample)
sample = sample.mul(std).add_(mu)
# generate image and text
img_recon = F.sigmoid(model.image_decoder(sample)).cpu().data
txt_recon = F.log_softmax(model.text_decoder(sample), dim=1).cpu().data
# save image samples to filesystem
save_image(img_recon.view(args.n_samples, 1, 28, 28),
'./sample_image.png')
# save text samples to filesystem
with open('./sample_text.txt', 'w') as fp:
txt_recon_np = txt_recon.numpy()
txt_recon_np = np.argmax(txt_recon_np, axis=1).tolist()
for i, item in enumerate(txt_recon_np):
fp.write('Text (%d): %s\n' % (i, LABEL_IX_TO_STRING[item]))
| 4,827 | 37.624 | 82 | py |
| multimodal-vae-public | multimodal-vae-public-master/fashionmnist/model.py |
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
# MAP from index to the interpretable label
LABEL_IX_TO_STRING = {0: 'T-shirt/top', 1: 'Trouser', 2: 'Pullover', 3: 'Dress',
4: 'Coat', 5: 'Sandal', 6: 'Shirt', 7: 'Sneaker', 8: 'Bag',
9: 'Ankle boot'}
class MVAE(nn.Module):
"""Multimodal Variational Autoencoder.
@param n_latents: integer
number of latent dimensions
"""
def __init__(self, n_latents):
super(MVAE, self).__init__()
self.image_encoder = ImageEncoder(n_latents)
self.image_decoder = ImageDecoder(n_latents)
self.text_encoder = TextEncoder(n_latents)
self.text_decoder = TextDecoder(n_latents)
self.experts = ProductOfExperts()
self.n_latents = n_latents
def reparametrize(self, mu, logvar):
if self.training:
std = logvar.mul(0.5).exp_()
eps = Variable(std.data.new(std.size()).normal_())
return eps.mul(std).add_(mu)
else:
return mu
def forward(self, image=None, text=None):
mu, logvar = self.infer(image, text)
# reparametrization trick to sample
z = self.reparametrize(mu, logvar)
# reconstruct inputs based on that gaussian
img_recon = self.image_decoder(z)
txt_recon = self.text_decoder(z)
return img_recon, txt_recon, mu, logvar
def infer(self, image=None, text=None):
batch_size = image.size(0) if image is not None else text.size(0)
use_cuda = next(self.parameters()).is_cuda # check if CUDA
mu, logvar = prior_expert((1, batch_size, self.n_latents),
use_cuda=use_cuda)
if image is not None:
img_mu, img_logvar = self.image_encoder(image)
mu = torch.cat((mu, img_mu.unsqueeze(0)), dim=0)
logvar = torch.cat((logvar, img_logvar.unsqueeze(0)), dim=0)
if text is not None:
txt_mu, txt_logvar = self.text_encoder(text)
mu = torch.cat((mu, txt_mu.unsqueeze(0)), dim=0)
logvar = torch.cat((logvar, txt_logvar.unsqueeze(0)), dim=0)
# product of experts to combine gaussians
mu, logvar = self.experts(mu, logvar)
return mu, logvar
class ImageEncoder(nn.Module):
"""Parametrizes q(z|x).
@param n_latents: integer
number of latent dimensions
"""
def __init__(self, n_latents):
super(ImageEncoder, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(1, 64, 4, 2, 1, bias=False),
Swish(),
nn.Conv2d(64, 128, 4, 2, 1, bias=False),
Swish())
self.classifier = nn.Sequential(
nn.Linear(128 * 7 * 7, 512),
Swish(),
nn.Linear(512, n_latents * 2))
self.n_latents = n_latents
def forward(self, x):
n_latents = self.n_latents
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x[:, :n_latents], x[:, n_latents:]
class ImageDecoder(nn.Module):
"""Parametrizes p(x|z).
@param n_latents: integer
number of latent dimensions
"""
def __init__(self, n_latents):
super(ImageDecoder, self).__init__()
self.n_latents = n_latents
self.upsampler = nn.Sequential(
nn.Linear(n_latents, 512),
Swish(),
nn.Linear(512, 128 * 7 * 7),
Swish())
self.hallucinate = nn.Sequential(
nn.ConvTranspose2d(128, 64, 4, 2, 1, bias=False),
Swish(),
nn.ConvTranspose2d(64, 1, 4, 2, 1, bias=False))
def forward(self, z):
# the input will be a vector of size |n_latents|
z = self.upsampler(z)
z = z.view(-1, 128, 7, 7)
z = self.hallucinate(z)
return z # NOTE: no sigmoid here. See train.py
class TextEncoder(nn.Module):
"""Parametrizes q(z|y).
@param n_latents: integer
number of latent dimensions
"""
def __init__(self, n_latents):
super(TextEncoder, self).__init__()
self.net = nn.Sequential(
nn.Embedding(10, 512),
Swish(),
nn.Linear(512, 512),
Swish(),
nn.Linear(512, n_latents * 2))
self.n_latents = n_latents
def forward(self, x):
n_latents = self.n_latents
x = self.net(x)
return x[:, :n_latents], x[:, n_latents:]
class TextDecoder(nn.Module):
"""Parametrizes p(y|z).
@param n_latents: integer
number of latent dimensions
"""
def __init__(self, n_latents):
super(TextDecoder, self).__init__()
self.net = nn.Sequential(
nn.Linear(n_latents, 512),
Swish(),
nn.Linear(512, 512),
Swish(),
nn.Linear(512, 512),
Swish(),
nn.Linear(512, 10))
def forward(self, z):
z = self.net(z)
return z # NOTE: no softmax here. See train.py
class ProductOfExperts(nn.Module):
"""Return parameters for product of independent experts.
See https://arxiv.org/pdf/1410.7827.pdf for equations.
@param mu: M x D for M experts
@param logvar: M x D for M experts
"""
def forward(self, mu, logvar, eps=1e-8):
var = torch.exp(logvar) + eps
# precision of i-th Gaussian expert at point x
T = 1. / (var + eps)
pd_mu = torch.sum(mu * T, dim=0) / torch.sum(T, dim=0)
pd_var = 1. / torch.sum(T, dim=0)
pd_logvar = torch.log(pd_var + eps)
return pd_mu, pd_logvar
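# Illustrative sketch, not part of the original module: combining two unit
# Gaussian experts with the ProductOfExperts layer above. The shapes and
# values below are assumptions chosen only for this example.
def _poe_example():
    experts = ProductOfExperts()
    mu = torch.zeros(2, 3, 4)      # M=2 experts, batch of 3, 4 latent dims
    logvar = torch.zeros(2, 3, 4)  # both experts have unit variance
    pd_mu, pd_logvar = experts(mu, logvar)
    # the product of two unit Gaussians has variance 1/2, so pd_logvar ~ log(0.5)
    return pd_mu, pd_logvar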
class Swish(nn.Module):
def forward(self, x):
return x * F.sigmoid(x)
def prior_expert(size, use_cuda=False):
"""Universal prior expert. Here we use a spherical
Gaussian: N(0, 1).
@param size: integer
dimensionality of Gaussian
@param use_cuda: boolean [default: False]
cast CUDA on variables
"""
mu = Variable(torch.zeros(size))
logvar = Variable(torch.log(torch.ones(size)))
if use_cuda:
mu, logvar = mu.cuda(), logvar.cuda()
return mu, logvar
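# Illustrative sketch, not part of the original module: one forward pass of
# the MVAE on random FashionMNIST-shaped inputs to show the expected shapes.
# The batch size and latent size are assumptions chosen only for this example.
def _mvae_shape_check():
    model = MVAE(n_latents=64)
    model.eval()  # reparametrize() returns the mean in eval mode
    image = Variable(torch.randn(8, 1, 28, 28))
    text = Variable(torch.LongTensor(8).random_(0, 10))
    img_recon, txt_recon, mu, logvar = model(image, text)
    # img_recon: (8, 1, 28, 28), txt_recon: (8, 10), mu and logvar: (8, 64)
    return img_recon.size(), txt_recon.size(), mu.size(), logvar.size()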
| 6,482 | 30.779412 | 82 | py |
| multimodal-vae-public | multimodal-vae-public-master/fashionmnist/datasets.py |
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from torchvision.datasets import MNIST
class FashionMNIST(MNIST):
"""`Fashion-MNIST <https://github.com/zalandoresearch/fashion-mnist>`_ Dataset.
Args:
root (string): Root directory of dataset where ``processed/training.pt``
and ``processed/test.pt`` exist.
train (bool, optional): If True, creates dataset from ``training.pt``,
otherwise from ``test.pt``.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
"""
urls = [
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz',
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-labels-idx1-ubyte.gz',
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz',
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz',
]
| 1,428 | 46.633333 | 96 | py |
| multimodal-vae-public | multimodal-vae-public-master/fashionmnist/train.py |
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import shutil
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import transforms
from model import MVAE
from datasets import FashionMNIST
def elbo_loss(recon_image, image, recon_text, text, mu, logvar,
lambda_image=1.0, lambda_text=1.0, annealing_factor=1):
"""Bimodal ELBO loss function.
@param recon_image: torch.Tensor
reconstructed image
@param image: torch.Tensor
input image
@param recon_text: torch.Tensor
reconstructed text probabilities
@param text: torch.Tensor
input text (one-hot)
@param mu: torch.Tensor
mean of latent distribution
@param logvar: torch.Tensor
log-variance of latent distribution
@param lambda_image: float [default: 1.0]
weight for image BCE
@param lambda_text: float [default: 1.0]
weight for text BCE
@param annealing_factor: integer [default: 1]
multiplier for KL divergence term
@return ELBO: torch.Tensor
evidence lower bound
"""
image_bce, text_bce = 0, 0 # default params
if recon_image is not None and image is not None:
image_bce = torch.sum(binary_cross_entropy_with_logits(
recon_image.view(-1, 1 * 28 * 28),
image.view(-1, 1 * 28 * 28)), dim=1)
if recon_text is not None and text is not None:
text_bce = torch.sum(cross_entropy(recon_text, text), dim=1)
# Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
# https://arxiv.org/abs/1312.6114
KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=1)
ELBO = torch.mean(lambda_image * image_bce + lambda_text * text_bce
+ annealing_factor * KLD)
return ELBO
def binary_cross_entropy_with_logits(input, target):
"""Sigmoid Activation + Binary Cross Entropy
@param input: torch.Tensor (size N)
@param target: torch.Tensor (size N)
@return loss: torch.Tensor (size N)
"""
if not (target.size() == input.size()):
raise ValueError("Target size ({}) must be the same as input size ({})".format(
target.size(), input.size()))
return (torch.clamp(input, 0) - input * target
+ torch.log(1 + torch.exp(-torch.abs(input))))
def cross_entropy(input, target, eps=1e-6):
"""k-Class Cross Entropy (Log Softmax + Log Loss)
@param input: torch.Tensor (size N x K)
@param target: torch.Tensor (size N x K)
@param eps: error to add (default: 1e-6)
@return loss: torch.Tensor (size N)
"""
if not (target.size(0) == input.size(0)):
raise ValueError(
"Target size ({}) must be the same as input size ({})".format(
target.size(0), input.size(0)))
log_input = F.log_softmax(input + eps, dim=1)
y_onehot = Variable(log_input.data.new(log_input.size()).zero_())
y_onehot = y_onehot.scatter(1, target.unsqueeze(1), 1)
loss = y_onehot * log_input
return -loss
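# Illustrative sketch, not part of the original script: per-example usage of
# the two hand-rolled losses above. Shapes are assumptions for this example.
def _loss_example():
    logits = Variable(torch.randn(5, 10))                 # unnormalized class scores
    labels = Variable(torch.LongTensor(5).random_(0, 10))
    per_class = cross_entropy(logits, labels)             # (5, 10), nonzero only at the true class
    per_sample_ce = torch.sum(per_class, dim=1)           # (5,) negative log-likelihoods
    pixel_logits = Variable(torch.randn(5, 784))
    pixel_targets = Variable(torch.rand(5, 784))
    bce = binary_cross_entropy_with_logits(pixel_logits, pixel_targets)  # (5, 784)
    per_sample_bce = torch.sum(bce, dim=1)                # (5,)
    return per_sample_ce, per_sample_bce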
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
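# Illustrative sketch, not part of the original script: how the meter above
# tracks a running weighted average over mini-batches of different sizes.
def _meter_example():
    meter = AverageMeter()
    meter.update(2.0, n=10)  # batch of 10 with mean loss 2.0
    meter.update(4.0, n=30)  # batch of 30 with mean loss 4.0
    # meter.avg == (2.0 * 10 + 4.0 * 30) / 40 == 3.5
    return meter.avg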
def save_checkpoint(state, is_best, folder='./', filename='checkpoint.pth.tar'):
if not os.path.isdir(folder):
os.mkdir(folder)
torch.save(state, os.path.join(folder, filename))
if is_best:
shutil.copyfile(os.path.join(folder, filename),
os.path.join(folder, 'model_best.pth.tar'))
def load_checkpoint(file_path, use_cuda=False):
checkpoint = torch.load(file_path) if use_cuda else \
torch.load(file_path, map_location=lambda storage, location: storage)
model = MVAE(checkpoint['n_latents'])
model.load_state_dict(checkpoint['state_dict'])
return model
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--n-latents', type=int, default=64,
help='size of the latent embedding [default: 64]')
parser.add_argument('--batch-size', type=int, default=100, metavar='N',
help='input batch size for training [default: 100]')
parser.add_argument('--epochs', type=int, default=500, metavar='N',
help='number of epochs to train [default: 500]')
parser.add_argument('--annealing-epochs', type=int, default=200, metavar='N',
help='number of epochs to anneal KL for [default: 200]')
parser.add_argument('--lr', type=float, default=1e-3, metavar='LR',
help='learning rate [default: 1e-3]')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status [default: 10]')
parser.add_argument('--lambda-image', type=float, default=1.,
                        help='multiplier for image reconstruction [default: 1]')
parser.add_argument('--lambda-text', type=float, default=10.,
                        help='multiplier for text reconstruction [default: 10]')
parser.add_argument('--cuda', action='store_true', default=False,
help='enables CUDA training [default: False]')
args = parser.parse_args()
args.cuda = args.cuda and torch.cuda.is_available()
if not os.path.isdir('./trained_models'):
os.makedirs('./trained_models')
train_loader = torch.utils.data.DataLoader(
FashionMNIST('./data', train=True, download=True, transform=transforms.ToTensor()),
batch_size=args.batch_size, shuffle=True)
N_mini_batches = len(train_loader)
test_loader = torch.utils.data.DataLoader(
FashionMNIST('./data', train=False, download=True, transform=transforms.ToTensor()),
batch_size=args.batch_size, shuffle=False)
model = MVAE(args.n_latents)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
if args.cuda:
model.cuda()
def train(epoch):
model.train()
train_loss_meter = AverageMeter()
for batch_idx, (image, text) in enumerate(train_loader):
if epoch < args.annealing_epochs:
# compute the KL annealing factor for the current mini-batch in the current epoch
                annealing_factor = (float(batch_idx + (epoch - 1) * N_mini_batches + 1) /
float(args.annealing_epochs * N_mini_batches))
else:
# by default the KL annealing factor is unity
annealing_factor = 1.0
if args.cuda:
image = image.cuda()
text = text.cuda()
image = Variable(image)
text = Variable(text)
batch_size = len(image)
# refresh the optimizer
optimizer.zero_grad()
# pass data through model
recon_image_1, recon_text_1, mu_1, logvar_1 = model(image, text)
recon_image_2, recon_text_2, mu_2, logvar_2 = model(image)
recon_image_3, recon_text_3, mu_3, logvar_3 = model(text=text)
# compute ELBO for each data combo
joint_loss = elbo_loss(recon_image_1, image, recon_text_1, text, mu_1, logvar_1,
lambda_image=args.lambda_image, lambda_text=args.lambda_text,
annealing_factor=annealing_factor)
image_loss = elbo_loss(recon_image_2, image, None, None, mu_2, logvar_2,
lambda_image=args.lambda_image, lambda_text=args.lambda_text,
annealing_factor=annealing_factor)
text_loss = elbo_loss(None, None, recon_text_3, text, mu_3, logvar_3,
lambda_image=args.lambda_image, lambda_text=args.lambda_text,
annealing_factor=annealing_factor)
train_loss = joint_loss + image_loss + text_loss
train_loss_meter.update(train_loss.data[0], batch_size)
# compute gradients and take step
train_loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tAnnealing-Factor: {:.3f}'.format(
epoch, batch_idx * len(image), len(train_loader.dataset),
100. * batch_idx / len(train_loader), train_loss_meter.avg, annealing_factor))
print('====> Epoch: {}\tLoss: {:.4f}'.format(epoch, train_loss_meter.avg))
def test(epoch):
model.eval()
test_loss_meter = AverageMeter()
for batch_idx, (image, text) in enumerate(test_loader):
if args.cuda:
image = image.cuda()
text = text.cuda()
image = Variable(image, volatile=True)
text = Variable(text, volatile=True)
batch_size = len(image)
recon_image_1, recon_text_1, mu_1, logvar_1 = model(image, text)
recon_image_2, recon_text_2, mu_2, logvar_2 = model(image)
recon_image_3, recon_text_3, mu_3, logvar_3 = model(text=text)
joint_loss = elbo_loss(recon_image_1, image, recon_text_1, text, mu_1, logvar_1)
image_loss = elbo_loss(recon_image_2, image, None, None, mu_2, logvar_2)
text_loss = elbo_loss(None, None, recon_text_3, text, mu_3, logvar_3)
test_loss = joint_loss + image_loss + text_loss
test_loss_meter.update(test_loss.data[0], batch_size)
print('====> Test Loss: {:.4f}'.format(test_loss_meter.avg))
return test_loss_meter.avg
best_loss = sys.maxint
for epoch in range(1, args.epochs + 1):
train(epoch)
test_loss = test(epoch)
is_best = test_loss < best_loss
best_loss = min(test_loss, best_loss)
# save the best model and current model
save_checkpoint({
'state_dict': model.state_dict(),
'best_loss': best_loss,
'n_latents': args.n_latents,
'optimizer' : optimizer.state_dict(),
}, is_best, folder='./trained_models')
| 10,820 | 39.226766 | 105 | py |
| multimodal-vae-public | multimodal-vae-public-master/multimnist/sample.py |
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import sys
import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import transforms
from torchvision.utils import save_image
from datasets import MultiMNIST
from train import load_checkpoint
from utils import char_tensor, charlist_tensor
from utils import tensor_to_string
def fetch_multimnist_image(label):
"""Return a random image from the MultiMNIST dataset with label.
@param label: string
a string of up to 4 digits
@return: torch.autograd.Variable
MultiMNIST image
"""
dataset = MultiMNIST('./data', train=False, download=True,
transform=transforms.ToTensor(),
target_transform=charlist_tensor)
    all_images = dataset.test_data
    all_labels = dataset.test_labels
    n_rows = len(all_images)
    images = []
    for i in xrange(n_rows):
        image = all_images[i]
        text = charlist_tensor(all_labels[i])
        if tensor_to_string(text) == label:
            images.append(image.unsqueeze(0))
    if len(images) == 0:
        sys.exit('No images with label (%s) found.' % label)
    images = torch.cat(images).cpu().numpy()
    ix = np.random.choice(np.arange(images.shape[0]))
    image = images[ix]
    image = torch.from_numpy(image).float() / 255.  # match the [0, 1] scaling used by ToTensor in training
    image = image.unsqueeze(0).unsqueeze(0)  # (1, 1, 50, 50) so it can be fed to the conv encoder
    return Variable(image, volatile=True)
def fetch_multimnist_text(label):
    """Wrap a digit-string label in a Variable.
@param label: string
a string of up to 4 digits
@return: torch.autograd.Variable
Variable wrapped around an integer.
"""
text = char_tensor(label).unsqueeze(0)
return Variable(text, volatile=True)
if __name__ == "__main__":
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('model_path', type=str, help='path to trained model file')
parser.add_argument('--n-samples', type=int, default=64,
help='Number of images and texts to sample [default: 64]')
    # condition sampling on a particular image
    parser.add_argument('--condition-on-image', type=str, default=None,
                        help='If given, generate text conditioned on an image with this digit string.')
# condition sampling on a particular text
    parser.add_argument('--condition-on-text', type=str, default=None,
                        help='If given, generate images conditioned on this digit string.')
parser.add_argument('--cuda', action='store_true', default=False,
help='enables CUDA training')
args = parser.parse_args()
args.cuda = args.cuda and torch.cuda.is_available()
model = load_checkpoint(args.model_path, use_cuda=args.cuda)
model.eval()
if args.cuda:
model.cuda()
# mode 1: unconditional generation
if not args.condition_on_image and not args.condition_on_text:
mu = Variable(torch.Tensor([0]))
std = Variable(torch.Tensor([1]))
if args.cuda:
mu = mu.cuda()
std = std.cuda()
# mode 2: generate conditioned on image
elif args.condition_on_image and not args.condition_on_text:
image = fetch_multimnist_image(args.condition_on_image)
if args.cuda:
image = image.cuda()
        mu, logvar = model.infer(image=image)
std = logvar.mul(0.5).exp_()
# mode 3: generate conditioned on text
elif args.condition_on_text and not args.condition_on_image:
text = fetch_multimnist_text(args.condition_on_text)
if args.cuda:
text = text.cuda()
        mu, logvar = model.infer(text=text)
std = logvar.mul(0.5).exp_()
# mode 4: generate conditioned on image and text
elif args.condition_on_text and args.condition_on_image:
image = fetch_multimnist_image(args.condition_on_image)
text = fetch_multimnist_text(args.condition_on_text)
if args.cuda:
image = image.cuda()
text = text.cuda()
        mu, logvar = model.infer(image=image, text=text)
std = logvar.mul(0.5).exp_()
# sample from uniform gaussian
sample = Variable(torch.randn(args.n_samples, model.n_latents))
if args.cuda:
sample = sample.cuda()
# sample from particular gaussian by multiplying + adding
mu = mu.expand_as(sample)
std = std.expand_as(sample)
sample = sample.mul(std).add_(mu)
# generate image and text
img_recon = F.sigmoid(model.image_decoder(sample)).cpu().data
    txt_recon = F.log_softmax(model.text_decoder(sample), dim=2).cpu().data
txt_recon = torch.max(txt_recon, dim=2)[1]
# save image samples to filesystem
save_image(img_recon.view(args.n_samples, 1, 50, 50),
'./sample_image.png')
# save text samples to filesystem
with open('./sample_text.txt', 'w') as fp:
        for i in xrange(txt_recon.size(0)):
            txt_recon_str = tensor_to_string(txt_recon[i])
            fp.write('Text (%d): %s\n' % (i, txt_recon_str))
| 5,196 | 36.121429 | 82 | py |
| multimodal-vae-public | multimodal-vae-public-master/multimnist/utils.py |
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import string
import random
import time
import math
import torch
from torch.autograd import Variable
max_length = 4 # max of 4 characters in an image
all_characters = '0123456789'
n_characters = len(all_characters)
# add 2 characters; b/c we always generate a fixed number
# of characters, we do not need an EOS token
SOS = n_characters
FILL = n_characters + 1 # placeholder for nothing
n_characters += 2
def char_tensor(string):
"""Turn a string into a tensor.
@param string: str object
@return tensor: torch.Tensor object. Not a Variable.
"""
tensor = torch.ones(max_length).long() * FILL
for c in xrange(len(string)):
tensor[c] = all_characters.index(string[c])
return tensor
def charlist_tensor(charlist):
"""Turn a list of indexes into a tensor."""
string = ''.join([str(i) for i in charlist])
return char_tensor(string)
def tensor_to_string(tensor):
    """Convert a LongTensor of character indexes back into a string."""
string = ''
for i in range(tensor.size(0)):
top_i = tensor[i]
string += index_to_char(top_i)
return string
def index_to_char(top_i):
if top_i == SOS:
return '^'
# FILL is the default character
elif top_i == FILL:
return ''
else:
return all_characters[top_i]
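# Illustrative sketch, not part of the original module: round-trip between a
# digit string and its fixed-length tensor encoding. '42' is an arbitrary label.
def _roundtrip_example():
    t = char_tensor('42')    # LongTensor([4, 2, FILL, FILL])
    s = tensor_to_string(t)  # FILL positions map back to the empty string
    assert s == '42'
    return t, s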
| 1,417 | 23.877193 | 60 | py |
| multimodal-vae-public | multimodal-vae-public-master/multimnist/model.py |
"""This model will be quite similar to mnist/model.py
except we will need to be slightly fancier in the
encoder/decoders for each modality. Likely, we will need
convolutions/deconvolutions and RNNs.
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
from torch.nn.parameter import Parameter
from utils import n_characters, max_length
from utils import SOS, FILL
class MVAE(nn.Module):
"""Multimodal Variational Autoencoder.
@param n_latents: integer
number of latent dimensions
"""
def __init__(self, n_latents):
super(MVAE, self).__init__()
self.image_encoder = ImageEncoder(n_latents)
self.image_decoder = ImageDecoder(n_latents)
self.text_encoder = TextEncoder(n_latents, n_characters, n_hiddens=200,
bidirectional=True)
self.text_decoder = TextDecoder(n_latents, n_characters, n_hiddens=200)
self.experts = ProductOfExperts()
self.n_latents = n_latents
def reparametrize(self, mu, logvar):
if self.training:
std = logvar.mul(0.5).exp_()
eps = Variable(std.data.new(std.size()).normal_())
return eps.mul(std).add_(mu)
else:
return mu
def forward(self, image=None, text=None):
mu, logvar = self.infer(image, text)
# reparametrization trick to sample
z = self.reparametrize(mu, logvar)
# reconstruct inputs based on that gaussian
img_recon = self.image_decoder(z)
txt_recon = self.text_decoder(z)
return img_recon, txt_recon, mu, logvar
def infer(self, image=None, text=None):
batch_size = image.size(0) if image is not None else text.size(0)
use_cuda = next(self.parameters()).is_cuda # check if CUDA
# initialize the universal prior expert
mu, logvar = prior_expert((1, batch_size, self.n_latents),
use_cuda=use_cuda)
if image is not None:
img_mu, img_logvar = self.image_encoder(image)
mu = torch.cat((mu, img_mu.unsqueeze(0)), dim=0)
logvar = torch.cat((logvar, img_logvar.unsqueeze(0)), dim=0)
if text is not None:
txt_mu, txt_logvar = self.text_encoder(text)
mu = torch.cat((mu, txt_mu.unsqueeze(0)), dim=0)
logvar = torch.cat((logvar, txt_logvar.unsqueeze(0)), dim=0)
# product of experts to combine gaussians
mu, logvar = self.experts(mu, logvar)
return mu, logvar
class ImageEncoder(nn.Module):
"""Parametrizes q(z|x).
This task is quite a bit harder than MNIST so we probably need
    to use a CNN of some form. This will be good to get us ready for
natural images.
@param n_latents: integer
size of latent vector
"""
def __init__(self, n_latents):
super(ImageEncoder, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(1, 32, 4, 2, 1, bias=False),
Swish(),
nn.Conv2d(32, 64, 4, 2, 1, bias=False),
nn.BatchNorm2d(64),
Swish(),
nn.Conv2d(64, 128, 4, 2, 1, bias=False),
nn.BatchNorm2d(128),
Swish(),
nn.Conv2d(128, 256, 4, 2, 0, bias=False),
nn.BatchNorm2d(256),
Swish())
self.classifier = nn.Sequential(
nn.Linear(256 * 2 * 2, 512),
Swish(),
nn.Dropout(p=0.1),
nn.Linear(512, n_latents * 2))
self.n_latents = n_latents
def forward(self, x):
n_latents = self.n_latents
x = self.features(x)
x = x.view(-1, 256 * 2 * 2)
x = self.classifier(x)
return x[:, :n_latents], x[:, n_latents:]
class ImageDecoder(nn.Module):
"""Parametrizes p(x|z).
@param n_latents: integer
number of latent dimensions
"""
def __init__(self, n_latents):
super(ImageDecoder, self).__init__()
self.upsample = nn.Sequential(
nn.Linear(n_latents, 256 * 2 * 2),
Swish())
self.hallucinate = nn.Sequential(
nn.ConvTranspose2d(256, 128, 4, 2, 0, bias=False),
nn.BatchNorm2d(128),
Swish(),
nn.ConvTranspose2d(128, 64, 4, 2, 1, bias=False),
nn.BatchNorm2d(64),
Swish(),
nn.ConvTranspose2d(64, 32, 5, 2, 1, bias=False),
nn.BatchNorm2d(32),
Swish(),
nn.ConvTranspose2d(32, 1, 4, 2, 1, bias=False))
def forward(self, z):
# the input will be a vector of size |n_latents|
z = self.upsample(z)
z = z.view(-1, 256, 2, 2)
z = self.hallucinate(z)
return z # NOTE: no sigmoid here. See train.py
class TextEncoder(nn.Module):
"""Parametrizes q(z|y).
We train an embedding layer from the 10 digit space
to move to a continuous domain. The GRU is optionally
bidirectional.
@param n_latents: integer
size of latent vector
@param n_characters: integer
           number of possible characters (10 digits plus SOS/FILL for MultiMNIST)
@param n_hiddens: integer [default: 200]
number of hidden units in GRU
@param bidirectional: boolean [default: True]
hyperparameter for GRU.
"""
def __init__(self, n_latents, n_characters, n_hiddens=200, bidirectional=True):
super(TextEncoder, self).__init__()
self.embed = nn.Embedding(n_characters, n_hiddens)
self.gru = nn.GRU(n_hiddens, n_hiddens, 1, dropout=0.1,
bidirectional=bidirectional)
self.h2p = nn.Linear(n_hiddens, n_latents * 2) # hiddens to parameters
self.n_latents = n_latents
self.n_hiddens = n_hiddens
self.bidirectional = bidirectional
def forward(self, x):
n_hiddens = self.n_hiddens
n_latents = self.n_latents
x = self.embed(x)
x = x.transpose(0, 1) # GRU expects (seq_len, batch, ...)
x, h = self.gru(x, None)
x = x[-1] # take only the last value
if self.bidirectional:
x = x[:, :n_hiddens] + x[:, n_hiddens:] # sum bidirectional outputs
x = self.h2p(x)
return x[:, :n_latents], x[:, n_latents:]
class TextDecoder(nn.Module):
"""Parametrizes p(y|z).
GRU for text decoding. Given a start token, sample a character
via an RNN and repeat for a fixed length.
@param n_latents: integer
size of latent vector
@param n_characters: integer
           number of possible characters (10 digits plus SOS/FILL for MultiMNIST)
@param n_hiddens: integer [default: 200]
number of hidden units in GRU
"""
def __init__(self, n_latents, n_characters, n_hiddens=200):
super(TextDecoder, self).__init__()
self.embed = nn.Embedding(n_characters, n_hiddens)
self.z2h = nn.Linear(n_latents, n_hiddens)
self.gru = nn.GRU(n_hiddens + n_latents, n_hiddens, 2, dropout=0.1)
self.h2o = nn.Linear(n_hiddens + n_latents, n_characters)
self.n_latents = n_latents
self.n_characters = n_characters
def forward(self, z):
n_latents = self.n_latents
n_characters = self.n_characters
batch_size = z.size(0)
# first input character is SOS
c_in = Variable(torch.LongTensor([SOS]).repeat(batch_size))
# store output word here
words = Variable(torch.zeros(batch_size, max_length, n_characters))
if z.is_cuda:
c_in = c_in.cuda()
words = words.cuda()
# get hiddens from latents
h = self.z2h(z).unsqueeze(0).repeat(2, 1, 1)
# look through n_steps and generate characters
for i in xrange(max_length):
c_out, h = self.step(i, z, c_in, h)
sample = torch.max(F.log_softmax(c_out, dim=1), dim=1)[1]
words[:, i] = c_out
c_in = sample
return words # (batch_size, seq_len, ...)
def step(self, ix, z, c_in, h):
c_in = swish(self.embed(c_in))
c_in = torch.cat((c_in, z), dim=1)
c_in = c_in.unsqueeze(0)
c_out, h = self.gru(c_in, h)
c_out = c_out.squeeze(0)
c_out = torch.cat((c_out, z), dim=1)
c_out = self.h2o(c_out)
return c_out, h # NOTE: no softmax here. See train.py
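# Illustrative sketch, not part of the original module: decoding a fixed-length
# character sequence from a random latent vector with the TextDecoder above.
# The latent size and batch size are assumptions chosen only for this example.
def _text_decode_example():
    decoder = TextDecoder(n_latents=64, n_characters=n_characters)
    decoder.eval()
    z = Variable(torch.randn(3, 64))
    logits = decoder(z)                   # (3, max_length, n_characters), unnormalized
    chars = torch.max(logits, dim=2)[1]   # predicted character index per position
    return chars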
class ProductOfExperts(nn.Module):
"""Return parameters for product of independent experts.
See https://arxiv.org/pdf/1410.7827.pdf for equations.
@param mu: M x D for M experts
@param logvar: M x D for M experts
"""
def forward(self, mu, logvar, eps=1e-8):
var = torch.exp(logvar) + eps
# precision of i-th Gaussian expert at point x
T = 1. / var
pd_mu = torch.sum(mu * T, dim=0) / torch.sum(T, dim=0)
pd_var = 1. / torch.sum(T, dim=0)
pd_logvar = torch.log(pd_var)
return pd_mu, pd_logvar
class Swish(nn.Module):
def forward(self, x):
return x * F.sigmoid(x)
def swish(x):
return x * F.sigmoid(x)
def prior_expert(size, use_cuda=False):
"""Universal prior expert. Here we use a spherical
Gaussian: N(0, 1).
@param size: integer
dimensionality of Gaussian
@param use_cuda: boolean [default: False]
cast CUDA on variables
"""
mu = Variable(torch.zeros(size))
logvar = Variable(torch.log(torch.ones(size)))
if use_cuda:
mu, logvar = mu.cuda(), logvar.cuda()
return mu, logvar
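# Illustrative sketch, not part of the original module: one forward pass of the
# MVAE on random 50x50 MultiMNIST-shaped inputs to show the expected shapes.
# The batch size and latent size are assumptions chosen only for this example.
def _mvae_shape_check():
    model = MVAE(n_latents=64)
    model.eval()  # reparametrize() returns the mean in eval mode
    image = Variable(torch.randn(4, 1, 50, 50))
    text = Variable(torch.LongTensor(4, max_length).random_(0, n_characters))
    img_recon, txt_recon, mu, logvar = model(image, text)
    # img_recon: (4, 1, 50, 50), txt_recon: (4, max_length, n_characters)
    return img_recon.size(), txt_recon.size(), mu.size(), logvar.size()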
| 9,790 | 34.219424 | 83 | py |
| multimodal-vae-public | multimodal-vae-public-master/multimnist/datasets.py |
"""
This script generates a dataset similar to the MultiMNIST dataset
described in [1]. However, we remove any translation.
[1] Eslami, SM Ali, et al. "Attend, infer, repeat: Fast scene
understanding with generative models." Advances in Neural Information
Processing Systems. 2016.
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import random
import numpy as np
import numpy.random as npr
from PIL import Image
from random import shuffle
from scipy.misc import imresize
import torch
import torchvision.datasets as dset
from torch.utils.data.dataset import Dataset
class MultiMNIST(Dataset):
"""Images with 0 to 4 digits of non-overlapping MNIST numbers.
@param root: string
path to dataset root
@param train: boolean [default: True]
whether to return training examples or testing examples
@param transform: ?torchvision.Transforms
optional function to apply to training inputs
@param target_transform: ?torchvision.Transforms
optional function to apply to training outputs
"""
processed_folder = 'multimnist'
training_file = 'training.pt'
test_file = 'test.pt'
def __init__(self, root, train=True, transform=None, target_transform=None, download=False):
self.root = os.path.expanduser(root)
self.transform = transform
self.target_transform = target_transform
self.train = train # training set or test set
if download:
self.download()
if not self._check_exists():
raise RuntimeError('Dataset not found.' +
' You can use download=True to download it')
if self.train:
self.train_data, self.train_labels = torch.load(
os.path.join(self.root, self.processed_folder, self.training_file))
else:
self.test_data, self.test_labels = torch.load(
os.path.join(self.root, self.processed_folder, self.test_file))
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
if self.train:
img, target = self.train_data[index], self.train_labels[index]
else:
img, target = self.test_data[index], self.test_labels[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img.numpy(), mode='L')
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self):
if self.train:
return len(self.train_data)
else:
return len(self.test_data)
def _check_exists(self):
return os.path.exists(os.path.join(self.root, self.processed_folder, self.training_file)) and \
os.path.exists(os.path.join(self.root, self.processed_folder, self.test_file))
def download(self):
if self._check_exists():
return
make_dataset(self.root, self.processed_folder,
self.training_file, self.test_file)
# -- code for generating MultiMNIST torch objects. --
# INSTRUCTIONS: run this file.
def sample_one(canvas_size, mnist, resize=True, translate=True):
i = np.random.randint(mnist['digits'].shape[0])
digit = mnist['digits'][i]
label = mnist['labels'][i]
if resize: # resize only if user specified
scale = 0.1 * np.random.randn() + 1.3
resized = imresize(digit, 1. / scale)
else:
resized = digit
w = resized.shape[0]
assert w == resized.shape[1]
padding = canvas_size - w
if translate: # translate only if user specified
pad_l = np.random.randint(0, padding)
pad_r = np.random.randint(0, padding)
pad_width = ((pad_l, padding - pad_l), (pad_r, padding - pad_r))
positioned = np.pad(resized, pad_width, 'constant', constant_values=0)
else:
pad_l = padding // 2
pad_r = padding // 2
pad_width = ((pad_l, padding - pad_l), (pad_r, padding - pad_r))
positioned = np.pad(resized, pad_width, 'constant', constant_values=0)
return positioned, label
def sample_multi(num_digits, canvas_size, mnist, resize=True, translate=True):
canvas = np.zeros((canvas_size, canvas_size))
labels = []
for _ in range(num_digits):
positioned_digit, label = sample_one(canvas_size, mnist, resize=resize,
translate=translate)
canvas += positioned_digit
labels.append(label)
# Crude check for overlapping digits.
if np.max(canvas) > 255:
return sample_multi(num_digits, canvas_size, mnist,
resize=resize, translate=translate)
else:
return canvas, labels
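# Illustrative sketch, not part of the original script: composing a small canvas
# from a toy "mnist" dict of random 28x28 patches, to show the expected input
# format for sample_one / sample_multi. Pixel values are kept below 100 so that
# two overlapping patches cannot trigger the overlap check above.
def _compose_example():
    toy_mnist = {
        'digits': np.random.randint(0, 100, size=(10, 28, 28)).astype(np.uint8),
        'labels': np.arange(10),
    }
    canvas, labels = sample_multi(2, 50, toy_mnist, resize=False, translate=True)
    # canvas: (50, 50) array, labels: list with 2 digit labels
    return canvas, labels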
def mk_dataset(n, mnist, min_digits, max_digits, canvas_size,
resize=True, translate=True):
x = []
y = []
for _ in range(n):
num_digits = np.random.randint(min_digits, max_digits + 1)
canvas, labels = sample_multi(num_digits, canvas_size, mnist,
resize=resize, translate=translate)
x.append(canvas)
y.append(labels)
return np.array(x, dtype=np.uint8), y
def load_mnist():
train_loader = torch.utils.data.DataLoader(
dset.MNIST(root='./data', train=True, download=True))
test_loader = torch.utils.data.DataLoader(
dset.MNIST(root='./data', train=False, download=True))
train_data = {
'digits': train_loader.dataset.train_data.numpy(),
'labels': train_loader.dataset.train_labels
}
test_data = {
'digits': test_loader.dataset.test_data.numpy(),
'labels': test_loader.dataset.test_labels
}
return train_data, test_data
def make_dataset(root, folder, training_file, test_file, min_digits=0, max_digits=2,
resize=True, translate=True):
if not os.path.isdir(os.path.join(root, folder)):
os.makedirs(os.path.join(root, folder))
np.random.seed(681307)
train_mnist, test_mnist = load_mnist()
train_x, train_y = mk_dataset(60000, train_mnist, min_digits, max_digits, 50,
resize=resize, translate=translate)
test_x, test_y = mk_dataset(10000, test_mnist, min_digits, max_digits, 50,
resize=resize, translate=translate)
train_x = torch.from_numpy(train_x).byte()
test_x = torch.from_numpy(test_x).byte()
training_set = (train_x, train_y)
test_set = (test_x, test_y)
with open(os.path.join(root, folder, training_file), 'wb') as f:
torch.save(training_set, f)
with open(os.path.join(root, folder, test_file), 'wb') as f:
torch.save(test_set, f)
def sample_one_fixed(canvas_size, mnist, pad_l, pad_r, scale=1.3):
i = np.random.randint(mnist['digits'].shape[0])
digit = mnist['digits'][i]
label = mnist['labels'][i]
resized = imresize(digit, 1. / scale)
w = resized.shape[0]
assert w == resized.shape[1]
padding = canvas_size - w
pad_width = ((pad_l, padding - pad_l), (pad_r, padding - pad_r))
positioned = np.pad(resized, pad_width, 'constant', constant_values=0)
return positioned, label
def sample_multi_fixed(num_digits, canvas_size, mnist, reverse=False,
scramble=False, no_repeat=False):
canvas = np.zeros((canvas_size, canvas_size))
labels = []
pads = [(4, 4), (4, 23), (23, 4), (23, 23)]
for i in range(num_digits):
if no_repeat: # keep trying to generate examples that are
# not already in previously generated labels
while True:
positioned_digit, label = sample_one_fixed(
canvas_size, mnist, pads[i][0], pads[i][1])
if label not in labels:
break
else:
positioned_digit, label = sample_one_fixed(
canvas_size, mnist, pads[i][0], pads[i][1])
canvas += positioned_digit
labels.append(label)
if reverse and random.random() > 0.5:
labels = labels[::-1]
if scramble:
random.shuffle(labels)
# Crude check for overlapping digits.
if np.max(canvas) > 255:
return sample_multi_fixed(num_digits, canvas_size, mnist, reverse=reverse,
scramble=scramble, no_repeat=no_repeat)
else:
return canvas, labels
def mk_dataset_fixed(n, mnist, min_digits, max_digits, canvas_size,
reverse=False, scramble=False, no_repeat=False):
x = []
y = []
for _ in range(n):
num_digits = np.random.randint(min_digits, max_digits + 1)
canvas, labels = sample_multi_fixed(num_digits, canvas_size, mnist, reverse=reverse,
scramble=scramble, no_repeat=no_repeat)
x.append(canvas)
y.append(labels)
return np.array(x, dtype=np.uint8), y
def make_dataset_fixed(root, folder, training_file, test_file,
min_digits=0, max_digits=3, reverse=False,
scramble=False, no_repeat=False):
if not os.path.isdir(os.path.join(root, folder)):
os.makedirs(os.path.join(root, folder))
np.random.seed(681307)
train_mnist, test_mnist = load_mnist()
train_x, train_y = mk_dataset_fixed(60000, train_mnist, min_digits, max_digits, 50,
reverse=reverse, scramble=scramble, no_repeat=no_repeat)
test_x, test_y = mk_dataset_fixed(10000, test_mnist, min_digits, max_digits, 50,
reverse=reverse, scramble=scramble, no_repeat=no_repeat)
train_x = torch.from_numpy(train_x).byte()
test_x = torch.from_numpy(test_x).byte()
training_set = (train_x, train_y)
test_set = (test_x, test_y)
with open(os.path.join(root, folder, training_file), 'wb') as f:
torch.save(training_set, f)
with open(os.path.join(root, folder, test_file), 'wb') as f:
torch.save(test_set, f)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--min-digits', type=int, default=0,
help='minimum number of digits to add to an image')
parser.add_argument('--max-digits', type=int, default=4,
help='maximum number of digits to add to an image')
parser.add_argument('--no-resize', action='store_true', default=False,
help='if True, fix the image to be MNIST size')
parser.add_argument('--no-translate', action='store_true', default=False,
help='if True, fix the image to be in the center')
parser.add_argument('--fixed', action='store_true', default=False,
                        help='If True, ignore resize/translate options and generate digits at fixed positions.')
parser.add_argument('--scramble', action='store_true', default=False,
help='If True, scramble labels and generate. Only does something if fixed is True.')
parser.add_argument('--reverse', action='store_true', default=False,
                        help='If True, reverse the label order (e.g. 4321 instead of 1234) with probability 0.5.')
parser.add_argument('--no-repeat', action='store_true', default=False,
help='If True, do not generate images with multiple of the same label.')
args = parser.parse_args()
args.resize = not args.no_resize
args.translate = not args.no_translate
if args.no_repeat and not args.fixed:
raise Exception('Must have --fixed if --no-repeat is supplied.')
if args.scramble and not args.fixed:
raise Exception('Must have --fixed if --scramble is supplied.')
if args.reverse and not args.fixed:
raise Exception('Must have --fixed if --reverse is supplied.')
if args.reverse and args.scramble:
        print('Found --reverse and --scramble. Overriding --reverse.')
args.reverse = False
# Generate the training set and dump it to disk. (Note, this will
# always generate the same data, else error out.)
if args.fixed:
make_dataset_fixed('./data', 'multimnist', 'training.pt', 'test.pt',
min_digits=args.min_digits, max_digits=args.max_digits,
reverse=args.reverse, scramble=args.scramble,
no_repeat=args.no_repeat)
else: # if not fixed, then make classic MultiMNIST dataset
# VAEs in general have trouble handling translation and rotation,
# likely resulting in blurry reconstructions without additional
# attention mechanisms. See AIR [1].
make_dataset('./data', 'multimnist', 'training.pt', 'test.pt',
min_digits=args.min_digits, max_digits=args.max_digits,
resize=args.resize, translate=args.translate)
| 13,354 | 37.93586 | 113 | py |
| multimodal-vae-public | multimodal-vae-public-master/multimnist/train.py |
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import shutil
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import transforms
from model import MVAE
from datasets import MultiMNIST
from utils import charlist_tensor
def elbo_loss(recon_image, image, recon_text, text, mu, logvar,
lambda_image=1.0, lambda_text=1.0, annealing_factor=1):
"""Bimodal ELBO loss function.
@param recon_image: torch.Tensor
reconstructed image
@param image: torch.Tensor
input image
@param recon_text: torch.Tensor
reconstructed text probabilities
@param text: torch.Tensor
input text (one-hot)
@param mu: torch.Tensor
mean of latent distribution
@param logvar: torch.Tensor
log-variance of latent distribution
@param lambda_image: float [default: 1.0]
weight for image BCE
@param lambda_text: float [default: 1.0]
weight for text BCE
@param annealing_factor: integer [default: 1]
multiplier for KL divergence term
@return ELBO: torch.Tensor
evidence lower bound
"""
image_bce, text_bce = 0, 0 # default params
if recon_image is not None and image is not None:
image_bce = torch.sum(binary_cross_entropy_with_logits(
recon_image.view(-1, 1 * 50 * 50),
image.view(-1, 1 * 50 * 50)), dim=1)
if recon_text is not None and text is not None:
batch_size, n_digits = recon_text.size(0), recon_text.size(1)
recon_text = recon_text.view(-1, recon_text.size(2))
text = text.view(-1)
# sum over the different classes
text_bce = torch.sum(cross_entropy(recon_text, text), dim=1)
text_bce = text_bce.view(batch_size, n_digits)
# sum over the number of digits
text_bce = torch.sum(text_bce, dim=1)
# Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
# https://arxiv.org/abs/1312.6114
KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=1)
ELBO = torch.mean(lambda_image * image_bce + lambda_text * text_bce
+ annealing_factor * KLD)
return ELBO
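# Illustrative sketch, not part of the original script: calling elbo_loss above
# on random MultiMNIST-shaped reconstructions. The shapes (batch of 4, 4 digit
# slots, 12 characters, 64 latents) are assumptions chosen only for this example.
def _elbo_example():
    recon_image = Variable(torch.randn(4, 1, 50, 50))
    image = Variable(torch.rand(4, 1, 50, 50))
    recon_text = Variable(torch.randn(4, 4, 12))             # (batch, n_digits, n_characters)
    text = Variable(torch.LongTensor(4, 4).random_(0, 12))
    mu = Variable(torch.zeros(4, 64))
    logvar = Variable(torch.zeros(4, 64))
    return elbo_loss(recon_image, image, recon_text, text, mu, logvar)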
def binary_cross_entropy_with_logits(input, target):
"""Sigmoid Activation + Binary Cross Entropy
@param input: torch.Tensor (size N)
@param target: torch.Tensor (size N)
@return loss: torch.Tensor (size N)
"""
if not (target.size() == input.size()):
raise ValueError("Target size ({}) must be the same as input size ({})".format(
target.size(), input.size()))
return (torch.clamp(input, 0) - input * target
+ torch.log(1 + torch.exp(-torch.abs(input))))
def cross_entropy(input, target, eps=1e-6):
"""k-Class Cross Entropy (Log Softmax + Log Loss)
@param input: torch.Tensor (size N x K)
@param target: torch.Tensor (size N x K)
@param eps: error to add (default: 1e-6)
@return loss: torch.Tensor (size N)
"""
if not (target.size(0) == input.size(0)):
raise ValueError(
"Target size ({}) must be the same as input size ({})".format(
target.size(0), input.size(0)))
log_input = F.log_softmax(input + eps, dim=1)
y_onehot = Variable(log_input.data.new(log_input.size()).zero_())
y_onehot = y_onehot.scatter(1, target.unsqueeze(1), 1)
loss = y_onehot * log_input
return -loss
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def save_checkpoint(state, is_best, folder='./', filename='checkpoint.pth.tar'):
if not os.path.isdir(folder):
os.mkdir(folder)
torch.save(state, os.path.join(folder, filename))
if is_best:
shutil.copyfile(os.path.join(folder, filename),
os.path.join(folder, 'model_best.pth.tar'))
def load_checkpoint(file_path, use_cuda=False):
checkpoint = torch.load(file_path) if use_cuda else \
torch.load(file_path, map_location=lambda storage, location: storage)
model = MVAE(checkpoint['n_latents'])
model.load_state_dict(checkpoint['state_dict'])
return model
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--n-latents', type=int, default=64,
help='size of the latent embedding [default: 64]')
parser.add_argument('--batch-size', type=int, default=100, metavar='N',
help='input batch size for training [default: 100]')
parser.add_argument('--epochs', type=int, default=500, metavar='N',
help='number of epochs to train [default: 500]')
parser.add_argument('--annealing-epochs', type=int, default=200, metavar='N',
help='number of epochs to anneal KL for [default: 200]')
parser.add_argument('--lr', type=float, default=1e-3, metavar='LR',
help='learning rate [default: 1e-3]')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status [default: 10]')
parser.add_argument('--lambda-image', type=float, default=1.,
                        help='multiplier for image reconstruction [default: 1]')
parser.add_argument('--lambda-text', type=float, default=10.,
                        help='multiplier for text reconstruction [default: 10]')
parser.add_argument('--cuda', action='store_true', default=False,
help='enables CUDA training [default: False]')
args = parser.parse_args()
args.cuda = args.cuda and torch.cuda.is_available()
if not os.path.isdir('./trained_models'):
os.makedirs('./trained_models')
train_loader = torch.utils.data.DataLoader(
MultiMNIST('./data', train=True, download=True, transform=transforms.ToTensor(),
target_transform=charlist_tensor),
batch_size=args.batch_size, shuffle=True)
N_mini_batches = len(train_loader)
test_loader = torch.utils.data.DataLoader(
MultiMNIST('./data', train=False, download=True, transform=transforms.ToTensor(),
target_transform=charlist_tensor),
batch_size=args.batch_size, shuffle=False)
model = MVAE(args.n_latents)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
if args.cuda:
model.cuda()
def train(epoch):
model.train()
train_loss_meter = AverageMeter()
for batch_idx, (image, text) in enumerate(train_loader):
if epoch < args.annealing_epochs:
# compute the KL annealing factor for the current mini-batch in the current epoch
annealing_factor = (float(batch_idx + (epoch - 1) * N_mini_batches + 1) /
float(args.annealing_epochs * N_mini_batches))
else:
# by default the KL annealing factor is unity
annealing_factor = 1.0
if args.cuda:
image = image.cuda()
text = text.cuda()
image = Variable(image)
text = Variable(text)
batch_size = len(image)
# refresh the optimizer
optimizer.zero_grad()
# pass data through model
recon_image_1, recon_text_1, mu_1, logvar_1 = model(image, text)
recon_image_2, recon_text_2, mu_2, logvar_2 = model(image)
recon_image_3, recon_text_3, mu_3, logvar_3 = model(text=text)
# compute ELBO for each data combo
joint_loss = elbo_loss(recon_image_1, image, recon_text_1, text, mu_1, logvar_1,
lambda_image=args.lambda_image, lambda_text=args.lambda_text,
annealing_factor=annealing_factor)
image_loss = elbo_loss(recon_image_2, image, None, None, mu_2, logvar_2,
lambda_image=args.lambda_image, lambda_text=args.lambda_text,
annealing_factor=annealing_factor)
text_loss = elbo_loss(None, None, recon_text_3, text, mu_3, logvar_3,
lambda_image=args.lambda_image, lambda_text=args.lambda_text,
annealing_factor=annealing_factor)
train_loss = joint_loss + image_loss + text_loss
train_loss_meter.update(train_loss.data[0], batch_size)
train_loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tAnnealing-Factor: {:.3f}'.format(
epoch, batch_idx * len(image), len(train_loader.dataset),
100. * batch_idx / len(train_loader), train_loss_meter.avg, annealing_factor))
print('====> Epoch: {}\tLoss: {:.4f}'.format(epoch, train_loss_meter.avg))
def test(epoch):
model.eval()
test_loss_meter = AverageMeter()
for batch_idx, (image, text) in enumerate(test_loader):
if args.cuda:
image = image.cuda()
text = text.cuda()
image = Variable(image, volatile=True)
text = Variable(text, volatile=True)
batch_size = len(image)
recon_image_1, recon_text_1, mu_1, logvar_1 = model(image, text)
recon_image_2, recon_text_2, mu_2, logvar_2 = model(image)
recon_image_3, recon_text_3, mu_3, logvar_3 = model(text=text)
joint_loss = elbo_loss(recon_image_1, image, recon_text_1, text, mu_1, logvar_1)
image_loss = elbo_loss(recon_image_2, image, None, None, mu_2, logvar_2)
text_loss = elbo_loss(None, None, recon_text_3, text, mu_3, logvar_3)
test_loss = joint_loss + image_loss + text_loss
test_loss_meter.update(test_loss.data[0], batch_size)
print('====> Test Loss: {:.4f}'.format(test_loss_meter.avg))
return test_loss_meter.avg
best_loss = sys.maxint
for epoch in range(1, args.epochs + 1):
train(epoch)
test_loss = test(epoch)
is_best = test_loss < best_loss
best_loss = min(test_loss, best_loss)
# save the best model and current model
save_checkpoint({
'state_dict': model.state_dict(),
'best_loss': best_loss,
'n_latents': args.n_latents,
'optimizer' : optimizer.state_dict(),
}, is_best, folder='./trained_models')
| 11,314 | 39.555556 | 105 | py |
| multimodal-vae-public | multimodal-vae-public-master/celeba/sample.py |
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import transforms
from torchvision.utils import save_image
from train import load_checkpoint
from datasets import ATTR_IX_TO_KEEP, N_ATTRS
from datasets import ATTR_TO_IX_DICT, IX_TO_ATTR_DICT
from datasets import tensor_to_attributes
from datasets import CelebAttributes
def fetch_celeba_image(attr_str):
    """Return a random image from the CelebA dataset having the given attribute.
    @param attr_str: string
name of the attribute (see ATTR_TO_IX_DICT)
@return: torch.autograd.Variable
CelebA image
"""
loader = torch.utils.data.DataLoader(
CelebAttributes(
partition='test',
image_transform=transforms.Compose([transforms.Resize(64),
transforms.CenterCrop(64),
transforms.ToTensor()])),
batch_size=128, shuffle=False)
images, attrs = [], []
for batch_idx, (image, attr) in enumerate(loader):
images.append(image)
attrs.append(attr)
images = torch.cat(images).cpu().numpy()
attrs = torch.cat(attrs).cpu().numpy()
attr_ix = ATTR_IX_TO_KEEP.index(ATTR_TO_IX_DICT[attr_str])
images = images[attrs[:, attr_ix] == 1]
image = images[np.random.choice(np.arange(images.shape[0]))]
image = torch.from_numpy(image).float()
image = image.unsqueeze(0)
return Variable(image, volatile=True)
def fetch_celeba_attrs(attr_str):
    """Return a one-hot attribute vector for the given attribute.
    @param attr_str: string
name of the attribute (see ATTR_TO_IX_DICT)
@return: torch.autograd.Variable
Variable wrapped around an integer.
"""
attrs = torch.zeros(N_ATTRS)
attr_ix = ATTR_IX_TO_KEEP.index(ATTR_TO_IX_DICT[attr_str])
attrs[attr_ix] = 1
return Variable(attrs.unsqueeze(0), volatile=True)
if __name__ == "__main__":
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('model_path', type=str, help='path to trained model file')
parser.add_argument('--n-samples', type=int, default=64,
help='Number of images and texts to sample [default: 64]')
    # condition sampling on a particular image
    parser.add_argument('--condition-on-image', type=str, default=None,
                        help='If given, generate attributes conditioned on an image with this attribute.')
    # condition sampling on particular attributes
    parser.add_argument('--condition-on-attrs', type=str, default=None,
                        help='If given, generate images conditioned on this attribute.')
parser.add_argument('--cuda', action='store_true', default=False,
help='enables CUDA training [default: False]')
args = parser.parse_args()
args.cuda = args.cuda and torch.cuda.is_available()
model = load_checkpoint(args.model_path, use_cuda=args.cuda)
model.eval()
if args.cuda:
model.cuda()
# mode 1: unconditional generation
if not args.condition_on_image and not args.condition_on_attrs:
mu = Variable(torch.Tensor([0]))
std = Variable(torch.Tensor([1]))
if args.cuda:
mu = mu.cuda()
std = std.cuda()
# mode 2: generate conditioned on image
elif args.condition_on_image and not args.condition_on_attrs:
image = fetch_celeba_image(args.condition_on_image)
if args.cuda:
image = image.cuda()
        mu, logvar = model.infer(image=image)
std = logvar.mul(0.5).exp_()
# mode 3: generate conditioned on attrs
elif args.condition_on_attrs and not args.condition_on_image:
attrs = fetch_celeba_attrs(args.condition_on_attrs)
if args.cuda:
attrs = attrs.cuda()
        mu, logvar = model.infer(attrs=attrs)
std = logvar.mul(0.5).exp_()
# mode 4: generate conditioned on image and attrs
elif args.condition_on_attrs and args.condition_on_image:
image = fetch_celeba_image(args.condition_on_image)
attrs = fetch_celeba_attrs(args.condition_on_attrs)
if args.cuda:
image = image.cuda()
attrs = attrs.cuda()
        mu, logvar = model.infer(image=image, attrs=attrs)
std = logvar.mul(0.5).exp_()
# sample from uniform gaussian
sample = Variable(torch.randn(args.n_samples, model.n_latents))
if args.cuda:
sample = sample.cuda()
# sample from particular gaussian by multiplying + adding
mu = mu.expand_as(sample)
std = std.expand_as(sample)
sample = sample.mul(std).add_(mu)
# generate image and text
image_recon = F.sigmoid(model.image_decoder(sample)).cpu().data
attrs_recon = F.sigmoid(model.attrs_decoder(sample)).cpu().data
# save image samples to filesystem
save_image(image_recon.view(args.n_samples, 3, 64, 64),
'./sample_image.png')
# save text samples to filesystem
sample_attrs = []
for i in xrange(attrs_recon.size(0)):
attrs = tensor_to_attributes(attrs_recon[i])
sample_attrs.append(','.join(attrs))
with open('./sample_attrs.txt', 'w') as fp:
for attrs in sample_attrs:
fp.write('%s\n' % attrs)
| 5,535 | 38.542857 | 82 | py |
| multimodal-vae-public | multimodal-vae-public-master/celeba/model.py |
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
from datasets import N_ATTRS
class MVAE(nn.Module):
"""Multimodal Variational Autoencoder.
@param n_latents: integer
number of latent dimensions
"""
def __init__(self, n_latents):
super(MVAE, self).__init__()
self.image_encoder = ImageEncoder(n_latents)
self.image_decoder = ImageDecoder(n_latents)
self.attrs_encoder = AttributeEncoder(n_latents)
self.attrs_decoder = AttributeDecoder(n_latents)
self.experts = ProductOfExperts()
self.n_latents = n_latents
def reparametrize(self, mu, logvar):
if self.training:
std = logvar.mul(0.5).exp_()
eps = Variable(std.data.new(std.size()).normal_())
return eps.mul(std).add_(mu)
else: # return mean during inference
return mu
def forward(self, image=None, attrs=None):
mu, logvar = self.infer(image, attrs)
# reparametrization trick to sample
z = self.reparametrize(mu, logvar)
# reconstruct inputs based on that gaussian
image_recon = self.image_decoder(z)
attrs_recon = self.attrs_decoder(z)
return image_recon, attrs_recon, mu, logvar
def infer(self, image=None, attrs=None):
batch_size = image.size(0) if image is not None else attrs.size(0)
use_cuda = next(self.parameters()).is_cuda # check if CUDA
# initialize the universal prior expert
mu, logvar = prior_expert((1, batch_size, self.n_latents),
use_cuda=use_cuda)
if image is not None:
image_mu, image_logvar = self.image_encoder(image)
mu = torch.cat((mu, image_mu.unsqueeze(0)), dim=0)
logvar = torch.cat((logvar, image_logvar.unsqueeze(0)), dim=0)
if attrs is not None:
attrs_mu, attrs_logvar = self.attrs_encoder(attrs)
mu = torch.cat((mu, attrs_mu.unsqueeze(0)), dim=0)
logvar = torch.cat((logvar, attrs_logvar.unsqueeze(0)), dim=0)
# product of experts to combine gaussians
mu, logvar = self.experts(mu, logvar)
return mu, logvar
class ImageEncoder(nn.Module):
"""Parametrizes q(z|x).
This is the standard DCGAN architecture.
@param n_latents: integer
number of latent variable dimensions.
"""
def __init__(self, n_latents):
super(ImageEncoder, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(3, 32, 4, 2, 1, bias=False),
Swish(),
nn.Conv2d(32, 64, 4, 2, 1, bias=False),
nn.BatchNorm2d(64),
Swish(),
nn.Conv2d(64, 128, 4, 2, 1, bias=False),
nn.BatchNorm2d(128),
Swish(),
nn.Conv2d(128, 256, 4, 1, 0, bias=False),
nn.BatchNorm2d(256),
Swish())
self.classifier = nn.Sequential(
nn.Linear(256 * 5 * 5, 512),
Swish(),
nn.Dropout(p=0.1),
nn.Linear(512, n_latents * 2))
self.n_latents = n_latents
def forward(self, x):
n_latents = self.n_latents
x = self.features(x)
x = x.view(-1, 256 * 5 * 5)
x = self.classifier(x)
return x[:, :n_latents], x[:, n_latents:]
class ImageDecoder(nn.Module):
"""Parametrizes p(x|z).
This is the standard DCGAN architecture.
@param n_latents: integer
number of latent variable dimensions.
"""
def __init__(self, n_latents):
super(ImageDecoder, self).__init__()
self.upsample = nn.Sequential(
nn.Linear(n_latents, 256 * 5 * 5),
Swish())
self.hallucinate = nn.Sequential(
nn.ConvTranspose2d(256, 128, 4, 1, 0, bias=False),
nn.BatchNorm2d(128),
Swish(),
nn.ConvTranspose2d(128, 64, 4, 2, 1, bias=False),
nn.BatchNorm2d(64),
Swish(),
nn.ConvTranspose2d(64, 32, 4, 2, 1, bias=False),
nn.BatchNorm2d(32),
Swish(),
nn.ConvTranspose2d(32, 3, 4, 2, 1, bias=False))
def forward(self, z):
# the input will be a vector of size |n_latents|
z = self.upsample(z)
z = z.view(-1, 256, 5, 5)
z = self.hallucinate(z)
return z # NOTE: no sigmoid here. See train.py
class AttributeEncoder(nn.Module):
"""Parametrizes q(z|y).
We use a single inference network that encodes
all 18 features.
@param n_latents: integer
number of latent variable dimensions.
"""
def __init__(self, n_latents):
super(AttributeEncoder, self).__init__()
self.net = nn.Sequential(
nn.Linear(N_ATTRS, 512),
nn.BatchNorm1d(512),
Swish(),
nn.Linear(512, 512),
nn.BatchNorm1d(512),
Swish(),
nn.Linear(512, n_latents * 2))
self.n_latents = n_latents
def forward(self, x):
n_latents = self.n_latents
x = self.net(x)
return x[:, :n_latents], x[:, n_latents:]
class AttributeDecoder(nn.Module):
"""Parametrizes p(y|z).
We use a single generative network that decodes
all 18 features.
@param n_latents: integer
number of latent variable dimensions.
"""
def __init__(self, n_latents):
super(AttributeDecoder, self).__init__()
self.net = nn.Sequential(
nn.Linear(n_latents, 512),
nn.BatchNorm1d(512),
Swish(),
nn.Linear(512, 512),
nn.BatchNorm1d(512),
Swish(),
nn.Linear(512, 512),
nn.BatchNorm1d(512),
Swish(),
nn.Linear(512, N_ATTRS))
def forward(self, z):
z = self.net(z)
# not a one-hotted prediction: this returns a value
# for every single index
return z # NOTE: no sigmoid here. See train.py
class ProductOfExperts(nn.Module):
"""Return parameters for product of independent experts.
See https://arxiv.org/pdf/1410.7827.pdf for equations.
@param mu: M x D for M experts
@param logvar: M x D for M experts
"""
def forward(self, mu, logvar, eps=1e-8):
var = torch.exp(logvar) + eps
# precision of i-th Gaussian expert at point x
T = 1. / var
pd_mu = torch.sum(mu * T, dim=0) / torch.sum(T, dim=0)
pd_var = 1. / torch.sum(T, dim=0)
pd_logvar = torch.log(pd_var)
return pd_mu, pd_logvar
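# For Gaussian experts N(mu_i, var_i), the product is again Gaussian with precision T = sum_i 1/var_i,
# variance pd_var = 1/T and mean pd_mu = (sum_i mu_i/var_i) / T.
# Quick sanity check with hypothetical values: two 1-D experts with means 0 and 2 and unit variance
# combine to mean 1 and variance 0.5, i.e. the product is tighter than either expert.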
class Swish(nn.Module):
"""https://arxiv.org/abs/1710.05941"""
def forward(self, x):
return x * F.sigmoid(x)
def prior_expert(size, use_cuda=False):
"""Universal prior expert. Here we use a spherical
Gaussian: N(0, 1).
@param size: integer
dimensionality of Gaussian
@param use_cuda: boolean [default: False]
cast CUDA on variables
"""
mu = Variable(torch.zeros(size))
logvar = Variable(torch.log(torch.ones(size)))
if use_cuda:
mu, logvar = mu.cuda(), logvar.cuda()
return mu, logvar
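# Minimal usage sketch (illustrative only; `images` and `attrs` are assumed to be batches of
# 3x64x64 image tensors and 18-dim attribute vectors):
# model = MVAE(n_latents=100)
# image_recon, attrs_recon, mu, logvar = model(image=images, attrs=attrs) # joint pass
# image_recon, attrs_recon, mu, logvar = model(image=images) # image-only pass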
| 7,415
| 31.243478
| 74
|
py
|
multimodal-vae-public
|
multimodal-vae-public-master/celeba/datasets.py
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import copy
import random
import numpy as np
import numpy.random as npr
from PIL import Image
from random import shuffle
from scipy.misc import imresize
import torch
from torch.utils.data.dataset import Dataset
VALID_PARTITIONS = {'train': 0, 'val': 1, 'test': 2}
# go from label index to interpretable index
ATTR_TO_IX_DICT = {'Sideburns': 30, 'Black_Hair': 8, 'Wavy_Hair': 33, 'Young': 39, 'Heavy_Makeup': 18,
'Blond_Hair': 9, 'Attractive': 2, '5_o_Clock_Shadow': 0, 'Wearing_Necktie': 38,
'Blurry': 10, 'Double_Chin': 14, 'Brown_Hair': 11, 'Mouth_Slightly_Open': 21,
'Goatee': 16, 'Bald': 4, 'Pointy_Nose': 27, 'Gray_Hair': 17, 'Pale_Skin': 26,
'Arched_Eyebrows': 1, 'Wearing_Hat': 35, 'Receding_Hairline': 28, 'Straight_Hair': 32,
'Big_Nose': 7, 'Rosy_Cheeks': 29, 'Oval_Face': 25, 'Bangs': 5, 'Male': 20, 'Mustache': 22,
'High_Cheekbones': 19, 'No_Beard': 24, 'Eyeglasses': 15, 'Bags_Under_Eyes': 3,
'Wearing_Necklace': 37, 'Wearing_Lipstick': 36, 'Big_Lips': 6, 'Narrow_Eyes': 23,
'Chubby': 13, 'Smiling': 31, 'Bushy_Eyebrows': 12, 'Wearing_Earrings': 34}
# we only keep 18 of the more visually distinctive features
# See [1] Perarnau, Guim, et al. "Invertible conditional gans for
# image editing." arXiv preprint arXiv:1611.06355 (2016).
ATTR_IX_TO_KEEP = [4, 5, 8, 9, 11, 12, 15, 17, 18, 20, 21, 22, 26, 28, 31, 32, 33, 35]
IX_TO_ATTR_DICT = {v:k for k, v in ATTR_TO_IX_DICT.iteritems()}
N_ATTRS = len(ATTR_IX_TO_KEEP)
ATTR_TO_PLOT = ['Heavy_Makeup', 'Male', 'Mouth_Slightly_Open', 'Smiling', 'Wavy_Hair']
class CelebAttributes(Dataset):
"""Define dataset of images of celebrities and attributes.
The user needs to have pre-defined the Anno and Eval folder from
http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html
@param partition: string
train|val|test [default: train]
See VALID_PARTITIONS global variable.
@param data_dir: string
path to root of dataset images [default: ./data]
@param image_transform: ?torchvision.Transforms
optional function to apply to training inputs
@param attr_transform: ?torchvision.Transforms
optional function to apply to training outputs
"""
def __init__(self, partition='train', data_dir='./data',
image_transform=None, attr_transform=None):
self.partition = partition
self.image_transform = image_transform
self.attr_transform = attr_transform
self.data_dir = data_dir
assert partition in VALID_PARTITIONS.keys()
self.image_paths = load_eval_partition(partition, data_dir=data_dir)
self.attr_data = load_attributes(self.image_paths, partition,
data_dir=data_dir)
self.size = int(len(self.image_paths))
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
image_path = os.path.join(self.data_dir, 'img_align_celeba',
self.image_paths[index])
attr = self.attr_data[index]
image = Image.open(image_path).convert('RGB')
if self.image_transform is not None:
image = self.image_transform(image)
if self.attr_transform is not None:
attr = self.attr_transform(attr)
return image, attr
def __len__(self):
return self.size
def load_eval_partition(partition, data_dir='./data'):
"""After downloading the dataset, we can load a subset for
training or testing.
@param partition: string
which subset to use (train|val|test)
@param data_dir: string [default: ./data]
where the images are saved
"""
eval_data = []
with open(os.path.join(data_dir, 'Eval/list_eval_partition.txt')) as fp:
rows = fp.readlines()
for row in rows:
path, label = row.strip().split(' ')
label = int(label)
if label == VALID_PARTITIONS[partition]:
eval_data.append(path)
return eval_data
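# list_eval_partition.txt maps every image filename to 0/1/2, which VALID_PARTITIONS translates
# into the train/val/test splits.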
def load_attributes(paths, partition, data_dir='./data'):
"""Load the attributes into a torch tensor.
@param paths: list of strings
image filenames used to select rows from the attribute file
@param partition: string
which subset to use (train|val|test)
@param data_dir: string [default: ./data]
where the images are saved
"""
if os.path.isfile(os.path.join(data_dir, 'Anno/attr_%s.npy' % partition)):
attr_data = np.load(os.path.join(data_dir, 'Anno/attr_%s.npy' % partition))
else:
attr_data = []
with open(os.path.join(data_dir, 'Anno/list_attr_celeba.txt')) as fp:
rows = fp.readlines()
for ix, row in enumerate(rows[2:]):
row = row.strip().split()
path, attrs = row[0], row[1:]
if path in paths:
attrs = np.array(attrs).astype(int)
attrs[attrs < 0] = 0
attr_data.append(attrs)
attr_data = np.vstack(attr_data).astype(np.int64)
attr_data = torch.from_numpy(attr_data).float()
return attr_data[:, ATTR_IX_TO_KEEP]
def tensor_to_attributes(tensor):
"""Use this for the <image_transform>.
@param tensor: PyTorch Tensor
D dimensional tensor
@return attributes: list of strings
"""
attrs = []
n = tensor.size(0)
tensor = torch.round(tensor)
for i in xrange(n):
if tensor[i] > 0.5:
attr = IX_TO_ATTR_DICT[ATTR_IX_TO_KEEP[i]]
attrs.append(attr)
return attrs
| 6,170
| 39.333333
| 111
|
py
|
multimodal-vae-public
|
multimodal-vae-public-master/celeba/train.py
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import shutil
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import transforms
from model import MVAE
from datasets import CelebAttributes
from datasets import N_ATTRS
def elbo_loss(recon_image, image, recon_attrs, attrs, mu, logvar,
lambda_image=1.0, lambda_attrs=1.0, annealing_factor=1):
"""Bimodal ELBO loss function.
@param recon_image: torch.Tensor
reconstructed image
@param image: torch.Tensor
input image
@param recon_attrs: torch.Tensor
reconstructed attribute probabilities
@param attrs: torch.Tensor
input attributes
@param mu: torch.Tensor
mean of latent distribution
@param logvar: torch.Tensor
log-variance of latent distribution
@param lambda_image: float [default: 1.0]
weight for image BCE
@param lambda_attrs: float [default: 1.0]
weight for attribute BCE
@param annealing_factor: integer [default: 1]
multiplier for KL divergence term
@return ELBO: torch.Tensor
evidence lower bound
"""
image_bce, attrs_bce = 0, 0 # default params
if recon_image is not None and image is not None:
image_bce = torch.sum(binary_cross_entropy_with_logits(
recon_image.view(-1, 3 * 64 * 64),
image.view(-1, 3 * 64 * 64)), dim=1)
if recon_attrs is not None and attrs is not None:
for i in xrange(N_ATTRS):
attr_bce = binary_cross_entropy_with_logits(
recon_attrs[:, i], attrs[:, i])
attrs_bce += attr_bce
# Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
# https://arxiv.org/abs/1312.6114
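# Closed form for a diagonal Gaussian posterior against a standard normal prior:
# KL(N(mu, sigma^2) || N(0, I)) = -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)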
KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=1)
ELBO = torch.mean(lambda_image * image_bce + lambda_attrs * attrs_bce
+ annealing_factor * KLD)
return ELBO
def binary_cross_entropy_with_logits(input, target):
"""Sigmoid Activation + Binary Cross Entropy
@param input: torch.Tensor (size N)
@param target: torch.Tensor (size N)
@return loss: torch.Tensor (size N)
"""
if not (target.size() == input.size()):
raise ValueError("Target size ({}) must be the same as input size ({})".format(
target.size(), input.size()))
return (torch.clamp(input, 0) - input * target
+ torch.log(1 + torch.exp(-torch.abs(input))))
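# This is the numerically stable form of sigmoid followed by binary cross entropy:
# loss = max(x, 0) - x * t + log(1 + exp(-|x|))
# which equals -[t*log(sigmoid(x)) + (1-t)*log(1-sigmoid(x))] without overflowing exp() for large |x|.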
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def save_checkpoint(state, is_best, folder='./', filename='checkpoint.pth.tar'):
if not os.path.isdir(folder):
os.mkdir(folder)
torch.save(state, os.path.join(folder, filename))
if is_best:
shutil.copyfile(os.path.join(folder, filename),
os.path.join(folder, 'model_best.pth.tar'))
def load_checkpoint(file_path, use_cuda=False):
checkpoint = torch.load(file_path) if use_cuda else \
torch.load(file_path, map_location=lambda storage, location: storage)
model = MVAE(checkpoint['n_latents'])
model.load_state_dict(checkpoint['state_dict'])
return model
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--n-latents', type=int, default=100,
help='size of the latent embedding [default: 100]')
parser.add_argument('--batch-size', type=int, default=100, metavar='N',
help='input batch size for training [default: 100]')
parser.add_argument('--epochs', type=int, default=100, metavar='N',
help='number of epochs to train [default: 100]')
parser.add_argument('--annealing-epochs', type=int, default=20, metavar='N',
help='number of epochs to anneal KL for [default: 20]')
parser.add_argument('--lr', type=float, default=1e-4, metavar='LR',
help='learning rate [default: 1e-4]')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status [default: 10]')
parser.add_argument('--lambda-image', type=float, default=1.,
help='multiplier for image reconstruction [default: 1]')
parser.add_argument('--lambda-attrs', type=float, default=10.,
help='multiplier for attributes reconstruction [default: 10]')
parser.add_argument('--cuda', action='store_true', default=False,
help='enables CUDA training [default: False]')
args = parser.parse_args()
args.cuda = args.cuda and torch.cuda.is_available()
if not os.path.isdir('./trained_models'):
os.makedirs('./trained_models')
# crop the input image to 64 x 64
preprocess_data = transforms.Compose([transforms.Resize(64),
transforms.CenterCrop(64),
transforms.ToTensor()])
train_loader = torch.utils.data.DataLoader(
CelebAttributes(partition='train', data_dir='./data',
image_transform=preprocess_data),
batch_size=args.batch_size, shuffle=True)
N_mini_batches = len(train_loader)
test_loader = torch.utils.data.DataLoader(
CelebAttributes(partition='val', data_dir='./data',
image_transform=preprocess_data),
batch_size=args.batch_size, shuffle=False)
model = MVAE(args.n_latents)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
if args.cuda:
model.cuda()
def train(epoch):
model.train()
train_loss_meter = AverageMeter()
for batch_idx, (image, attrs) in enumerate(train_loader):
if epoch < args.annealing_epochs:
# compute the KL annealing factor for the current mini-batch in the current epoch
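# (linear warm-up: the factor grows from roughly 0 to 1 over `annealing-epochs` epochs, counted in
# mini-batches, so the KL regularizer is phased in gradually, i.e. standard beta-annealing for VAEs)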
annealing_factor = (float(batch_idx + (epoch - 1) * N_mini_batches + 1) /
float(args.annealing_epochs * N_mini_batches))
else:
# by default the KL annealing factor is unity
annealing_factor = 1.0
if args.cuda:
image = image.cuda()
attrs = attrs.cuda()
image = Variable(image)
attrs = Variable(attrs)
batch_size = len(image)
# refresh the optimizer
optimizer.zero_grad()
# pass data through model
recon_image_1, recon_attrs_1, mu_1, logvar_1 = model(image, attrs)
recon_image_2, recon_attrs_2, mu_2, logvar_2 = model(image)
recon_image_3, recon_attrs_3, mu_3, logvar_3 = model(attrs=attrs)
# compute ELBO for each data combo
joint_loss = elbo_loss(recon_image_1, image, recon_attrs_1, attrs, mu_1, logvar_1,
lambda_image=args.lambda_image, lambda_attrs=args.lambda_attrs,
annealing_factor=annealing_factor)
image_loss = elbo_loss(recon_image_2, image, None, None, mu_2, logvar_2,
lambda_image=args.lambda_image, lambda_attrs=args.lambda_attrs,
annealing_factor=annealing_factor)
attrs_loss = elbo_loss(None, None, recon_attrs_3, attrs, mu_3, logvar_3,
lambda_image=args.lambda_image, lambda_attrs=args.lambda_attrs,
annealing_factor=annealing_factor)
train_loss = joint_loss + image_loss + attrs_loss
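# The objective is a sum of ELBOs, one per observed input combination (joint, image-only, attrs-only),
# which is what trains the MVAE to handle missing modalities at test time.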
train_loss_meter.update(train_loss.data[0], batch_size)
# compute and take gradient step
train_loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tAnnealing-Factor: {:.3f}'.format(
epoch, batch_idx * len(image), len(train_loader.dataset),
100. * batch_idx / len(train_loader), train_loss_meter.avg, annealing_factor))
print('====> Epoch: {}\tLoss: {:.4f}'.format(epoch, train_loss_meter.avg))
def test(epoch):
model.eval()
test_loss_meter = AverageMeter()
pbar = tqdm(total=len(test_loader))
for batch_idx, (image, attrs) in enumerate(test_loader):
if args.cuda:
image = image.cuda()
attrs = attrs.cuda()
image = Variable(image, volatile=True)
attrs = Variable(attrs, volatile=True)
batch_size = len(image)
recon_image_1, recon_attrs_1, mu_1, logvar_1 = model(image, attrs)
recon_image_2, recon_attrs_2, mu_2, logvar_2 = model(image)
recon_image_3, recon_attrs_3, mu_3, logvar_3 = model(attrs=attrs)
joint_loss = elbo_loss(recon_image_1, image, recon_attrs_1, attrs, mu_1, logvar_1,
lambda_image=args.lambda_image, lambda_attrs=args.lambda_attrs)
image_loss = elbo_loss(recon_image_2, image, None, None, mu_2, logvar_2,
lambda_image=args.lambda_image, lambda_attrs=args.lambda_attrs)
attrs_loss = elbo_loss(None, None, recon_attrs_3, attrs, mu_3, logvar_3,
lambda_image=args.lambda_image, lambda_attrs=args.lambda_attrs)
test_loss = joint_loss + image_loss + attrs_loss
test_loss_meter.update(test_loss.data[0], batch_size)
pbar.update()
pbar.close()
print('====> Test Loss: {:.4f}'.format(test_loss_meter.avg))
return test_loss_meter.avg
best_loss = sys.maxint
for epoch in range(1, args.epochs + 1):
train(epoch)
loss = test(epoch)
is_best = loss < best_loss
best_loss = min(loss, best_loss)
# save the best model and current model
save_checkpoint({
'state_dict': model.state_dict(),
'best_loss': best_loss,
'n_latents': args.n_latents,
'optimizer' : optimizer.state_dict(),
}, is_best, folder='./trained_models')
| 11,037
| 40.340824
| 105
|
py
|
multimodal-vae-public
|
multimodal-vae-public-master/celeba19/model.py
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import sys
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
sys.path.append('../celeba')
from datasets import N_ATTRS
class MVAE(nn.Module):
"""Multimodal Variational Autoencoder.
@param n_latents: integer
number of latent dimensions
"""
def __init__(self, n_latents):
super(MVAE, self).__init__()
self.image_encoder = ImageEncoder(n_latents)
self.image_decoder = ImageDecoder(n_latents)
# have an inference network and decoder for each attribute (18 total)
self.attr_encoders = nn.ModuleList([AttributeEncoder(n_latents)
for _ in xrange(N_ATTRS)])
self.attr_decoders = nn.ModuleList([AttributeDecoder(n_latents)
for _ in xrange(N_ATTRS)])
self.experts = ProductOfExperts()
self.n_latents = n_latents
def reparametrize(self, mu, logvar):
if self.training:
std = logvar.mul(0.5).exp_()
eps = Variable(std.data.new(std.size()).normal_())
return eps.mul(std).add_(mu)
else: # return mean during inference
return mu
def forward(self, image=None, attrs=[None for _ in xrange(N_ATTRS)]):
"""Forward pass through the MVAE.
@param image: ?PyTorch.Tensor
@param attrs: list of ?PyTorch.Tensors
If a single attribute is missing, pass None
instead of a Tensor. Even if all attributes
are missing, still pass a list of <N_ATTRS> None's.
@return image_recon: PyTorch.Tensor
@return attr_recons: list of PyTorch.Tensors (N_ATTRS length)
"""
mu, logvar = self.infer(image, attrs)
# reparametrization trick to sample
z = self.reparametrize(mu, logvar)
# reconstruct inputs based on that gaussian
image_recon = self.image_decoder(z)
attr_recons = []
for i in xrange(N_ATTRS):
attr_recon = self.attr_decoders[i](z)
attr_recons.append(attr_recon.squeeze(1))
return image_recon, attr_recons, mu, logvar
def infer(self, image=None, attrs=[None for _ in xrange(N_ATTRS)]):
# get the batch size
if image is not None:
batch_size = len(image)
else:
for i in xrange(N_ATTRS):
if attrs[i] is not None:
batch_size = len(attrs[i])
break
use_cuda = next(self.parameters()).is_cuda # check if CUDA
mu, logvar = prior_expert((1, batch_size, self.n_latents),
use_cuda=use_cuda)
if image is not None:
image_mu, image_logvar = self.image_encoder(image)
mu = torch.cat((mu, image_mu.unsqueeze(0)), dim=0)
logvar = torch.cat((logvar, image_logvar.unsqueeze(0)), dim=0)
for i in xrange(N_ATTRS):
if attrs[i] is not None:
attr_mu, attr_logvar = self.attr_encoders[i](attrs[i].long())
mu = torch.cat((mu, attr_mu.unsqueeze(0)), dim=0)
logvar = torch.cat((logvar, attr_logvar.unsqueeze(0)), dim=0)
# product of experts to combine gaussians
mu, logvar = self.experts(mu, logvar)
return mu, logvar
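# With per-attribute experts, the product combines up to 1 (prior) + 1 (image) + N_ATTRS attribute
# Gaussians, one expert for each modality that is actually observed.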
class ImageEncoder(nn.Module):
"""Parametrizes q(z|x).
This is the standard DCGAN architecture.
@param n_latents: integer
number of latent variable dimensions.
"""
def __init__(self, n_latents):
super(ImageEncoder, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(3, 32, 4, 2, 1, bias=False),
Swish(),
nn.Conv2d(32, 64, 4, 2, 1, bias=False),
nn.BatchNorm2d(64),
Swish(),
nn.Conv2d(64, 128, 4, 2, 1, bias=False),
nn.BatchNorm2d(128),
Swish(),
nn.Conv2d(128, 256, 4, 1, 0, bias=False),
nn.BatchNorm2d(256),
Swish())
self.classifier = nn.Sequential(
nn.Linear(256 * 5 * 5, 512),
Swish(),
nn.Dropout(p=0.1),
nn.Linear(512, n_latents * 2))
self.n_latents = n_latents
def forward(self, x):
n_latents = self.n_latents
x = self.features(x)
x = x.view(-1, 256 * 5 * 5)
x = self.classifier(x)
return x[:, :n_latents], x[:, n_latents:]
class ImageDecoder(nn.Module):
"""Parametrizes p(x|z).
This is the standard DCGAN architecture.
@param n_latents: integer
number of latent variable dimensions.
"""
def __init__(self, n_latents):
super(ImageDecoder, self).__init__()
self.upsample = nn.Sequential(
nn.Linear(n_latents, 256 * 5 * 5),
Swish())
self.hallucinate = nn.Sequential(
nn.ConvTranspose2d(256, 128, 4, 1, 0, bias=False),
nn.BatchNorm2d(128),
Swish(),
nn.ConvTranspose2d(128, 64, 4, 2, 1, bias=False),
nn.BatchNorm2d(64),
Swish(),
nn.ConvTranspose2d(64, 32, 4, 2, 1, bias=False),
nn.BatchNorm2d(32),
Swish(),
nn.ConvTranspose2d(32, 3, 4, 2, 1, bias=False))
def forward(self, z):
# the input will be a vector of size |n_latents|
z = self.upsample(z)
z = z.view(-1, 256, 5, 5)
z = self.hallucinate(z)
return z # NOTE: no sigmoid here. See train.py
class AttributeEncoder(nn.Module):
"""Parametrizes q(z|y).
We use a single inference network that encodes
a single attribute.
@param n_latents: integer
number of latent variable dimensions.
"""
def __init__(self, n_latents):
super(AttributeEncoder, self).__init__()
self.net = nn.Sequential(
nn.Embedding(2, 512),
Swish(),
nn.Linear(512, 512),
Swish(),
nn.Linear(512, n_latents * 2))
self.n_latents = n_latents
def forward(self, x):
n_latents = self.n_latents
x = self.net(x.long())
return x[:, :n_latents], x[:, n_latents:]
class AttributeDecoder(nn.Module):
"""Parametrizes p(y|z).
We use a single generative network that decodes
a single attribute.
@param n_latents: integer
number of latent variable dimensions.
"""
def __init__(self, n_latents):
super(AttributeDecoder, self).__init__()
self.net = nn.Sequential(
nn.Linear(n_latents, 512),
Swish(),
nn.Linear(512, 512),
Swish(),
nn.Linear(512, 512),
Swish(),
nn.Linear(512, 1))
def forward(self, z):
z = self.net(z)
return z # NOTE: no sigmoid here. See train.py
class ProductOfExperts(nn.Module):
"""Return parameters for product of independent experts.
See https://arxiv.org/pdf/1410.7827.pdf for equations.
@param mu: M x D for M experts
@param logvar: M x D for M experts
"""
def forward(self, mu, logvar, eps=1e-8):
var = torch.exp(logvar) + eps
# precision of i-th Gaussian expert at point x
T = 1. / var
pd_mu = torch.sum(mu * T, dim=0) / torch.sum(T, dim=0)
pd_var = 1. / torch.sum(T, dim=0)
pd_logvar = torch.log(pd_var)
return pd_mu, pd_logvar
class Swish(nn.Module):
"""https://arxiv.org/abs/1710.05941"""
def forward(self, x):
return x * F.sigmoid(x)
def prior_expert(size, use_cuda=False):
"""Universal prior expert. Here we use a spherical
Gaussian: N(0, 1).
@param size: integer
dimensionality of Gaussian
@param use_cuda: boolean [default: False]
cast CUDA on variables
"""
mu = Variable(torch.zeros(size))
logvar = Variable(torch.log(torch.ones(size)))
if use_cuda:
mu, logvar = mu.cuda(), logvar.cuda()
return mu, logvar
| 8,328
| 32.316
| 91
|
py
|
multimodal-vae-public
|
multimodal-vae-public-master/celeba19/train.py
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import shutil
import numpy as np
from tqdm import tqdm
from itertools import combinations
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import transforms
from model import MVAE
sys.path.append('../celeba')
from datasets import N_ATTRS
from datasets import CelebAttributes
def elbo_loss(recon, data, mu, logvar, lambda_image=1.0,
lambda_attrs=1.0, annealing_factor=1.):
"""Compute the ELBO for an arbitrary number of data modalities.
@param recon: list of torch.Tensors/Variables
Contains one for each modality.
@param data: list of torch.Tensors/Variables
Size must agree with recon.
@param mu: Torch.Tensor
Mean of the variational distribution.
@param logvar: Torch.Tensor
Log variance for variational distribution.
@param lambda_image: float [default: 1.0]
weight for image BCE
@param lambda_attrs: float [default: 1.0]
weight for attribute BCE
@param annealing_factor: float [default: 1]
Beta - how much to weight the KL regularizer.
"""
assert len(recon) == len(data), "must supply ground truth for every modality."
n_modalities = len(recon)
batch_size = mu.size(0)
BCE = 0 # reconstruction cost
for ix in xrange(n_modalities):
# dimensionality > 1 implies an image
if len(recon[ix].size()) > 1:
recon_ix = recon[ix].view(batch_size, -1)
data_ix = data[ix].view(batch_size, -1)
BCE += lambda_image * torch.sum(binary_cross_entropy_with_logits(recon_ix, data_ix), dim=1)
else: # this is for an attribute
BCE += lambda_attrs * binary_cross_entropy_with_logits(recon[ix], data[ix])
KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=1)
ELBO = torch.mean(BCE + annealing_factor * KLD)
return ELBO
def binary_cross_entropy_with_logits(input, target):
"""Sigmoid Activation + Binary Cross Entropy
@param input: torch.Tensor (size N)
@param target: torch.Tensor (size N)
@return loss: torch.Tensor (size N)
"""
if not (target.size() == input.size()):
raise ValueError("Target size ({}) must be the same as input size ({})".format(
target.size(), input.size()))
return (torch.clamp(input, 0) - input * target
+ torch.log(1 + torch.exp(-torch.abs(input))))
def tensor_2d_to_list(x):
# convert a 2D tensor to a list of 1D tensors.
n_dims = x.size(1)
list_of_tensors = []
for i in xrange(n_dims):
list_of_tensors.append(x[:, i])
return list_of_tensors
def enumerate_combinations(n):
"""Enumerate entire pool of combinations.
We use this to define the domain of ELBO terms
(a pool of up to 2^19 terms for 19 modalities).
@param n: integer
number of features (19 for Celeb19)
@return: a boolean numpy array with one row per combination
"""
combos = []
for i in xrange(2, n): # combination sizes 2 to n - 1
_combos = list(combinations(range(n), i))
combos += _combos
combos_np = np.zeros((len(combos), n))
for i in xrange(len(combos)):
for idx in combos[i]:
combos_np[i][idx] = 1
combos_np = combos_np.astype(np.bool)
return combos_np
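# Example with a hypothetical n=3: the pool is the boolean rows [1,1,0], [1,0,1], [0,1,1],
# i.e. every subset of modalities of size 2..n-1; singletons and the full set are handled separately.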
def sample_combinations(pool, size=1):
"""Return boolean list of which data points to use to compute a modality.
Ignore combinations that are all True or only contain a single True.
@param pool: np.array
enumerating all possible combinations.
@param size: integer (default: 1)
number of combinations to sample.
"""
n_modalities = pool.shape[1]
pool_size = len(pool)
pool_sums = np.sum(pool, axis=1)
pool_dist = np.bincount(pool_sums)
pool_space = np.where(pool_dist > 0)[0]
sample_pool = np.random.choice(pool_space, size, replace=True)
sample_dist = np.bincount(sample_pool)
if sample_dist.size < n_modalities:
zeros_pad = np.zeros(n_modalities - sample_dist.size).astype(np.int)
sample_dist = np.concatenate((sample_dist, zeros_pad))
sample_combo = []
for ix in xrange(n_modalities):
if sample_dist[ix] > 0:
pool_i = pool[pool_sums == ix]
combo_i = np.random.choice(range(pool_i.shape[0]),
size=sample_dist[ix],
replace=False)
sample_combo.append(pool_i[combo_i])
sample_combo = np.concatenate(sample_combo)
return sample_combo
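# Sampling strategy: first draw a subset size uniformly from the sizes present in the pool,
# then draw that many concrete combinations of that size without replacement.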
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def save_checkpoint(state, is_best, folder='./', filename='checkpoint.pth.tar'):
if not os.path.isdir(folder):
os.mkdir(folder)
torch.save(state, os.path.join(folder, filename))
if is_best:
shutil.copyfile(os.path.join(folder, filename),
os.path.join(folder, 'model_best.pth.tar'))
def load_checkpoint(file_path, use_cuda=False):
checkpoint = torch.load(file_path) if use_cuda else \
torch.load(file_path, map_location=lambda storage, location: storage)
model = MVAE(checkpoint['n_latents'])
model.load_state_dict(checkpoint['state_dict'])
return model
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--n-latents', type=int, default=100,
help='size of the latent embedding [default: 100]')
parser.add_argument('--batch-size', type=int, default=100, metavar='N',
help='input batch size for training [default: 100]')
parser.add_argument('--epochs', type=int, default=100, metavar='N',
help='number of epochs to train [default: 100]')
parser.add_argument('--annealing-epochs', type=int, default=20, metavar='N',
help='number of epochs to anneal KL for [default: 20]')
parser.add_argument('--lr', type=float, default=1e-4, metavar='LR',
help='learning rate [default: 1e-4]')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status [default: 10]')
parser.add_argument('--approx-m', type=int, default=1,
help='number of ELBO terms to approx. the full MVAE objective [default: 1]')
parser.add_argument('--lambda-image', type=float, default=1.,
help='multiplier for image reconstruction [default: 1]')
parser.add_argument('--lambda-attrs', type=float, default=10.,
help='multiplier for attributes reconstruction [default: 10]')
parser.add_argument('--cuda', action='store_true', default=False,
help='enables CUDA training [default: False]')
args = parser.parse_args()
args.cuda = args.cuda and torch.cuda.is_available()
if not os.path.isdir('./trained_models'):
os.makedirs('./trained_models')
# crop the input image to 64 x 64
preprocess_data = transforms.Compose([transforms.Resize(64),
transforms.CenterCrop(64),
transforms.ToTensor()])
train_loader = torch.utils.data.DataLoader(
CelebAttributes(partition='train', data_dir='./data',
image_transform=preprocess_data),
batch_size=args.batch_size, shuffle=True)
N_mini_batches = len(train_loader)
test_loader = torch.utils.data.DataLoader(
CelebAttributes(partition='val', data_dir='./data',
image_transform=preprocess_data),
batch_size=args.batch_size, shuffle=False)
model = MVAE(args.n_latents)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
if args.cuda:
model.cuda()
# enumerate all combinations so we can sample from this
# every gradient step. NOTE: probably not the most efficient
# way to do this but oh well.
combination_pool = enumerate_combinations(19)
def train(epoch):
model.train()
train_loss_meter = AverageMeter()
for batch_idx, (image, attrs) in enumerate(train_loader):
if epoch < args.annealing_epochs:
# compute the KL annealing factor for the current mini-batch in the current epoch
annealing_factor = (float(batch_idx + (epoch - 1) * N_mini_batches + 1) /
float(args.annealing_epochs * N_mini_batches))
else:
# by default the KL annealing factor is unity
annealing_factor = 1.0
if args.cuda:
image = image.cuda()
attrs = attrs.cuda()
image = Variable(image)
attrs = Variable(attrs)
attrs = tensor_2d_to_list(attrs) # convert tensor to list
batch_size = len(image)
# refresh the optimizer
optimizer.zero_grad()
train_loss = 0 # accumulate train loss here so we don't store a lot of things.
n_elbo_terms = 0 # track number of ELBO terms
# compute ELBO using all data (``complete")
recon_image, recon_attrs, mu, logvar = model(image, attrs)
train_loss += elbo_loss([recon_image] + recon_attrs, [image] + attrs, mu, logvar,
lambda_image=args.lambda_image, lambda_attrs=args.lambda_attrs,
annealing_factor=annealing_factor)
n_elbo_terms += 1 # keep track of how many terms there are
# compute ELBO using only image data
recon_image, _, mu, logvar = model(image=image)
train_loss += elbo_loss([recon_image], [image], mu, logvar,
lambda_image=args.lambda_image, lambda_attrs=args.lambda_attrs,
annealing_factor=annealing_factor)
n_elbo_terms += 1 # keep track of how many terms there are
# compute ELBO using only text data
for ix in xrange(len(attrs)):
_, recon_attrs, mu, logvar = model(attrs=[attrs[k] if k == ix else None
for k in xrange(len(attrs))])
train_loss += elbo_loss([recon_attrs[ix]], [attrs[ix]], mu, logvar,
annealing_factor=annealing_factor)
n_elbo_terms += 1
# sample some number of terms
if args.approx_m > 0:
sample_combos = sample_combinations(combination_pool, size=args.approx_m)
for sample_combo in sample_combos:
attrs_combo = sample_combo[1:]
recon_image, recon_attrs, mu, logvar = model(image=image if sample_combo[0] else None,
attrs=[attrs[ix] if attrs_combo[ix] else None
for ix in xrange(attrs_combo.size)])
if sample_combo[0]: # check if image is present
elbo = elbo_loss([recon_image] + [recon_attrs[ix] for ix in xrange(attrs_combo.size) if attrs_combo[ix]],
[image] + [attrs[ix] for ix in xrange(attrs_combo.size) if attrs_combo[ix]],
mu, logvar, annealing_factor=annealing_factor)
else:
elbo = elbo_loss([recon_attrs[ix] for ix in xrange(attrs_combo.size) if attrs_combo[ix]],
[attrs[ix] for ix in xrange(attrs_combo.size) if attrs_combo[ix]],
mu, logvar, annealing_factor=annealing_factor)
train_loss += elbo
n_elbo_terms += 1
assert n_elbo_terms == (len(attrs) + 1) + 1 + args.approx_m # N + 1 + M
train_loss_meter.update(train_loss.data[0], len(image))
# compute and take gradient step
train_loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tAnnealing-Factor: {:.3f}'.format(
epoch, batch_idx * batch_size, len(train_loader.dataset),
100. * batch_idx / len(train_loader), train_loss_meter.avg, annealing_factor))
print('====> Epoch: {}\tLoss: {:.4f}'.format(epoch, train_loss_meter.avg))
def test(epoch):
model.eval()
test_loss = 0
# for simplicity, here I'm only going to track the joint loss.
pbar = tqdm(total=len(test_loader))
for batch_idx, (image, attrs) in enumerate(test_loader):
if args.cuda:
image, attrs = image.cuda(), attrs.cuda()
image = Variable(image, volatile=True)
attrs = Variable(attrs, volatile=True)
batch_size = image.size(0)
attrs = tensor_2d_to_list(attrs)
# compute the elbo using all data.
recon_image, recon_attrs, mu, logvar = model(image, attrs)
test_loss += elbo_loss([recon_image] + recon_attrs, [image] + attrs, mu, logvar).data[0]
pbar.update()
pbar.close()
test_loss /= len(test_loader)
print('====> Test Loss: {:.4f}'.format(test_loss))
return test_loss
best_loss = sys.maxint
for epoch in range(1, args.epochs + 1):
train(epoch)
loss = test(epoch)
is_best = loss < best_loss
best_loss = min(loss, best_loss)
# save the best model and current model
save_checkpoint({
'state_dict': model.state_dict(),
'best_loss': best_loss,
'n_latents': args.n_latents,
'optimizer' : optimizer.state_dict(),
}, is_best, folder='./trained_models')
| 14,718
| 40.345506
| 129
|
py
|
Fine-tuning-NOs
|
Fine-tuning-NOs-master/main.py
|
from pytorch_lightning.callbacks import ModelCheckpoint
import pytorch_lightning as pl
import yaml
import argparse
import utilities
import os
import torch
import shutil
def datasetFactory(config, do, args=None):
c_data =config["data"]
if args is None:
gl = utilities.GettingLists(data_for_training=c_data["n_sample"],
wave_eq = c_data["PDE_type"],
data_base = c_data["process"],
PATH = c_data["PATH"])
return utilities.MyLoader(GL=gl, do = do, config=config, args = None)
elif args is not None:
gl = utilities.GettingLists(data_for_training=c_data["n_sample"],
wave_eq = c_data["PDE_type"],
data_base = args.data_base,
PATH = args.PATH)
return utilities.MyLoader(GL=gl, do = do, config=config, args=args)
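# When `args` is provided, its --data_base / --PATH values override the YAML config; otherwise the
# loaders are built purely from the "data" section of the config file.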
def main(args, config = None):
if config is None:
with open(args.config_file, 'r') as stream:
config = yaml.load(stream, yaml.FullLoader)
print(config)
print(args)
model = utilities.choosing_model(config)
print(model)
if args.all_ckp == False:
save_file = os.path.join(config["ckpt"]["PATH"],
config["ckpt"]["save_dir"]
)
checkpoint_callback = ModelCheckpoint(
dirpath=save_file,
every_n_epochs = 1,
save_last = True,
monitor = 'val_loss',
mode = 'min',
save_top_k = args.save_top_k,
filename="model-{epoch:03d}-{val_loss:.4f}",
)
elif args.all_ckp == True:
save_file = os.path.join(config["ckpt"]["PATH"], "all_epochs",
f'{config["train"]["epochs"]}_{config["model"]["activ"]}',
config["ckpt"]["save_dir"]
)
checkpoint_callback = ModelCheckpoint(
dirpath=save_file,
every_n_epochs = 1,
save_top_k = -1,
filename="model-{epoch:03d}-{val_loss:.4f}",
)
if os.path.exists(save_file):
print(f"The model directory exists. Overwrite? {args.erase}")
if args.erase == True:
shutil.rmtree(save_file)
if args.checkpoint is None:
#left the default values provided by the config file
train_dataloader, val_dataloader = datasetFactory(config=config, do=args.do, args=None)
max_epochs = config["train"]["epochs"]
elif args.checkpoint is not None:
print(f"Load from checkpoint {args.checkpoint}")
#model=model.load_from_checkpoint(args.checkpoint)
checkpoint = torch.load(args.checkpoint, map_location=lambda storage, loc: storage)
print(checkpoint.keys())
model.load_state_dict(checkpoint["state_dict"])
print(model.learning_rate)
#change optimizer if needed
if args.lr is None:
args.lr = config["train"]["lr"]
if args.weight_decay is None:
args.weight_decay = config["train"]["weight_decay"]
if args.optimizer is not None:
if args.optimizer == "SGD":
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9)
elif args.optimizer == "Adam":
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
else:
optimizer = torch.optim.AdamW(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
#change the scheduler if needed
if args.scheduler is not None:
if args.scheduler == "ReduceLROnPlateau":
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=10, verbose=True, eps=1e-08, min_lr=0)
elif args.scheduler == "CosineAnnealingLR":
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=10, eta_min=0, last_epoch=-1)
else:
if args.step_size is None:
args.step_size = config["train"]["step_size"]
if args.gamma is None:
args.gamma = config["train"]["gamma"]
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma)
model.configure_optimizers(optimizer, scheduler)
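# When fine-tuning from a checkpoint, the optimizer/scheduler built from the CLI flags are handed to
# the LightningModule here (configure_optimizers is assumed to accept these overrides).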
#change the number of epochs if needed
if args.epochs is not None:
print(f"Change the number of epochs to {args.epochs}")
max_epochs = args.epochs
else:
max_epochs = config["train"]["epochs"]
print(checkpoint["epoch"])
#change the filename if needed
if args.filename is not None:
print(f"Change the filename to {args.filename}")
checkpoint_callback.filename = args.filename+"-{epoch:03d}-{val_loss:.4f}"
if args.data_base is None:
args.data_base = config["data"]["process"]
args.PATH = config["data"]["PATH"]
else:
args.PATH = os.path.join('save_files', 'acoustic', args.data_base)
#change the config file if needed through the command line
train_dataloader, val_dataloader = datasetFactory(config=config, do = args.do, args=args)
if args.usual_ckpt == True:
trainer = pl.Trainer(max_epochs=max_epochs,
accelerator=args.accelerator,
devices=args.devices,
default_root_dir=save_file)
elif args.usual_ckpt == False:
trainer = pl.Trainer(max_epochs=max_epochs,
accelerator=args.accelerator,
devices=args.devices,
callbacks=[checkpoint_callback])
if args.resume == True:
trainer.fit(model, train_dataloader, val_dataloader, ckpt_path=args.checkpoint)
else:
trainer.fit(model, train_dataloader, val_dataloader)
if __name__ == '__main__':
parser = argparse.ArgumentParser('Training of the Architectures', add_help=True)
parser.add_argument('-c','--config_file', type=str,
help='Path to the configuration file',
default='config/acoustic/GRF_7Hz/FNO25k.yaml')
parser.add_argument('-a', '--all_ckp', type = bool,
help='Allow saving a checkpoint for every epoch',
default=False)
parser.add_argument('-u_ckpt', '--usual_ckpt', type = bool,
help='Allow to save the usual ckpt as in pytorch-lightning\
use it only if you want to save the ckpt in a different directory\
or multiple analysis of networks',
default=False)
parser.add_argument('-e', '--erase', type = bool,
help='erase_save_dir',
default=False)
parser.add_argument( '-ckpt', '--checkpoint', type = str,
help='checkpoint file to load',
default=None)
parser.add_argument('-r', '--resume', type = bool,
help='resume training',
default=False)
parser.add_argument('-savetop', '--save_top_k', type = int,
help='save top k ckpt',
default=3)
parser.add_argument('-lr', '--lr', type = float,
help='learning rate',
default=None)
parser.add_argument('-o', '--optimizer', type = str,
help='optimizer',
default=None)
parser.add_argument('-s', '--scheduler', type = str,
help='scheduler',
default=None)
parser.add_argument('-ep', '--epochs', type = int,
help='number of epochs',
default=None)
parser.add_argument('-f', '--filename', type = str,
help='filename',
default=None)
parser.add_argument('-b', '--batch_size', type = int,
help='batch_size',
default=None)
parser.add_argument('-lw', '--load_workers', type = int,
help='load_workers',
default=None)
parser.add_argument('-db', '--data_base', type = str,
help='database',
default=None)
parser.add_argument('-weight_decay', '--weight_decay', type = float,
help='weight_decay',
default=None)
parser.add_argument('-step_size', '--step_size', type = int,
help='step_size',
default=None)
parser.add_argument('-gamma', '--gamma', type = float,
help='gamma',
default=None)
parser.add_argument('-P', '--PATH', type = str,
help='PATH',
default=None)
parser.add_argument('-d', '--devices', type = int,
help='devices',
default=1)
parser.add_argument('-acc', '--accelerator', type = str,
help='accelerator',
default='gpu')
parser.add_argument('-do', '--do', type=str,
help='do',
default="train")
args=parser.parse_args()
config_file = args.config_file
main(args)
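# Example invocation (hypothetical paths):
# python main.py -c config/acoustic/GRF_7Hz/FNO25k.yaml -ckpt path/to/model.ckpt -lr 1e-5 -ep 50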
| 10,447
| 45.435556
| 153
|
py
|
Fine-tuning-NOs
|
Fine-tuning-NOs-master/reconstruction_data.py
|
from utilities import choosing_model
import yaml
import argparse
import utilities
import os
import torch
import pytorch_lightning as pl
import numpy as np
import matplotlib.pyplot as plt
from utilities import to_numpy
def saving_files(x, y, out, database, name):
PATH = "make_graph/data"+'/'+database+'/'+name
x = to_numpy(x)
y = to_numpy(y)
out = to_numpy(out)
if not os.path.exists(PATH):
os.makedirs(PATH)
os.chdir(PATH)
np.save('wavespeed.npy', x)
np.save('data.npy', y)
np.save(f'data_{name}.npy', out)
def datasetFactoryTest(config):
c_data =config["data"]
gl = utilities.GettingLists(data_for_training=c_data["n_sample"],
wave_eq = c_data["PDE_type"],
data_base = c_data["process"],
PATH = c_data["PATH"])
return utilities.MyLoader(GL=gl, do = "test", config=config)
if __name__ == '__main__':
parser = argparse.ArgumentParser('Getting data from the test set', add_help=False)
parser.add_argument('-c','--config_file', type=str,
help='Path to the configuration file',
default='config/acoustic/GRF_7Hz/FNO25k.yaml')
args=parser.parse_args()
config_file = args.config_file
with open(config_file, 'r') as stream:
config = yaml.load(stream, yaml.FullLoader)
print(config)
if config["Project"]["name"]== "sFNO+epsilon_v2":
file_ckpt = "epoch=199-step=166800.ckpt"
else:
file_ckpt = "epoch=99-step=50000.ckpt"
c_save = config["ckpt"]
model = choosing_model(config)
test_dataloader = datasetFactoryTest(config)
myloss = utilities.LpLoss(size_average=False)
PATH = os.path.join(c_save["PATH"], c_save["save_dir"], "lightning_logs", f"version_{0}",\
"checkpoints", file_ckpt)
checkpoint = torch.load(PATH, map_location=lambda storage, loc: storage)
model.load_state_dict(checkpoint["state_dict"])
model.cuda()
model.eval()
k_list = [k for k in range(10)]
save = True
batch_size = 20
with torch.no_grad():
for x, y in test_dataloader:
s= x.shape[2]
x, y = (x[:batch_size,...]).cuda(), (y[:batch_size,...]).cuda()
out = model(x).reshape(batch_size, s, s, -1)
break
saving_files(x, y, out, database=config["data"]["process"], name =config["ckpt"]["alias"])
| 2,493
| 32.253333
| 94
|
py
|
Fine-tuning-NOs
|
Fine-tuning-NOs-master/reconstruction_plot.py
|
from utilities import choosing_model
import yaml
import argparse
import utilities
import os
import torch
import pytorch_lightning as pl
import numpy as np
import matplotlib.pyplot as plt
from utilities import to_numpy
def plotting(in_, NN_out, out, name, database,
k_list =[1,2,3,4], save=False, vmin=-0.5, vmax =0.5,
shrink = 0.8):
in_ = to_numpy(in_)[k_list,...]
NN_out = to_numpy(NN_out)[k_list,...]
out = to_numpy(out)[k_list,...]
for k in k_list:
if database == 'GRF_7Hz':
s = 128
in_k = in_[k,...].reshape(s,s)
out_k = out[k,...].reshape(s,s)
NN_k =NN_out[k,...].reshape(s,s)
plt.figure(figsize=(10,10))
plt.subplot(131)
plt.imshow(in_k, vmin=1., vmax =3., cmap = 'jet')
plt.colorbar(shrink =shrink)
plt.title(f'wavespeed: {k}')
plt.subplot(132)
plt.imshow(out_k, vmin=vmin, vmax =vmax, cmap = 'seismic')
plt.colorbar(shrink =shrink)
plt.title(f'HDG sample: {k}')
plt.subplot(133)
plt.imshow(NN_k, vmin=vmin, vmax =vmax, cmap = 'seismic')
plt.colorbar(shrink =shrink)
plt.title(f'{name} sample: {k}')
elif database in ('GRF_12Hz', 'GRF_15Hz'):
s = 64
in_k = in_[k,...].reshape(s,s)
out_k = out[k,...].reshape(s,s,-1)
NN_k =NN_out[k,...].reshape(s,s,-1)
plt.figure(figsize=(20,10))
plt.subplot(231)
plt.imshow(in_k, vmin=1., vmax =5., cmap = 'jet')
plt.colorbar(shrink =shrink)
plt.title(f'wavespeed: {k}')
plt.subplot(232)
plt.imshow(out_k[:,:,0].reshape(s,s), vmin=vmin, vmax =vmax, cmap = 'seismic')
plt.colorbar(shrink =shrink)
plt.title(f'HDG (real) sample: {k}')
plt.subplot(233)
plt.imshow(NN_k[:,:,0].reshape(s,s), vmin=vmin, vmax =vmax, cmap = 'seismic')
plt.colorbar(shrink =shrink)
plt.title(f'{name} (real) sample: {k}')
plt.subplot(235)
plt.imshow(out_k[:,:,1].reshape(s,s), vmin=vmin, vmax =vmax, cmap = 'seismic')
plt.colorbar(shrink =shrink)
plt.title(f'HDG (imaginary) sample: {k}')
plt.subplot(236)
plt.imshow(NN_k[:,:,1].reshape(s,s), vmin=vmin, vmax =vmax, cmap = 'seismic')
plt.colorbar(shrink =shrink)
plt.title(f'{name} (imaginary) sample: {k}')
if save== True:
saving_dir = f'make_graph/figures/{database}/'+f'{name}'
if not os.path.exists(saving_dir):
os.makedirs(saving_dir)
plt.savefig(f"{saving_dir}/ex_{k}.png")
def datasetFactoryTest(config):
c_data =config["data"]
gl = utilities.GettingLists(data_for_training=c_data["n_sample"],
wave_eq = c_data["PDE_type"],
data_base = c_data["process"],
PATH = c_data["PATH"])
return utilities.MyLoader(GL=gl, do = "test", config=config)
if __name__ == '__main__':
parser = argparse.ArgumentParser('Plotting from test data', add_help=False)
parser.add_argument('-c','--config_file', type=str,
help='Path to the configuration file',
default='config/acoustic/GRF_7Hz/FNO25k.yaml')
parser.add_argument('-s','--shrink', type=float,
help='shrink bar value',
default=0.8)
args=parser.parse_args()
config_file = args.config_file
with open(config_file, 'r') as stream:
config = yaml.load(stream, yaml.FullLoader)
print(config)
c_save = config["ckpt"]
model = choosing_model(config)
if config["Project"]["name"]== "sFNO+epsilon_v2":
file_ckpt = "epoch=199-step=166800.ckpt"
else:
file_ckpt = "epoch=99-step=50000.ckpt"
test_dataloader = datasetFactoryTest(config)
myloss = utilities.LpLoss(size_average=False)
PATH = os.path.join(c_save["PATH"], c_save["save_dir"], "lightning_logs", f"version_{0}",\
"checkpoints", file_ckpt)
checkpoint = torch.load(PATH, map_location=lambda storage, loc: storage)
model.load_state_dict(checkpoint["state_dict"])
model.cuda()
model.eval()
k_list = [k for k in range(10)]
save = True
with torch.no_grad():
for x, y in test_dataloader:
batch_size, s= x.shape[0:2]
x, y = x.cuda(), y.cuda()
out = model(x).reshape(batch_size, s, s,-1)
break
plotting(
in_ = x,
NN_out =out,
out= y,
name=config["ckpt"]["alias"],
database = config["data"]["process"],
k_list= k_list,
save = save,
shrink= args.shrink,
vmin=-0.2,
vmax =0.2)
| 4,950
| 35.138686
| 94
|
py
|
Fine-tuning-NOs
|
Fine-tuning-NOs-master/OOD.py
|
import yaml
from evaluation import saving_files
import argparse
import utilities
from utilities import to_numpy
import os
import torch
import pytorch_lightning as pl
import numpy as np
import matplotlib.pyplot as plt
def load_ood(args, size=64, dir_skeleton=None):
if dir_skeleton is None:
dir_skeleton = 'set_{:02d}'.format(args.ood_sample)
dir_ood = os.path.join('OOD', "OOD_files", dir_skeleton)
data = np.load(os.path.join(dir_ood, f'data_set{args.ood_sample}_freq{args.freq}.npy'))
model = np.load(os.path.join(dir_ood, f'model_set{args.ood_sample}.npy'))
model = torch.tensor(model*1e-3, dtype=torch.float).view(-1,size,size,1)
data =torch.tensor(data, dtype=torch.float).view(-1, size,size,2)
print(f'vp= {model.shape}, data={data.shape}')
return model, data
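# The wavespeed model is rescaled by 1e-3 (presumably m/s to km/s) and reshaped to (N, size, size, 1);
# the wavefield data keeps two channels per grid point (real and imaginary parts).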
def test_ood(config, args, name =None, dir_skeleton= None, realization_k = 0, x=None, y=None):
if dir_skeleton is None:
dir_skeleton = 'set_{:02d}'.format(args.ood_sample)+f'_freq{args.freq}'
if name is None:
name= config["ckpt"]["alias"]
model = utilities.choosing_model(config)
if x is None or y is None:
x, y =load_ood(args)
myloss = utilities.LpLoss(size_average=False)
checkpoint = torch.load(args.checkpoint, map_location=lambda storage, loc: storage)
model.load_state_dict(checkpoint["state_dict"])
model.cuda()
model.eval()
loss_dict = {
'test_loss_ood': 0.0
}
x, y = x.cuda(), y.cuda()
batch_size, s, s, _ = x.shape
out = model(x).reshape(batch_size, s, s, -1)
loss_test = myloss(out.view(batch_size,-1), y.view(batch_size,-1))
loss_dict['test_loss_ood']+= loss_test.item()/batch_size
print(f"test test_loss_ood: {loss_dict['test_loss_ood']}")
if args.save_graph:
print("generating and saving graph")
utilities.plotting(in_ = x, NN_out = out, out = y,
name = name, database=dir_skeleton, PATH='OOD', ksample=realization_k)
if args.save_npy:
print("saving npy files")
utilities.saving_files(in_files = to_numpy(x), out_files = to_numpy(y),
NN_out_files = to_numpy(out), NN_name = name,
database=dir_skeleton, PATH='OOD', realization_k = realization_k)
return loss_dict['test_loss_ood']
if __name__ == '__main__':
parser = argparse.ArgumentParser('out of distribution check', add_help=False)
parser.add_argument('-c','--config_file', type=str,
help='Path to the configuration file',
default='config/acoustic/GRF_7Hz/FNO25k.yaml')
parser.add_argument('-ckpt', '--checkpoint', type=str,
help='Path to the checkpoint file',
default=None)
parser.add_argument('-ood','--ood_sample', type=int,
help='out of distribution set',
default=0)
parser.add_argument('-sg','--save-graph', type=bool,
help='Saving Image',
default=True)
parser.add_argument('-snpy','--save-npy', type=bool,
help='Saving NPY',
default=True)
parser.add_argument('-f','--freq', type=int,
help='frequency of the OOD',
default=None)
parser.add_argument('-vmax','--vmax', type=float,
help='vmax of the OOD',
default=0.5)
parser.add_argument('-vmin','--vmin', type=float,
help='vmin of the OOD',
default=-0.5)
parser.add_argument('-s','--shrink', type=float,
help='shrink bar value',
default=0.8)
args=parser.parse_args()
config_file = args.config_file
assert args.ood_sample in [0,1,2,3,4,5], "out of distribution sample should be in [0,1,2,3,4,5]"
assert args.freq in [None, 12, 15], "frequency should be in [12,15] Hz"
with open(config_file, 'r') as stream:
config = yaml.load(stream, yaml.FullLoader)
if args.freq is None:
args.freq = config['data']['frequency']
# getting the name of the dataset
dir_skeleton = 'set_{:02d}'.format(args.ood_sample)+f'_freq{args.freq}'
if args.checkpoint is None:
c_save = config["ckpt"]
if config["ckpt"]["alias"]== "sFNO+epsilon_v2":
ckpt = "epoch=199-step=166800.ckpt"
else:
ckpt = "epoch=99-step=50000.ckpt"
x, y =load_ood(args)
list_test = []
for k in range(0,3):
args.checkpoint = os.path.join(c_save["PATH"],
c_save["save_dir"],
"lightning_logs",
f"version_{k}",\
"checkpoints", ckpt)
list_test.append(test_ood(config,args=args, realization_k = k, x=x, y=y))
print(list_test)
saving_files(list_test, database=dir_skeleton, name =config["ckpt"]["alias"], dir_= "OOD")
print(f"Load from checkpoint {args.checkpoint}")
| 5,428
| 42.087302
| 100
|
py
|
Fine-tuning-NOs
|
Fine-tuning-NOs-master/evaluation.py
|
import yaml
import argparse
import utilities
import os
import torch
import numpy as np
from main import datasetFactory
import pytorch_lightning as pl
def saving_files(data, database, name, dir_= "make_graph"):
if len(data) != 1:
PATH = os.path.join(dir_, "test_loss", database)
if not os.path.exists(PATH):
os.makedirs(PATH)
np.savetxt(os.path.join(PATH, f'{name}.csv'), data, delimiter=",")
else:
PATH = os.path.join(dir_, "test_loss", database)
if not os.path.exists(PATH):
os.makedirs(PATH)
# append a new row to the csv file
with open(os.path.join(PATH, f"{name}.csv"), "a") as f:
f.write(str(data[0]) + "\n")
def test(config, args):
model = utilities.choosing_model(config)
test_dataloader = datasetFactory(config, do = args.do, args=None)
myloss = utilities.LpLoss(size_average=False)
print(f"Load from checkpoint {args.checkpoint}")
checkpoint = torch.load(args.checkpoint, map_location=lambda storage, loc: storage)
model.load_state_dict(checkpoint["state_dict"])
model.cuda()
model.eval()
loss_dict = {
'test_loss': 0.0
}
with torch.no_grad():
for x, y in test_dataloader:
batch_size, s= x.shape[0:2]
x, y = x.cuda(), y.cuda()
out = model(x).reshape(batch_size, s, s, -1)
loss_test = myloss(out.view(batch_size,-1), y.view(batch_size,-1))
loss_dict['test_loss']+= loss_test.item()
return loss_dict['test_loss'] / len(test_dataloader.dataset)
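# The returned value is the relative Lp loss summed over all test samples and divided by the dataset
# size, i.e. a mean relative error per sample.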
if __name__ == '__main__':
parser = argparse.ArgumentParser('Testing losses', add_help=False)
parser.add_argument('-c','--config_file', type=str,
help='Path to the configuration file',
default='config/acoustic/GRF_7Hz/FNO25k.yaml')
parser.add_argument('-do', '--do', type=str,
help='do',
default="test")
parser.add_argument('-n', '--numb_samples', type= int, default = 3)
parser.add_argument('-ckpt', '--checkpoint', type=str,
help='Path to the checkpoint file',
default=None)
args=parser.parse_args()
config_file = args.config_file
with open(config_file, 'r') as stream:
config = yaml.load(stream, yaml.FullLoader)
if config["model"]["activ"] is None:
activ = "Identity"
else:
activ = config["model"]["activ"]
database= activ+"_"+config["data"]["process"]
name= config["ckpt"]["alias"]
if args.checkpoint is None:
c_save = config["ckpt"]
if config["ckpt"]["alias"]== "sFNO+epsilon_v2":
ckpt = "epoch=199-step=166800.ckpt"
else:
ckpt = "epoch=99-step=50000.ckpt"
list_test = []
for k in range(0,args.numb_samples):
args.checkpoint = os.path.join(c_save["PATH"],
c_save["save_dir"], "lightning_logs", f"version_{k}",\
"checkpoints", ckpt)
list_test.append(test(config,args=args))
print(list_test)
saving_files(list_test, database=database, name =name)
elif args.checkpoint is not None:
list_test= test(config,args=args)
print(list_test)
saving_files([list_test], database=database, name =name)
| 3,492
| 36.159574
| 98
|
py
|
Fine-tuning-NOs
|
Fine-tuning-NOs-master/models/sFNO_epsilon_v2.py
|
import pytorch_lightning as pl
import torch
from torch import optim, nn
from .FNO import fourier_conv_2d
from .basics_model import LayerNorm, get_grid2D, FC_nn
from timm.models.layers import DropPath, trunc_normal_
import torch.nn.functional as F
from utilities import LpLoss
from .sFNO import IO_layer
###################################################
# Integral Operator Layer Block with skip connection
###################################################
class IO_ResNetblock(nn.Module):
def __init__(self, features_,
wavenumber,
drop_path = 0.,
drop = 0.):
super().__init__()
self.IO = IO_layer(features_, wavenumber, drop)
        self.pwconv1 = nn.Conv2d(features_, 4 * features_, 1) # pointwise/1x1 conv implemented with Conv2d
self.act = nn.GELU()
self.pwconv2 =nn.Conv2d(4 * features_, features_,1)
self.norm1 = LayerNorm(features_, eps=1e-5, data_format = "channels_first")
self.norm2 = LayerNorm(features_, eps=1e-5, data_format = "channels_first")
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
skip = x
x = self.norm1(x)
x =self.IO(x)
x =skip+self.drop_path(x) #NonLocal Layers
skip = x
x = self.norm2(x)
#local
x = self.pwconv1(x)
x = self.act(x)
x = self.pwconv2(x)
x = skip + self.drop_path(x)
return x
#######################################
#sFNO_epsilon_v2
#######################################
class sFNO_epsilon_v2(pl.LightningModule):
def __init__(self,
in_chans = 3,
out_chans = 1,
modes = [12, 12, 12, 12],
depths = [3,3,9,3],
dims = [36, 36, 32, 34],
drop_path_rate = 0.,
drop = 0.,
head_init_scale=1.,
padding=9,
with_grid = True,
loss = "rel_l2",
learning_rate = 1e-3,
step_size= 100,
gamma= 0.5,
weight_decay= 1e-5,
):
super().__init__()
if loss == 'l1':
self.criterion = nn.L1Loss()
elif loss == 'l2':
self.criterion = nn.MSELoss()
elif loss == 'smooth_l1':
self.criterion = nn.SmoothL1Loss()
elif loss == "rel_l2":
self.criterion =LpLoss()
self.with_grid = with_grid
self.padding = padding
self.learning_rate = learning_rate
self.step_size = step_size
self.gamma = gamma
self.weight_decay = weight_decay
self.lifting_layers = nn.ModuleList()
        stem = nn.Conv2d(in_chans, dims[0], 1, 1)
        self.lifting_layers.append(stem)
for i in range(3):
lifting_layers = nn.Sequential(
LayerNorm(dims[i], eps=1e-6, data_format= "channels_first"),
nn.Conv2d(dims[i], dims[i+1], kernel_size = 1, stride = 1)
)
self.lifting_layers.append(lifting_layers)
self.stages = nn.ModuleList()
dp_rates = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
cur = 0
for i in range(4):
stage = nn.Sequential(
*[IO_ResNetblock(features_=dims[i],
wavenumber=[modes[i]]*2,
drop_path=dp_rates[cur + j],
drop =drop) for j in range(depths[i])]
)
self.stages.append(stage)
cur += depths[i]
self.head = nn.Conv2d(dims[-1], out_chans,1,1)
def forward_features(self, x):
x=x.permute(0,3,1,2).contiguous()
x = self.lifting_layers[0](x)
x = F.pad(x, [0,self.padding, 0, self.padding])
for i in range(1,4):
x = self.lifting_layers[i](x)
x = self.stages[i](x)
x = x[..., :-self.padding, :-self.padding]
return x
def forward(self, x):
if self.with_grid:
grid = get_grid2D(x.shape, x.device)
x = torch.cat((x, grid), dim=-1)
del grid
x = self.forward_features(x)
x = self.head(x)
return x
def training_step(self, batch: torch.Tensor, batch_idx):
# training_step defines the train loop.
# it is independent of forward
x, y = batch
batch_size = x.shape[0]
out= self(x)
loss = self.criterion(out.view(batch_size,-1), y.view(batch_size,-1))
self.log("loss", loss, on_epoch=True, prog_bar=True, logger=True)
return loss
def validation_step(self, val_batch: torch.Tensor, batch_idx):
x, y = val_batch
batch_size = x.shape[0]
out= self(x)
val_loss = self.criterion(out.view(batch_size,-1), y.view(batch_size,-1))
self.log('val_loss', val_loss, on_epoch=True, prog_bar=True, logger=True)
return val_loss
def configure_optimizers(self):
optimizer = optim.AdamW(self.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=self.step_size, gamma=self.gamma)
return {
"optimizer": optimizer,
"lr_scheduler": {
"scheduler": scheduler
},
}
#######################################
# Ensemble of the sFNO_epsilon_v2_proj
# the only diff. is in allowing the projection
# to be taken as an input
#######################################
class sFNO_epsilon_v2_proj(pl.LightningModule):
def __init__(self,
in_chans = 3,
proj = None,
modes = [12, 12, 12, 12],
depths = [3,3,9,3],
dims = [36, 36, 32, 34],
drop_path_rate = 0.,
drop = 0.,
head_init_scale=1.,
padding=9,
with_grid = True,
loss = "rel_l2",
learning_rate = 1e-3,
step_size= 100,
gamma= 0.5,
weight_decay= 1e-5,
):
super().__init__()
if loss == 'l1':
self.criterion = nn.L1Loss()
elif loss == 'l2':
self.criterion = nn.MSELoss()
elif loss == 'smooth_l1':
self.criterion = nn.SmoothL1Loss()
elif loss == "rel_l2":
self.criterion =LpLoss()
self.with_grid = with_grid
self.padding = padding
self.learning_rate = learning_rate
self.step_size = step_size
self.gamma = gamma
self.weight_decay = weight_decay
self.lifting_layers = nn.ModuleList()
        stem = nn.Conv2d(in_chans, dims[0], 1, 1)
        self.lifting_layers.append(stem)
for i in range(3):
lifting_layers = nn.Sequential(
LayerNorm(dims[i], eps=1e-6, data_format= "channels_first"),
nn.Conv2d(dims[i], dims[i+1], kernel_size = 1, stride = 1)
)
self.lifting_layers.append(lifting_layers)
self.stages = nn.ModuleList()
dp_rates = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
cur = 0
for i in range(4):
stage = nn.Sequential(
*[IO_ResNetblock(features_=dims[i],
wavenumber=[modes[i]]*2,
drop_path=dp_rates[cur + j],
drop =drop) for j in range(depths[i])]
)
self.stages.append(stage)
cur += depths[i]
        if proj is None:
            # default projection maps the final stage width dims[-1] down to a single output channel
            self.proj = FC_nn([dims[-1], dims[-1]//2, 1],
                            activation = "relu",
                            outermost_norm=False
                            )
else:
self.proj = proj
def forward_features(self, x):
x=x.permute(0,3,1,2).contiguous()
x = self.lifting_layers[0](x)
x = F.pad(x, [0,self.padding, 0, self.padding])
for i in range(1,4):
x = self.lifting_layers[i](x)
x = self.stages[i](x)
x = x[..., :-self.padding, :-self.padding]
return x
def forward(self, x):
if self.with_grid:
grid = get_grid2D(x.shape, x.device)
x = torch.cat((x, grid), dim=-1)
del grid
x = self.forward_features(x)
x = x.permute(0, 2, 3, 1 )
x = self.proj(x)
return x
def training_step(self, batch: torch.Tensor, batch_idx):
# training_step defines the train loop.
# it is independent of forward
x, y = batch
batch_size = x.shape[0]
out= self(x)
loss = self.criterion(out.view(batch_size,-1), y.view(batch_size,-1))
self.log("loss", loss, on_epoch=True, prog_bar=True, logger=True)
return loss
def validation_step(self, val_batch: torch.Tensor, batch_idx):
x, y = val_batch
batch_size = x.shape[0]
out= self(x)
val_loss = self.criterion(out.view(batch_size,-1), y.view(batch_size,-1))
self.log('val_loss', val_loss, on_epoch=True, prog_bar=True, logger=True)
return val_loss
def configure_optimizers(self, optimizer=None, scheduler=None):
if optimizer is None:
optimizer = optim.AdamW(self.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay)
if scheduler is None:
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=self.step_size, gamma=self.gamma)
return {
"optimizer": optimizer,
"lr_scheduler": {
"scheduler": scheduler
},
}
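# Minimal shape-check sketch (small dims/depths assumed here for speed, not the trained
# configuration; run as `python -m models.sFNO_epsilon_v2` from the repo root).
if __name__ == "__main__":
    _model = sFNO_epsilon_v2(in_chans=3, out_chans=1, depths=[1, 1, 1, 1], dims=[8, 8, 8, 8])
    _x = torch.randn(2, 32, 32, 1)  # (batch, H, W, 1); the coordinate grid adds two extra channels
    print(_model(_x).shape)  # -> torch.Size([2, 1, 32, 32])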
| 10,276
| 35.967626
| 114
|
py
|
Fine-tuning-NOs
|
Fine-tuning-NOs-master/models/FNO_residual.py
|
import pytorch_lightning as pl
import torch
from torch import optim, nn
from .FNO import fourier_conv_2d
from .basics_model import LayerNorm, get_grid2D, FC_nn, set_activ
import torch.nn.functional as F
from utilities import LpLoss
from timm.models.layers import DropPath
#######################################
# Integral Operator Layer
#######################################
class IO_layer(nn.Module):
def __init__(self, features_,
wavenumber,
drop = 0.,
activation = "relu"):
super().__init__()
self.W = nn.Conv2d(features_, features_, 1)
self.IO = fourier_conv_2d(features_, features_,*wavenumber)
self.act = set_activ(activation)
self.dropout = nn.Dropout(drop)
def forward(self, x):
x = self.IO(x)+self.W(x)
x = self.dropout(x)
x = self.act(x)
return x
class FNO_residual_Block(nn.Module):
def __init__(self, features_,
wavenumber,
drop = 0.,
drop_path= 0.,
activation = "relu"):
super().__init__()
self.IO = IO_layer(features_=features_,
wavenumber=wavenumber,
drop= drop,
activation= activation)
self.act = set_activ(activation)
self.norm1 = LayerNorm(features_, eps=1e-5, data_format = "channels_first")
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
input = x
x = self.norm1(x)
x = self.IO(x)
x =input+self.drop_path(x) #NonLocal Layers
return x
#######################################
# Ensemble of the FNO_residual
#######################################
class FNO_residual(pl.LightningModule):
def __init__(self,
wavenumber, features_,
padding = 9,
lifting = None,
proj = None,
dim_input = 1,
with_grid= True,
loss = "rel_l2",
learning_rate = 1e-2,
step_size= 100,
gamma= 0.5,
weight_decay= 1e-5,
drop = 0.,
drop_path= 0.,
activation = "relu"
):
super().__init__()
self.with_grid = with_grid
self.padding = padding
self.layers = len(wavenumber)
self.learning_rate = learning_rate
self.step_size = step_size
self.gamma = gamma
self.weight_decay = weight_decay
if loss == 'l1':
self.criterion = nn.L1Loss()
elif loss == 'l2':
self.criterion = nn.MSELoss()
elif loss == 'smooth_l1':
self.criterion = nn.SmoothL1Loss()
elif loss == "rel_l2":
self.criterion =LpLoss()
if with_grid == True:
dim_input+=2
if lifting is None:
self.lifting = FC_nn([dim_input, features_//2, features_],
activation = "relu",
outermost_norm=False
)
else:
self.lifting = lifting
if proj is None:
self.proj = FC_nn([features_, features_//2, 1],
activation = "relu", drop = drop,
outermost_norm=False
)
else:
self.proj = proj
self.no = []
self.dp_rates = [x.item() for x in torch.linspace(0, drop_path, self.layers )]
for l in range(self.layers):
self.no.append(FNO_residual_Block(features_ = features_,
wavenumber=[wavenumber[l]]*2,
drop= drop,
drop_path= self.dp_rates[l],
activation=activation))
self.no =nn.Sequential(*self.no)
def forward(self, x: torch.Tensor):
if self.with_grid == True:
grid = get_grid2D(x.shape, x.device)
x = torch.cat((x, grid), dim=-1)
x = self.lifting(x)
x = x.permute(0, 3, 1, 2)
x = F.pad(x, [0,self.padding, 0,self.padding])
x = self.no(x)
x = x[..., :-self.padding, :-self.padding]
x = x.permute(0, 2, 3, 1 )
x =self.proj(x)
return x
def training_step(self, batch: torch.Tensor, batch_idx):
# training_step defines the train loop.
# it is independent of forward
x, y = batch
batch_size = x.shape[0]
out= self(x)
loss = self.criterion(out.view(batch_size,-1), y.view(batch_size,-1))
self.log("loss", loss, on_epoch=True, prog_bar=True, logger=True)
return loss
def validation_step(self, val_batch: torch.Tensor, batch_idx):
x, y = val_batch
batch_size = x.shape[0]
out= self(x)
val_loss = self.criterion(out.view(batch_size,-1), y.view(batch_size,-1))
self.log('val_loss', val_loss, on_epoch=True, prog_bar=True, logger=True)
return val_loss
def configure_optimizers(self, optimizer=None, scheduler=None):
if optimizer is None:
optimizer = optim.AdamW(self.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay)
if scheduler is None:
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=self.step_size, gamma=self.gamma)
return {
"optimizer": optimizer,
"lr_scheduler": {
"scheduler": scheduler
},
}
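# Minimal shape-check sketch (toy sizes assumed for illustration only;
# run as `python -m models.FNO_residual` from the repo root).
if __name__ == "__main__":
    _model = FNO_residual(wavenumber=[12, 12, 12, 12], features_=16)
    print(_model(torch.randn(2, 32, 32, 1)).shape)  # -> torch.Size([2, 32, 32, 1])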
| 5,887
| 34.46988
| 109
|
py
|
Fine-tuning-NOs
|
Fine-tuning-NOs-master/models/basics_model.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
##########################################
# Fully connected Layer
##########################################
class FCLayer(nn.Module):
"""Fully connected layer """
def __init__(self, in_feature, out_feature,
activation = "gelu",
is_normalized = True):
super().__init__()
if is_normalized:
self.LinearBlock = nn.Sequential(
nn.Linear(in_feature,out_feature),
LayerNorm(out_feature),
)
else:
self.LinearBlock = nn.Linear(in_feature,out_feature)
self.act = set_activ(activation)
def forward(self, x):
return self.act(self.LinearBlock(x))
##########################################
# Fully connected Block
##########################################
class FC_nn(nn.Module):
r"""Simple MLP to code lifting and projection"""
def __init__(self, sizes = [2, 128, 128, 1],
activation = 'relu',
outermost_linear = True,
outermost_norm = True,
drop = 0.):
super().__init__()
self.dropout = nn.Dropout(drop)
self.net = nn.ModuleList([FCLayer(in_feature= m, out_feature= n,
activation=activation,
is_normalized = False)
for m, n in zip(sizes[:-2], sizes[1:-1])
])
if outermost_linear == True:
self.net.append(FCLayer(sizes[-2],sizes[-1], activation = None,
is_normalized = outermost_norm))
else:
self.net.append(FCLayer(in_feature= sizes[-2], out_feature= sizes[-1],
activation=activation,
is_normalized = outermost_norm))
def forward(self,x):
for module in self.net:
x = module(x)
x = self.dropout(x)
return x
###### Inverse Bottleneck ########
class MLP_inv_bottleneck(nn.Module):
"""Inverse Bottleneck MLP"""
def __init__(self, dim, activation = 'gelu'):
super().__init__()
self.nonlinear = set_activ(activation)
self.L1 = nn.Linear(dim, 4*dim)
self.L2 = nn.Linear(4*dim, dim)
def forward(self,x):
x = self.L1(x)
x = self.nonlinear(x)
x = self.L2(x)
return x
########## Simple MLP ##############
class MLP_join(nn.Module):
"""Simple MLP to code lifting and projection"""
def __init__(self, sizes = [1, 128, 128, 1], activation = 'gelu', drop = 0.):
super(MLP_join, self).__init__()
self.hidden_layer = sizes
self.nonlinear = set_activ(activation)
self.dropout = nn.Dropout(drop)
self.net = nn.ModuleList([nn.Linear(m, n)
for m, n in zip(sizes[:-1], sizes[1:])
])
def forward(self,x):
for module in self.net[:-1]:
x = module(x)
x = self.nonlinear(x)
x = self.dropout(x)
return self.net[-1](x)
########## Layer Normalization ##############
class LayerNorm(nn.Module):
r""" LayerNorm that supports two data formats: channels_last (default) or channels_first.
The ordering of the dimensions in the inputs. channels_last corresponds to inputs with
shape (batch_size, height, width, channels) while channels_first corresponds to inputs
with shape (batch_size, channels, height, width).
"""
def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"):
super().__init__()
self.weight = nn.Parameter(torch.ones(normalized_shape))
self.bias = nn.Parameter(torch.zeros(normalized_shape))
self.eps = eps
self.data_format = data_format
if self.data_format not in ["channels_last", "channels_first"]:
raise NotImplementedError
self.normalized_shape = (normalized_shape, )
def forward(self, x):
if self.data_format == "channels_last":
return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
elif self.data_format == "channels_first":
u = x.mean(1, keepdim=True)
s = (x - u).pow(2).mean(1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.eps)
x = self.weight[:, None, None] * x + self.bias[:, None, None]
return x
######################################################################################
# new additions over the main code
class GroupNorm(nn.GroupNorm):
"""
Group Normalization with 1 group.
Input: tensor in shape [B, C, H, W]
"""
def __init__(self, num_channels, **kwargs):
super().__init__(1, num_channels, **kwargs)
######################################################################################
# Miscellaneous functions
######################################################################################
########## Build the 2D coordinate grid matching the batch shape
def get_grid2D(shape, device):
batchsize, size_x, size_y = shape[0], shape[1], shape[2]
gridx = torch.tensor(np.linspace(0, 1, size_x), dtype=torch.float)
gridx = gridx.reshape(1, size_x, 1, 1).repeat([batchsize, 1, size_y, 1])
gridy = torch.tensor(np.linspace(0, 1, size_y), dtype=torch.float)
gridy = gridy.reshape(1, 1, size_y, 1).repeat([batchsize, size_x, 1, 1])
return torch.cat((gridx, gridy), dim=-1).to(device)
########## Select the activation function of the NN by name
def set_activ(activation):
if activation is not None:
activation = activation.lower()
if activation == 'relu':
nonlinear = F.relu
elif activation == "leaky_relu":
nonlinear = F.leaky_relu
elif activation == 'tanh':
nonlinear = F.tanh
elif activation == 'sine':
nonlinear= torch.sin
elif activation == 'gelu':
nonlinear= F.gelu
    elif activation == 'elu':
        nonlinear = F.elu
    elif activation is None:
        nonlinear = nn.Identity()
    else:
        raise ValueError(f'Activation "{activation}" is not among the supported activations')
return nonlinear
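# Small usage sketch for the helpers above (illustrative shapes only;
# run as `python -m models.basics_model` from the repo root).
if __name__ == "__main__":
    _grid = get_grid2D((4, 64, 64, 1), torch.device("cpu"))
    print(_grid.shape)  # -> torch.Size([4, 64, 64, 2])
    _mlp = FC_nn(sizes=[3, 32, 16], activation="relu", outermost_norm=False)
    print(_mlp(torch.randn(4, 64, 64, 3)).shape)  # -> torch.Size([4, 64, 64, 16])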
| 6,354
| 38.228395
| 94
|
py
|
Fine-tuning-NOs
|
Fine-tuning-NOs-master/models/FNO.py
|
import pytorch_lightning as pl
import torch
from torch import optim, nn
from .basics_model import get_grid2D, set_activ, FC_nn
from utilities import LpLoss
#######################################
# Fourier Convolution,
# \int_D k(x-y) v(y) dy
# = \mathcal{F}^{-1}(P \mathcal{F}(v))
#######################################
class fourier_conv_2d(nn.Module):
def __init__(self, in_, out_, wavenumber1, wavenumber2):
super(fourier_conv_2d, self).__init__()
self.out_ = out_
self.wavenumber1 = wavenumber1
self.wavenumber2 = wavenumber2
scale = (1 / (in_ * out_))
self.weights1 = nn.Parameter(scale * torch.rand(in_, out_, wavenumber1, wavenumber2, dtype=torch.cfloat))
self.weights2 = nn.Parameter(scale * torch.rand(in_, out_, wavenumber1, wavenumber2, dtype=torch.cfloat))
# Complex multiplication
def compl_mul2d(self, input, weights):
# (batch, in_channel, x,y ), (in_channel, out_channel, x,y) -> (batch, out_channel, x,y)
return torch.einsum("bixy,ioxy->boxy", input, weights)
def forward(self, x):
batchsize = x.shape[0]
        # Compute Fourier coefficients (up to a constant phase factor)
x_ft = torch.fft.rfft2(x)
# Multiply relevant Fourier modes
out_ft = torch.zeros(batchsize, self.out_, x.size(-2), x.size(-1)//2 + 1, dtype=torch.cfloat, device=x.device)
out_ft[:, :, :self.wavenumber1, :self.wavenumber2] = \
self.compl_mul2d(x_ft[:, :, :self.wavenumber1, :self.wavenumber2], self.weights1)
out_ft[:, :, -self.wavenumber1:, :self.wavenumber2] = \
self.compl_mul2d(x_ft[:, :, -self.wavenumber1:, :self.wavenumber2], self.weights2)
#Return to physical space
x = torch.fft.irfft2(out_ft, s=(x.size(-2), x.size(-1)))
return x
#######################################
# Fourier Layer:
# \sigma( Wx + FourierConv(x))
#######################################
class Fourier_layer(nn.Module):
def __init__(self, features_, wavenumber, activation = 'relu', is_last = False):
super(Fourier_layer, self).__init__()
self.W = nn.Conv2d(features_, features_, 1)
self.fourier_conv = fourier_conv_2d(features_, features_ , *wavenumber)
if is_last== False:
self.act = set_activ(activation)
else:
self.act = set_activ(None)
def forward(self, x):
x1 = self.fourier_conv(x)
x2 = self.W(x)
return self.act(x1 + x2)
#######################################
# FNO: Ensemble of the FNO
#######################################
class FNO(pl.LightningModule):
def __init__(self,
wavenumber, features_,
padding = 9,
activation= 'relu',
lifting = None,
proj = None,
dim_input = 1,
with_grid= True,
loss = "rel_l2",
learning_rate = 1e-2,
step_size= 100,
gamma= 0.5,
weight_decay= 1e-5,
):
super(FNO, self).__init__()
self.with_grid = with_grid
self.padding = padding
self.layers = len(wavenumber)
self.learning_rate = learning_rate
self.step_size = step_size
self.gamma = gamma
self.weight_decay = weight_decay
if loss == 'l1':
self.criterion = nn.L1Loss()
elif loss == 'l2':
self.criterion = nn.MSELoss()
elif loss == 'smooth_l1':
self.criterion = nn.SmoothL1Loss()
elif loss == "rel_l2":
self.criterion =LpLoss()
if with_grid == True:
dim_input+=2
if lifting is None:
self.lifting = FC_nn([dim_input, features_//2, features_],
activation = "relu",
outermost_norm=False
)
else:
self.lifting = lifting
if proj is None:
self.proj = FC_nn([features_, features_//2, 1],
activation = "relu",
outermost_norm=False
)
else:
self.proj = proj
self.fno = []
for l in range(self.layers-1):
self.fno.append(Fourier_layer(features_ = features_,
wavenumber=[wavenumber[l]]*2,
activation = activation))
self.fno.append(Fourier_layer(features_=features_,
wavenumber=[wavenumber[-1]]*2,
activation = activation,
is_last= True))
self.fno =nn.Sequential(*self.fno)
def forward(self, x: torch.Tensor):
if self.with_grid == True:
grid = get_grid2D(x.shape, x.device)
x = torch.cat((x, grid), dim=-1)
x = self.lifting(x)
x = x.permute(0, 3, 1, 2)
x = nn.functional.pad(x, [0,self.padding, 0,self.padding])
x = self.fno(x)
x = x[..., :-self.padding, :-self.padding]
x = x.permute(0, 2, 3, 1 )
x =self.proj(x)
return x
def training_step(self, batch: torch.Tensor, batch_idx):
# training_step defines the train loop.
# it is independent of forward
x, y = batch
batch_size = x.shape[0]
out= self(x)
loss = self.criterion(out.view(batch_size,-1), y.view(batch_size,-1))
self.log("loss", loss, on_epoch=True, prog_bar=True, logger=True)
return loss
def validation_step(self, val_batch: torch.Tensor, batch_idx):
x, y = val_batch
batch_size = x.shape[0]
out= self(x)
val_loss = self.criterion(out.view(batch_size,-1), y.view(batch_size,-1))
self.log('val_loss', val_loss, on_epoch=True, prog_bar=True, logger=True)
return val_loss
def configure_optimizers(self, optimizer=None, scheduler=None):
if optimizer is None:
optimizer = optim.AdamW(self.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay)
if scheduler is None:
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=self.step_size, gamma=self.gamma)
return {
"optimizer": optimizer,
"lr_scheduler": {
"scheduler": scheduler
},
}
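# Minimal shape-check sketch (toy sizes assumed for illustration, not the trained configuration;
# run as `python -m models.FNO` from the repo root).
if __name__ == "__main__":
    _conv = fourier_conv_2d(in_=8, out_=8, wavenumber1=6, wavenumber2=6)
    print(_conv(torch.randn(2, 8, 32, 32)).shape)  # -> torch.Size([2, 8, 32, 32])
    _model = FNO(wavenumber=[12, 12, 12, 12], features_=16)
    print(_model(torch.randn(2, 32, 32, 1)).shape)  # -> torch.Size([2, 32, 32, 1])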
| 6,612
| 39.078788
| 119
|
py
|
Fine-tuning-NOs
|
Fine-tuning-NOs-master/models/sFNO_epsilon_v1.py
|
import pytorch_lightning as pl
import torch
from torch import optim, nn
from .FNO import fourier_conv_2d
from .basics_model import LayerNorm, get_grid2D, FC_nn, set_activ
import torch.nn.functional as F
from utilities import LpLoss
from timm.models.layers import DropPath
#######################################
# Integral Operator Layer
#######################################
class IO_layer(nn.Module):
def __init__(self, features_,
wavenumber,
drop = 0.,
activation = "relu"):
super().__init__()
self.W = nn.Conv2d(features_, features_, 1)
self.IO = fourier_conv_2d(features_, features_,*wavenumber)
self.act = set_activ(activation)
self.dropout = nn.Dropout(drop)
def forward(self, x):
x = self.IO(x)+self.W(x)
x = self.dropout(x)
x = self.act(x)
return x
class MetaFormerNO_Block(nn.Module):
def __init__(self, features_,
wavenumber,
drop = 0.,
drop_path= 0.,
activation = "relu"):
super().__init__()
self.IO = IO_layer(features_=features_,
wavenumber=wavenumber,
drop= drop,
activation = activation)
self.norm1 = LayerNorm(features_, eps=1e-5, data_format = "channels_first")
self.norm2 = LayerNorm(features_, eps=1e-5, data_format = "channels_last")
self.pwconv1 = nn.Linear(features_, 4*features_) # pointwise/1x1 convs, implemented with linear layers
self.act = set_activ(activation) if activation is not None else set_activ("gelu")
self.pwconv2 = nn.Linear(4*features_, features_)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
input = x
x = self.norm1(x)
x = self.IO(x)
x =input+self.drop_path(x) #NonLocal Layers
input = x
x = x.permute(0, 2, 3, 1)# (N, C, H, W)-> (N, H, W, C)
x = self.norm2(x)
x = self.pwconv1(x)
x = self.act(x)
x = self.pwconv2(x)
x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W)
x = input + self.drop_path(x)
return x
#######################################
# Ensemble of the sFNO_epsilon_v1
#######################################
class sFNO_epsilon_v1(pl.LightningModule):
def __init__(self,
wavenumber, features_,
padding = 9,
lifting = None,
proj = None,
dim_input = 1,
with_grid= True,
loss = "rel_l2",
learning_rate = 1e-2,
step_size= 100,
gamma= 0.5,
weight_decay= 1e-5,
drop = 0.,
drop_path= 0.,
activation = "relu"
):
super().__init__()
self.with_grid = with_grid
self.padding = padding
self.layers = len(wavenumber)
self.learning_rate = learning_rate
self.step_size = step_size
self.gamma = gamma
self.weight_decay = weight_decay
if loss == 'l1':
self.criterion = nn.L1Loss()
elif loss == 'l2':
self.criterion = nn.MSELoss()
elif loss == 'smooth_l1':
self.criterion = nn.SmoothL1Loss()
elif loss == "rel_l2":
self.criterion =LpLoss()
if with_grid == True:
dim_input+=2
if lifting is None:
self.lifting = FC_nn([dim_input, features_//2, features_],
activation = "relu",
outermost_norm=False
)
else:
self.lifting = lifting
if proj is None:
self.proj = FC_nn([features_, features_//2, 1],
activation = "relu", drop = drop,
outermost_norm=False
)
else:
self.proj = proj
self.no = []
self.dp_rates = [x.item() for x in torch.linspace(0, drop_path, self.layers )]
for l in range(self.layers):
self.no.append(MetaFormerNO_Block(features_ = features_,
wavenumber=[wavenumber[l]]*2,
drop= drop,
drop_path= self.dp_rates[l],
activation=activation))
self.no =nn.Sequential(*self.no)
def forward(self, x: torch.Tensor):
if self.with_grid == True:
grid = get_grid2D(x.shape, x.device)
x = torch.cat((x, grid), dim=-1)
x = self.lifting(x)
x = x.permute(0, 3, 1, 2)
x = F.pad(x, [0,self.padding, 0,self.padding])
x = self.no(x)
x = x[..., :-self.padding, :-self.padding]
x = x.permute(0, 2, 3, 1 )
x =self.proj(x)
return x
def training_step(self, batch: torch.Tensor, batch_idx):
# training_step defines the train loop.
# it is independent of forward
x, y = batch
batch_size = x.shape[0]
out= self(x)
loss = self.criterion(out.view(batch_size,-1), y.view(batch_size,-1))
self.log("loss", loss, on_epoch=True, prog_bar=True, logger=True)
return loss
def validation_step(self, val_batch: torch.Tensor, batch_idx):
x, y = val_batch
batch_size = x.shape[0]
out= self(x)
val_loss = self.criterion(out.view(batch_size,-1), y.view(batch_size,-1))
self.log('val_loss', val_loss, on_epoch=True, prog_bar=True, logger=True)
return val_loss
def configure_optimizers(self, optimizer=None, scheduler=None):
if optimizer is None:
optimizer = optim.AdamW(self.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay)
if scheduler is None:
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=self.step_size, gamma=self.gamma)
return {
"optimizer": optimizer,
"lr_scheduler": {
"scheduler": scheduler
},
}
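# Minimal shape-check sketch (toy sizes assumed for illustration only;
# run as `python -m models.sFNO_epsilon_v1` from the repo root).
if __name__ == "__main__":
    _model = sFNO_epsilon_v1(wavenumber=[12, 12], features_=16)
    print(_model(torch.randn(2, 32, 32, 1)).shape)  # -> torch.Size([2, 32, 32, 1])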
| 6,482
| 35.627119
| 110
|
py
|
Fine-tuning-NOs
|
Fine-tuning-NOs-master/models/sFNO.py
|
import pytorch_lightning as pl
import torch
from torch import optim, nn
from .FNO import fourier_conv_2d
from .basics_model import LayerNorm, get_grid2D, FC_nn, set_activ
import torch.nn.functional as F
from utilities import LpLoss
#######################################
# Integral Operator Layer
#######################################
class IO_layer(nn.Module):
def __init__(self, features_,
wavenumber,
drop = 0.,
activation = "relu"):
super().__init__()
self.W = nn.Conv2d(features_, features_, 1)
self.IO = fourier_conv_2d(features_, features_,*wavenumber)
self.act = set_activ(activation)
self.dropout = nn.Dropout(drop)
def forward(self, x):
x = self.IO(x)+self.W(x)
x = self.dropout(x)
x = self.act(x)
return x
#######################################
# Integral Operator Block
#######################################
class IO_Block(nn.Module):
def __init__(self, features_,
wavenumber,
drop = 0.,
activation = "relu"):
super().__init__()
self.IO = IO_layer(features_=features_,
wavenumber=wavenumber,
drop= drop,
activation = activation)
self.pwconv1 = nn.Linear(features_, 4*features_) # pointwise/1x1 convs, implemented with linear layers
self.act = set_activ(activation) if activation is not None else set_activ("gelu")
self.norm = nn.LayerNorm(features_, eps=1e-5)
self.pwconv2 = nn.Linear(4*features_, features_) #
def forward(self, x):
x =(self.IO(x)).permute(0,2,3,1) #B C W H -> B W H C
x = self.pwconv1(x)
x = self.act(x)
x = self.pwconv2(x)
x = self.norm(x)
x = x.permute(0, 3, 1, 2)
return x
#######################################
# sFNO: Ensemble of the sFNO
#######################################
class sFNO(pl.LightningModule):
def __init__(self,
wavenumber, features_,
padding = 9,
lifting = None,
proj = None,
dim_input = 1,
with_grid= True,
loss = "rel_l2",
learning_rate = 1e-2,
step_size= 100,
gamma= 0.5,
weight_decay= 1e-5,
drop = 0.,
activation = "relu"
):
super().__init__()
self.with_grid = with_grid
self.padding = padding
self.layers = len(wavenumber)
self.learning_rate = learning_rate
self.step_size = step_size
self.gamma = gamma
self.weight_decay = weight_decay
if loss == 'l1':
self.criterion = nn.L1Loss()
elif loss == 'l2':
self.criterion = nn.MSELoss()
elif loss == 'smooth_l1':
self.criterion = nn.SmoothL1Loss()
elif loss == "rel_l2":
self.criterion =LpLoss()
if with_grid == True:
dim_input+=2
if lifting is None:
self.lifting = FC_nn([dim_input, features_//2, features_],
activation = "relu",
outermost_norm=False
)
else:
self.lifting = lifting
if proj is None:
self.proj = FC_nn([features_, features_//2, 1],
activation = "relu", drop = drop,
outermost_norm=False
)
else:
self.proj = proj
self.fno = []
for l in range(self.layers):
self.fno.append(IO_Block(features_ = features_,
wavenumber=[wavenumber[l]]*2,
drop= drop,
activation= activation))
self.fno =nn.Sequential(*self.fno)
def forward(self, x: torch.Tensor):
if self.with_grid == True:
grid = get_grid2D(x.shape, x.device)
x = torch.cat((x, grid), dim=-1)
x = self.lifting(x)
x = x.permute(0, 3, 1, 2)
x = F.pad(x, [0,self.padding, 0,self.padding])
x = self.fno(x)
x = x[..., :-self.padding, :-self.padding]
x = x.permute(0, 2, 3, 1 )
x =self.proj(x)
return x
def training_step(self, batch: torch.Tensor, batch_idx):
# training_step defines the train loop.
# it is independent of forward
x, y = batch
batch_size = x.shape[0]
out= self(x)
loss = self.criterion(out.view(batch_size,-1), y.view(batch_size,-1))
self.log("loss", loss, on_epoch=True, prog_bar=True, logger=True)
return loss
def validation_step(self, val_batch: torch.Tensor, batch_idx):
x, y = val_batch
batch_size = x.shape[0]
out= self(x)
val_loss = self.criterion(out.view(batch_size,-1), y.view(batch_size,-1))
self.log('val_loss', val_loss, on_epoch=True, prog_bar=True, logger=True)
return val_loss
def configure_optimizers(self, optimizer=None, scheduler=None):
if optimizer is None:
optimizer = optim.AdamW(self.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay)
if scheduler is None:
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=self.step_size, gamma=self.gamma)
return {
"optimizer": optimizer,
"lr_scheduler": {
"scheduler": scheduler
},
}
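# Minimal shape-check sketch (toy sizes assumed for illustration only;
# run as `python -m models.sFNO` from the repo root).
if __name__ == "__main__":
    _model = sFNO(wavenumber=[12, 12], features_=16)
    print(_model(torch.randn(2, 32, 32, 1)).shape)  # -> torch.Size([2, 32, 32, 1])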
| 5,862
| 35.64375
| 110
|
py
|
Fine-tuning-NOs
|
Fine-tuning-NOs-master/models/sFNO_epsilon_v2_updated.py
|
import pytorch_lightning as pl
import torch
from torch import optim, nn
from .FNO import fourier_conv_2d
from .basics_model import LayerNorm, get_grid2D, set_activ, GroupNorm
import torch.nn.functional as F
from utilities import LpLoss
from timm.models.layers import DropPath, trunc_normal_
import os
from .sFNO_epsilon_v1 import IO_layer
class Mlp(nn.Module):
"""
Implementation of MLP with 1*1 convolutions.
Input: tensor with shape [B, C, H, W]
"""
def __init__(self, in_features,
hidden_features=None,
out_features=None,
activation = "leaky_relu"):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Conv2d(in_features, hidden_features, 1)
self.act = set_activ(activation)
self.fc2 = nn.Conv2d(hidden_features, out_features, 1)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Conv2d):
trunc_normal_(m.weight, std=.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.fc2(x)
return x
#######################################
# Transformer look-alike block with Neural Operators
#######################################
class NOFormerBlock(nn.Module):
def __init__(self, features_,
wavenumber,
drop = 0.,
drop_path= 0.,
activation = "leaky_relu",
use_layer_scale=True,
layer_scale_init_value=1e-5,
norm_layer=GroupNorm,
mlp_ratio=4):
super().__init__()
self.IO = IO_layer(features_=features_,
wavenumber=wavenumber,
drop= drop,
activation = activation)
self.norm1 = norm_layer(features_)
self.norm2 = norm_layer(features_)
self.act = set_activ(activation) if activation is not None else set_activ("gelu")
mlp_hidden_features = int(features_ * mlp_ratio)
self.mlp = Mlp(in_features=features_,
hidden_features=mlp_hidden_features,
activation=activation)
self.drop_path= DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.use_layer_scale = use_layer_scale
if use_layer_scale:
self.layer_scale_1= nn.Parameter(torch.ones((features_))*layer_scale_init_value, requires_grad=True)
self.layer_scale_2= nn.Parameter(torch.ones((features_))*layer_scale_init_value, requires_grad=True)
def forward(self, x):
if self.use_layer_scale:
x = x+ self.drop_path(self.layer_scale_1.unsqueeze(-1).unsqueeze(-1)*self.IO(self.norm1(x)))
x = x+ self.drop_path(self.layer_scale_2.unsqueeze(-1).unsqueeze(-1)*self.mlp(self.norm2(x)))
else:
x = x+ self.drop_path(self.IO(self.norm1(x)))
x = x+ self.drop_path(self.mlp(self.norm2(x)))
return x
#######################################
class sFNO_epsilon_v2_updated(pl.LightningModule):
def __init__(self,
stage_list,
features_stage_list,
wavenumber_stage_list,
dim_input = None,
dim_output = None,
proj= None,
lifting=None,
activation="leaky_relu",
norm_layer=GroupNorm,
drop_rate= 0.,
drop_path_rate= 0.,
use_layer_scale=True,
layer_scale_init_value=1e-5,
with_grid=True,
padding=9,
loss = "rel_l2",
learning_rate = 1e-3,
step_size= 70,
gamma= 0.5,
weight_decay= 1e-5,
mlp_ratio=4):
super().__init__()
self.save_hyperparameters()
if loss == 'l1':
self.criterion = nn.L1Loss()
elif loss == 'l2':
self.criterion = nn.MSELoss()
elif loss == 'smooth_l1':
self.criterion = nn.SmoothL1Loss()
elif loss == "rel_l2":
self.criterion =LpLoss()
self.padding = padding
self.with_grid = with_grid
self.padding = padding
self.learning_rate = learning_rate
self.step_size = step_size
self.gamma = gamma
self.weight_decay = weight_decay
if with_grid == True:
dim_input+=2
if lifting is None:
self.lifting = Mlp(in_features=dim_input,
out_features=features_stage_list[0],
hidden_features=features_stage_list[0],
activation=activation)
else:
self.lifting = lifting
if proj is None:
self.proj = Mlp(in_features=features_stage_list[-1],
out_features=dim_output,
hidden_features=features_stage_list[-1],
activation=activation)
else:
self.proj = proj
assert len(features_stage_list) == len(wavenumber_stage_list) == len(stage_list)
network = []
dp_rates=[x.item() for x in torch.linspace(0, drop_path_rate, sum(stage_list))]
cur = 0
for i in range(len(stage_list)):
stage = self.Ensemble_stage(features_=features_stage_list[i],
index= i,
layers=stage_list,
wavenumber_stage=wavenumber_stage_list[i],
mlp_ratio=mlp_ratio,
activation=activation,
norm_layer=norm_layer,
drop_rate=drop_rate,
drop_path_rate=dp_rates[cur:cur+stage_list[i]],
use_layer_scale=use_layer_scale,
layer_scale_init_value=layer_scale_init_value)
network.append(stage)
cur += stage_list[i]
self.network = nn.ModuleList(network)
#######################################
def Ensemble_stage(self, features_,
index,
layers,
wavenumber_stage,
mlp_ratio,
activation,
norm_layer,
drop_rate,
drop_path_rate,
use_layer_scale,
layer_scale_init_value,
):
"""
generate the ensemble of blocks
return: NOFormerBlock
"""
blocks = []
for j in range(layers[index]):
blocks.append(NOFormerBlock(features_= features_,
wavenumber= [wavenumber_stage]*2,
norm_layer=norm_layer,
drop= drop_rate,
drop_path= drop_path_rate[j],
use_layer_scale=use_layer_scale,
layer_scale_init_value=layer_scale_init_value,
mlp_ratio=mlp_ratio,
activation=activation))
blocks = nn.Sequential(*blocks)
return blocks
def forward_NOFormer(self, x):
"""
forward the NOFormer
"""
for stage in self.network:
x = stage(x)
return x
def add_grid(self, x):
"""
add grid to the input
"""
grid = get_grid2D(x.shape, x.device)
x = torch.cat((x, grid), dim=-1)
del grid
return x
def forward(self, x):
if self.with_grid == True:
x = self.add_grid(x)
x = self.lifting(x.permute(0, 3, 1, 2))
x = F.pad(x, [0,self.padding, 0,self.padding])
x = self.forward_NOFormer(x)
x =x[..., :-self.padding, :-self.padding]
x = self.proj(x)
return x.permute(0, 2, 3, 1)
def training_step(self, batch: torch.Tensor, batch_idx):
# training_step defines the train loop.
# it is independent of forward
x, y = batch
batch_size = x.shape[0]
out= self(x)
loss = self.criterion(out.view(batch_size,-1), y.view(batch_size,-1))
self.log("loss", loss, on_epoch=True, prog_bar=True, logger=True)
return loss
def validation_step(self, val_batch: torch.Tensor, batch_idx):
x, y = val_batch
batch_size = x.shape[0]
out= self(x)
val_loss = self.criterion(out.view(batch_size,-1), y.view(batch_size,-1))
self.log('val_loss', val_loss, on_epoch=True, prog_bar=True, logger=True)
return val_loss
def test_step(self, test_batch: torch.Tensor, batch_idx):
x, y = test_batch
batch_size = x.shape[0]
out= self(x)
test_loss = self.criterion(out.view(batch_size,-1), y.view(batch_size,-1))
self.log('test_loss', test_loss, on_epoch=True, prog_bar=True, logger=True)
return test_loss
def configure_optimizers(self, optimizer=None, scheduler=None):
if optimizer is None:
optimizer = optim.AdamW(self.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay)
if scheduler is None:
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=self.step_size, gamma=self.gamma)
return {
"optimizer": optimizer,
"lr_scheduler": {
"scheduler": scheduler
},
}
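# Minimal shape-check sketch (toy stage sizes assumed for illustration, not the trained settings;
# run as `python -m models.sFNO_epsilon_v2_updated` from the repo root).
if __name__ == "__main__":
    _model = sFNO_epsilon_v2_updated(stage_list=[1, 1],
                                     features_stage_list=[8, 8],
                                     wavenumber_stage_list=[6, 6],
                                     dim_input=1, dim_output=2)
    print(_model(torch.randn(2, 32, 32, 1)).shape)  # -> torch.Size([2, 32, 32, 2])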
| 10,270
| 38.35249
| 112
|
py
|
Fine-tuning-NOs
|
Fine-tuning-NOs-master/models/__init__.py
|
from .basics_model import *
from .FNO import FNO
from .FNO_residual import FNO_residual
from .sFNO_epsilon_v2 import sFNO_epsilon_v2, sFNO_epsilon_v2_proj
from .sFNO import sFNO
from .sFNO_epsilon_v1 import sFNO_epsilon_v1
from .sFNO_epsilon_v2_updated import sFNO_epsilon_v2_updated
| 283
| 39.571429
| 66
|
py
|
Fine-tuning-NOs
|
Fine-tuning-NOs-master/make_graph/make_box_plots.py
|
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser('Freq', add_help=False)
parser.add_argument('-f','--freq', type=int,
default=7)
parser.add_argument('-min','--min', type=float,
default=0.)
parser.add_argument('-max','--max', type=float,
default=0.5)
args=parser.parse_args()
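    # Example invocation (a sketch; assumes make_graph/test_GRF_7Hz.csv already exists):
    #   python make_graph/make_box_plots.py -f 7 -min 0.0 -max 0.5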
df = pd.read_csv(f"make_graph/test_GRF_{args.freq}Hz.csv")
print(df)
fig, ax = plt.subplots(figsize=(10,6))
#ax.set_yscale("log")
''' box = sns.boxplot(
data=df,
notch=False, showcaps=True,
flierprops={"marker": "x"},
boxprops={"facecolor": (.4, .6, .8, .5)},
medianprops={"color": "coral"}, ax=ax, linewidth =2, orient = "h"
)'''
box = sns.violinplot(
data=df,
flierprops={"marker": "x"}, inner="stick",
palette= "pastel6", width=0.8,
ax=ax, linewidth =2.5, orient = "h",
scale="width",
)
#box.set(xscale="log")
ax.set_xlim([args.min, args.max])
# Tweak the visual presentation
ax.xaxis.grid(False)
ax.yaxis.grid(True)
sns.despine(trim=True, left=True)
box.set_title(f"Test Loss (rel. L2): Helmholtz {args.freq} Hz")
plt.savefig(f'make_graph/box_plot_test_loss_{args.freq}.png')
| 1,441
| 31.772727
| 73
|
py
|
Fine-tuning-NOs
|
Fine-tuning-NOs-master/utilities/model_factory.py
|
from models import *
def choosing_model(config):
c_nn = config["model"]
c_train = config["train"]
# 7 Hz data only contains the real part of the field
if config["Project"]["database"]=='GRF_7Hz':
if config["Project"]["name"] == "FNO":
model =FNO(
wavenumber = c_nn["modes_list"],
features_ = c_nn["features"],
learning_rate = c_train["lr"],
step_size= c_train["step_size"],
gamma= c_train["gamma"],
weight_decay= c_train["weight_decay"],
activation = c_nn["activ"]
)
elif config["Project"]["name"] == "sFNO":
model =sFNO(
wavenumber = c_nn["modes_list"],
drop = c_nn["drop"],
features_ = c_nn["features"],
learning_rate = c_train["lr"],
step_size= c_train["step_size"],
gamma= c_train["gamma"],
weight_decay= c_train["weight_decay"],
activation = c_nn["activ"]
)
elif config["Project"]["name"] == "FNO_residual":
model =FNO_residual(
wavenumber = c_nn["modes_list"],
drop = c_nn["drop"],
drop_path = c_nn["drop_path"],
features_ = c_nn["features"],
learning_rate = c_train["lr"],
step_size= c_train["step_size"],
gamma= c_train["gamma"],
weight_decay= c_train["weight_decay"],
activation = c_nn["activ"]
)
elif config["Project"]["name"] == "sFNO+epsilon_v1":
model =sFNO_epsilon_v1(
wavenumber = c_nn["modes_list"],
drop = c_nn["drop"],
drop_path = c_nn["drop_path"],
features_ = c_nn["features"],
learning_rate = c_train["lr"],
step_size= c_train["step_size"],
gamma= c_train["gamma"],
weight_decay= c_train["weight_decay"],
activation = c_nn["activ"]
)
elif config["Project"]["name"] == "sFNO+epsilon_v2":
model =sFNO_epsilon_v2(
modes = c_nn["modes_list"],
drop_path_rate = c_nn["drop_path"],
drop = c_nn["drop"],
depths = c_nn["depths"],
dims = c_nn["dims"],
learning_rate = c_train["lr"],
step_size= c_train["step_size"],
gamma= c_train["gamma"],
weight_decay= c_train["weight_decay"],
                        # sFNO_epsilon_v2 hard-codes GELU internally and takes no activation argument
                        )
    # 12/15 Hz data contains both the real and imaginary parts of the field
    elif config["Project"]["database"] in ('GRF_12Hz', 'GRF_15Hz'):
if config["Project"]["name"] == "FNO":
Proj = torch.nn.Linear(c_nn["features"], 2, dtype=torch.float)
model =FNO(
wavenumber = c_nn["modes_list"],
features_ = c_nn["features"],
learning_rate = c_train["lr"],
step_size= c_train["step_size"],
gamma= c_train["gamma"],
weight_decay= c_train["weight_decay"],
proj = Proj,
activation = c_nn["activ"]
)
elif config["Project"]["name"] == "sFNO":
Proj = torch.nn.Linear(c_nn["features"], 2, dtype=torch.float)
model =sFNO(
wavenumber = c_nn["modes_list"],
drop = c_nn["drop"],
features_ = c_nn["features"],
learning_rate = c_train["lr"],
step_size= c_train["step_size"],
gamma= c_train["gamma"],
weight_decay= c_train["weight_decay"],
proj = Proj,
activation = c_nn["activ"]
)
elif config["Project"]["name"] == "FNO_residual":
Proj = torch.nn.Linear(c_nn["features"], 2, dtype=torch.float)
model =FNO_residual(
wavenumber = c_nn["modes_list"],
drop = c_nn["drop"],
drop_path = c_nn["drop_path"],
features_ = c_nn["features"],
learning_rate = c_train["lr"],
step_size= c_train["step_size"],
gamma= c_train["gamma"],
weight_decay= c_train["weight_decay"],
proj = Proj,
activation = c_nn["activ"]
)
elif config["Project"]["name"] == "sFNO+epsilon_v1":
Proj = torch.nn.Linear(c_nn["features"], 2, dtype=torch.float)
model =sFNO_epsilon_v1(
wavenumber = c_nn["modes_list"],
drop = c_nn["drop"],
drop_path = c_nn["drop_path"],
features_ = c_nn["features"],
learning_rate = c_train["lr"],
step_size= c_train["step_size"],
gamma= c_train["gamma"],
weight_decay= c_train["weight_decay"],
proj = Proj,
activation = c_nn["activ"]
)
elif config["Project"]["name"] == "sFNO+epsilon_v2":
#sFNO_epsilon_v2_proj is the same arch as sFNO_epsilon_v2
Proj = torch.nn.Linear(c_nn["dims"][-1], 2, dtype=torch.float)
model =sFNO_epsilon_v2_proj(
modes = c_nn["modes_list"],
drop_path_rate = c_nn["drop_path"],
drop = c_nn["drop"],
depths = c_nn["depths"],
dims = c_nn["dims"],
learning_rate = c_train["lr"],
step_size= c_train["step_size"],
gamma= c_train["gamma"],
weight_decay= c_train["weight_decay"],
proj = Proj,
                        # sFNO_epsilon_v2_proj also hard-codes GELU and takes no activation argument
                        )
elif config["Project"]["name"] == "sFNO+epsilon_v2_updated":
            #sFNO_epsilon_v2_updated follows the sFNO_epsilon_v2 architecture
            #and additionally allows an independent projection layer to be passed in.
#Proj = torch.nn.Linear(c_nn["dims"][-1], 2, dtype=torch.float)
model =sFNO_epsilon_v2_updated(
stage_list = c_nn["depths"],
features_stage_list = c_nn["dims"],
wavenumber_stage_list = c_nn["modes_list"],
dim_input = 1,
dim_output = 2,
#proj = Proj,
activation = c_nn["activ"],
drop_rate = c_nn["drop"],
drop_path_rate = c_nn["drop_path"],
learning_rate = c_train["lr"],
step_size= c_train["step_size"],
gamma= c_train["gamma"],
weight_decay= c_train["weight_decay"],
)
return model
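# Minimal sketch of building a model from a config dict (keys mirror the YAML files under config/;
# the values below are illustrative, not the repo's training settings).
# Run as `python -m utilities.model_factory` from the repo root.
if __name__ == "__main__":
    _config = {
        "Project": {"database": "GRF_7Hz", "name": "FNO"},
        "model": {"modes_list": [12, 12, 12, 12], "features": 16, "activ": "relu"},
        "train": {"lr": 1e-3, "step_size": 100, "gamma": 0.5, "weight_decay": 1e-5},
    }
    print(choosing_model(_config))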
| 7,309
| 42.254438
| 84
|
py
|
Fine-tuning-NOs
|
Fine-tuning-NOs-master/utilities/loss.py
|
import torch
#loss function with rel/abs Lp loss
class LpLoss(object):
def __init__(self, d=2, p=2, size_average=True, reduction=True):
super(LpLoss, self).__init__()
        # Dimension and Lp-norm type must be positive
assert d > 0 and p > 0
self.d = d
self.p = p
self.reduction = reduction
self.size_average = size_average
def abs(self, x, y):
num_examples = x.size()[0]
#Assume uniform mesh
h = 1.0 / (x.size()[1] - 1.0)
all_norms = (h**(self.d/self.p))*torch.norm(x.view(num_examples,-1) - y.view(num_examples,-1), self.p, 1)
if self.reduction:
if self.size_average:
return torch.mean(all_norms)
else:
return torch.sum(all_norms)
return all_norms
def rel(self, x, y):
num_examples = x.size()[0]
diff_norms = torch.norm(x.reshape(num_examples,-1) - y.reshape(num_examples,-1), self.p, 1)
y_norms = torch.norm(y.reshape(num_examples,-1), self.p, 1)
if self.reduction:
if self.size_average:
return torch.mean(diff_norms/y_norms)
else:
return torch.sum(diff_norms/y_norms)
return diff_norms/y_norms
def __call__(self, x, y):
return self.rel(x, y)
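# Quick sanity-check sketch of the default relative L2 loss
# (run as `python -m utilities.loss` from the repo root).
if __name__ == "__main__":
    _loss = LpLoss()
    _a = torch.randn(4, 128)
    print(_loss(_a, _a).item())        # identical inputs -> 0.0
    print(_loss(1.1 * _a, _a).item())  # scaled inputs   -> ~0.1 relative error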
| 1,326
| 27.234043
| 113
|
py
|
Fine-tuning-NOs
|
Fine-tuning-NOs-master/utilities/loading_data.py
|
import numpy as np
import torch
from bisect import bisect
import os
from torch.utils.data import Dataset, DataLoader
def to_numpy(x):
return x.detach().cpu().numpy()
#files Loader
def MyLoader(GL, do = "train", config = None, args=None):
if config is not None:
batch_size = config['train']['batchsize']
workers = config['data']['load_workers']
database = config['Project']['database']
if database == 'GRF_7Hz':
size = 128
elif database in {'GRF_12Hz','GRF_15Hz'}:
size = 64
elif args is not None:
batch_size = args.batchsize
workers = args.load_workers
database = args.database
if database == 'GRF_7Hz':
size = 128
elif database in {'GRF_12Hz', 'GRF_15Hz'}:
size = 64
    else:
        # fallback when neither a config nor CLI args are given;
        # the 7 Hz setting is assumed here so that database/size are defined below
        batch_size = 50
        workers = 4
        database = 'GRF_7Hz'
        size = 128
if do == 'train':
list_x_train, list_y_train = GL('train')
list_x_valid, list_y_valid = GL('validation')
Train_Data_set = File_Loader(list_x_train,list_y_train, size = size, data=database)
Valid_Data_set = File_Loader(list_x_valid,list_y_valid, size = size, data=database)
##### setting the data Loader
train_loader = DataLoader(dataset = Train_Data_set,
shuffle = True,
batch_size = batch_size,
num_workers= workers)
valid_loader = DataLoader(dataset = Valid_Data_set,
shuffle = False,
batch_size =batch_size,
num_workers= workers)
return train_loader, valid_loader
elif do == 'test':
list_x_test, list_y_test = GL('test')
Test_Data_set = File_Loader(list_x_test, list_y_test, size = size, data=database)
##### setting the data Loader
test_loader = DataLoader(dataset = Test_Data_set,
shuffle = False,
batch_size = batch_size,
num_workers= workers)
return test_loader
class GettingLists(object):
    # Generates the train/valid/test file lists; each .npy file holds 5000 velocity/pressure samples
def __init__(self, data_for_training,
wave_eq = "acoustic",
data_base = "GRF_7Hz",
PATH = 'databases',
batch_data_size = int(5000)):
super(GettingLists, self).__init__()
self.PATH = os.path.join(PATH, wave_eq, data_base)
self.batch_data = batch_data_size
valid_limit = data_for_training//self.batch_data
self.valid_limit = valid_limit
if data_base == 'GRF_7Hz':
self.end = int(6)
elif data_base in {'GRF_12Hz', 'GRF_15Hz'} :
self.end = int(10)
def get_list(self, do):
if do == 'train':
in_limit_train = np.array([os.path.join(self.PATH,
'model',
f'velocity{k}.npy') for k in \
range(1,self.valid_limit+1)])
out_limit_train = np.array([os.path.join(self.PATH,
'data',
f'pressure{k}.npy')for k in \
range(1,self.valid_limit+1)])
return in_limit_train, out_limit_train
elif do == 'validation':
in_limit_valid = np.array([os.path.join(self.PATH,
'model',
f'velocity{k}.npy') for k in \
range(self.end,self.end+1)])
out_limit_valid= np.array([os.path.join(self.PATH,
'data',
f'pressure{k}.npy') for k in \
range(self.end,self.end+1)])
return in_limit_valid, out_limit_valid
elif do =='test':
in_limit_test = np.array([os.path.join(self.PATH,
'model', f'velocity{k}.npy') for k in \
range(self.valid_limit+1, self.end+1)])
out_limit_test = np.array([os.path.join(self.PATH,
'data', f'pressure{k}.npy')for k in \
range(self.valid_limit+1, self.end+1)])
return in_limit_test, out_limit_test
def __call__(self, do = 'train'):
return self.get_list(do)
class File_Loader(Dataset):
#data loader file
def __init__(self, data_paths, target_paths, size =128, data = "GRF"):
self.size = size
self.data = data
if self.data == "GRF_7Hz":
self.data_memmaps = [np.load(path, mmap_mode='r') for path in data_paths]
self.target_memmaps = [np.load(path, mmap_mode='r') for path in target_paths]
elif self.data == ("GRF_12Hz") or ("GRF_15Hz") :
self.data_memmaps = [np.load(path, mmap_mode='r').view(float) for path in data_paths]
self.target_memmaps = [np.load(path, mmap_mode='r').view(float) for path in target_paths]
elif self.data == ("GRF_12Hz_vz") or ("GRF_15Hz_vz"):
self.data_memmaps = [np.load(path, mmap_mode='r').view(float) for path in data_paths]
self.target_memmaps = [np.load(path, mmap_mode='r').view(float).reshape(2,self.size,self.size,2) for path in target_paths]
self.start_indices = [0] * len(data_paths)
self.data_count = 0
for index, memmap in enumerate(self.data_memmaps):
self.start_indices[index] = self.data_count
self.data_count += memmap.shape[0]
def __len__(self):
return self.data_count
def __getitem__(self, index):
memmap_index = bisect(self.start_indices, index) - 1
index_in_memmap = index - self.start_indices[memmap_index]
data = np.copy(self.data_memmaps[memmap_index][index_in_memmap])
target = np.copy(self.target_memmaps[memmap_index][index_in_memmap])
if self.data == "GRF_7Hz":
return torch.tensor(data*1e-3, dtype=torch.float).view(self.size,self.size,1), torch.tensor(target, dtype=torch.float).view(self.size,self.size,1)
elif self.data == ("GRF_12Hz") or ("GRF_15Hz"):
return torch.tensor(data*1e-3, dtype=torch.float).view(self.size,self.size,1), torch.tensor(target, dtype=torch.float).view(self.size,self.size,2)
elif self.data == ("GRF_12Hz_vz") or ("GRF_15Hz_vz"):
return torch.tensor(data*1e-3, dtype=torch.float).view(self.size,self.size,1), torch.tensor(target, dtype=torch.float).view(2,self.size,self.size,2)
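# Usage sketch (hedged): assumes the databases/<wave_eq>/<data_base>/{model,data} layout above,
# with velocity{k}.npy / pressure{k}.npy files on disk and a config dict like the training YAMLs.
#   GL = GettingLists(data_for_training=25000, wave_eq="acoustic", data_base="GRF_7Hz")
#   train_loader, valid_loader = MyLoader(GL, do="train", config=config)
#   test_loader = MyLoader(GL, do="test", config=config)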
| 6,671
| 43.18543
| 158
|
py
|
Fine-tuning-NOs
|
Fine-tuning-NOs-master/utilities/__init__.py
|
from .loading_data import *
from .loss import LpLoss
from .model_factory import choosing_model
from .plotting_data import *
from .saving_npy_output import *
| 156
| 30.4
| 41
|
py
|
Fine-tuning-NOs
|
Fine-tuning-NOs-master/utilities/plotting_data.py
|
import matplotlib.pyplot as plt
from .loading_data import to_numpy
import os
def plotting(in_, NN_out, out, name, database, PATH,
list_to_plot = None, vmin=-0.5, vmax =0.5,
shrink = 0.8, ksample = 0):
if list_to_plot is None:
list_to_plot = [0,1,2,3,4,5]
print("list_to_plot is None, so we plot the first 6 samples")
    assert in_.shape[0] >= len(list_to_plot), "list of samples to plot is bigger than the input size"
in_ = to_numpy(in_)[:list_to_plot[-1]+1,...]
NN_out = to_numpy(NN_out)[:list_to_plot[-1]+1,...]
out = to_numpy(out)[:list_to_plot[-1]+1,...]
s = in_.shape[1]
for k in list_to_plot:
print(f"plotting sample {k}")
in_k = in_[k,...].reshape(s,s)
out_k = out[k,...].reshape(s,s,-1)
NN_k =NN_out[k,...].reshape(s,s,-1)
plt.figure(figsize=(20,10))
plt.subplot(231)
plt.imshow(in_k, vmin=1., vmax =5., cmap = 'jet')
plt.colorbar(shrink =shrink)
plt.title(f'wavespeed: {k}')
plt.subplot(232)
plt.imshow(out_k[:,:,0].reshape(s,s), vmin=vmin, vmax =vmax, cmap = 'seismic')
plt.colorbar(shrink =shrink)
plt.title(f'HDG (real) sample: {k}')
plt.subplot(233)
plt.imshow(NN_k[:,:,0].reshape(s,s), vmin=vmin, vmax =vmax, cmap = 'seismic')
plt.colorbar(shrink =shrink)
plt.title(f'{name} (real) sample: {k}')
plt.subplot(235)
plt.imshow(out_k[:,:,1].reshape(s,s), vmin=vmin, vmax =vmax, cmap = 'seismic')
plt.colorbar(shrink =shrink)
plt.title(f'HDG (imaginary) sample: {k}')
plt.subplot(236)
plt.imshow(NN_k[:,:,1].reshape(s,s), vmin=vmin, vmax =vmax, cmap = 'seismic')
plt.colorbar(shrink =shrink)
plt.title(f'{name} (imaginary) sample: {k}')
saving_dir = os.path.join(PATH, "figures", database, name, f"realization_{ksample}")
if not os.path.exists(saving_dir):
os.makedirs(saving_dir)
plt.savefig(f"{saving_dir}/ex_{k}.png")
| 2,069
| 39.588235
| 104
|
py
|
Fine-tuning-NOs
|
Fine-tuning-NOs-master/utilities/saving_npy_output.py
|
import numpy as np
import os
from .loading_data import to_numpy
def saving_files(in_files, out_files, NN_out_files, NN_name, database, PATH, realization_k):
"""
Saving the files in the directory OOD/database/realization_k
"""
saving_dir = f'{PATH}/{database}/realization_{realization_k}'
if not os.path.exists(saving_dir):
os.makedirs(saving_dir)
if not isinstance(in_files, np.ndarray):
in_files = to_numpy(in_files)
if not isinstance(out_files, np.ndarray):
out_files = to_numpy(out_files)
if not isinstance(NN_out_files, np.ndarray):
NN_out_files = to_numpy(NN_out_files)
np.save(os.path.join(saving_dir, f"wavespeed_{database}.npy"), in_files)
np.save(os.path.join(saving_dir, f"pressure_{database}.npy"), out_files)
np.save(os.path.join(saving_dir, f"pressure_{NN_name}_{database}.npy"), NN_out_files)
| 896
| 39.772727
| 94
|
py
|
Fine-tuning-NOs
|
Fine-tuning-NOs-master/visualization_code/._create_trajectory.py
|