repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
Pytorch-implementation-of-SRNet | Pytorch-implementation-of-SRNet-master/utils/utils.py | """This module provides utility function for training."""
import os
import re
from typing import Any, Dict
import torch
from torch import nn
from opts.options import arguments
opt = arguments()
def saver(state: Dict[str, float], save_dir: str, epoch: int) -> None:
    """Persist a training checkpoint.

    Args:
        state: mapping of values to serialise (model/optimizer state etc.).
        save_dir: directory prefix; must end with a path separator, since the
            file name is appended by plain string concatenation.
        epoch: epoch number embedded in the file name ``net_<epoch>.pt``.
    """
    checkpoint_path = f"{save_dir}net_{epoch}.pt"
    torch.save(state, checkpoint_path)
def latest_checkpoint() -> int:
    """Return the epoch number of the most recent checkpoint.

    Checkpoints are written as ``net_<epoch>.pt`` (see :func:`saver`), so the
    largest integer embedded anywhere in the directory listing is taken to be
    the latest epoch.

    Returns:
        int: latest epoch number, or ``None`` when ``opt.checkpoints_dir``
        does not exist or contains no file names with digits in them.
    """
    latest = None
    if os.path.exists(opt.checkpoints_dir):
        # Join every file name into one string and extract all digit runs.
        all_chkpts = "".join(os.listdir(opt.checkpoints_dir))
        # Raw string fixes the invalid-escape DeprecationWarning of "\d+".
        numbers = re.findall(r"\d+", all_chkpts)
        # Guard on the extracted numbers (not just on the listing being
        # non-empty) so digit-less file names no longer crash max().
        if numbers:
            latest = max(map(int, numbers))
    return latest
def adjust_learning_rate(optimizer: Any, epoch: int) -> None:
    """Step-decay schedule: divide the base LR (``opt.lr``) by 10 every
    30 epochs and apply it to every parameter group."""
    decayed_lr = opt.lr * (0.1 ** (epoch // 30))
    for group in optimizer.param_groups:
        group["lr"] = decayed_lr
# Weight initialization for conv layers and fc layers
def weights_init(param: Any) -> None:
    """Initialise a layer in place: Xavier-uniform for Conv2d weights
    (bias → 0.2 when present), N(0, 0.01) for Linear weights (bias → 0)."""
    if isinstance(param, nn.Linear):
        nn.init.normal_(param.weight.data, mean=0.0, std=0.01)
        nn.init.constant_(param.bias.data, 0.0)
    elif isinstance(param, nn.Conv2d):
        nn.init.xavier_uniform_(param.weight.data)
        if param.bias is not None:
            nn.init.constant_(param.bias.data, 0.2)
| 1,504 | 29.714286 | 75 | py |
Pytorch-implementation-of-SRNet | Pytorch-implementation-of-SRNet-master/model/utils.py | """This module provide building blocks for SRNet."""
from torch import nn
from torch import Tensor
class ConvBn(nn.Module):
    """A 3x3, stride-1, same-padding convolution followed by BatchNorm2d."""

    def __init__(self, in_channels: int, out_channels: int) -> None:
        """Constructor.

        Args:
            in_channels (int): no. of input channels.
            out_channels (int): no. of output channels.
        """
        super().__init__()
        # bias=False because the following BatchNorm has its own shift.
        self.conv = nn.Conv2d(
            in_channels, out_channels,
            kernel_size=3, stride=1, padding=1, bias=False,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)

    def forward(self, inp: Tensor) -> Tensor:
        """Apply the convolution, then normalise the activations.

        Returns:
            Tensor: Output of Conv2D -> BN, same spatial size as ``inp``.
        """
        convolved = self.conv(inp)
        return self.batch_norm(convolved)
class Type1(nn.Module):
    """Type 1 layer of SRNet: ConvBn followed by a ReLU activation."""

    def __init__(self, in_channels: int, out_channels: int) -> None:
        super().__init__()
        self.convbn = ConvBn(in_channels, out_channels)
        self.relu = nn.ReLU()

    def forward(self, inp: Tensor) -> Tensor:
        """Run ``inp`` through ConvBn and rectify.

        Args:
            inp (Tensor): input tensor.

        Returns:
            Tensor: Output of type 1 layer.
        """
        normalised = self.convbn(inp)
        return self.relu(normalised)
class Type2(nn.Module):
    """Type 2 layer of SRNet: a residual block (identity + Type1→ConvBn)."""

    def __init__(self, in_channels: int, out_channels: int) -> None:
        super().__init__()
        self.type1 = Type1(in_channels, out_channels)
        self.convbn = ConvBn(in_channels, out_channels)

    def forward(self, inp: Tensor) -> Tensor:
        """Add the transformed branch back onto the input (skip connection).

        Args:
            inp (Tensor): input tensor.

        Returns:
            Tensor: Output of type 2 layer.
        """
        branch = self.convbn(self.type1(inp))
        return inp + branch
class Type3(nn.Module):
    """Type 3 layer of SRNet: a down-sampling residual block.

    A 1x1 stride-2 projection shortcut is summed with a
    Type1 → ConvBn → AvgPool(stride 2) branch; both halve the spatial size.
    """

    def __init__(self, in_channels: int, out_channels: int) -> None:
        super().__init__()
        # Projection shortcut: 1x1 conv with stride 2 matches channels and
        # resolution of the pooled branch.
        self.conv1 = nn.Conv2d(
            in_channels, out_channels,
            kernel_size=1, stride=2, padding=0, bias=False,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.type1 = Type1(in_channels, out_channels)
        self.convbn = ConvBn(out_channels, out_channels)
        self.pool = nn.AvgPool2d(kernel_size=3, stride=2, padding=1)

    def forward(self, inp: Tensor) -> Tensor:
        """Sum the projection shortcut with the pooled convolutional branch.

        Args:
            inp (Tensor): input tensor.

        Returns:
            Tensor: Output of type 3 layer (spatial size halved).
        """
        shortcut = self.batch_norm(self.conv1(inp))
        branch = self.pool(self.convbn(self.type1(inp)))
        return shortcut + branch
class Type4(nn.Module):
    """Type 4 layer of SRNet: Type1 → ConvBn → global average pooling."""

    def __init__(self, in_channels: int, out_channels: int) -> None:
        super().__init__()
        self.type1 = Type1(in_channels, out_channels)
        self.convbn = ConvBn(out_channels, out_channels)
        # Collapses each feature map to a single value (1x1 output).
        self.gap = nn.AdaptiveAvgPool2d(output_size=1)

    def forward(self, inp: Tensor) -> Tensor:
        """Produce per-channel global features.

        Args:
            inp (Tensor): input tensor.

        Returns:
            Tensor: Output of type 4 layer, shape (B, out_channels, 1, 1).
        """
        features = self.convbn(self.type1(inp))
        return self.gap(features)
if __name__ == "__main__":
    # Smoke test: a Type1 layer should map (1, 1, 256, 256) -> (1, 64, 256, 256).
    import torch

    sample = torch.randn((1, 1, 256, 256))
    layer = Type1(1, 64)
    print(layer(sample).shape)
| 3,652 | 27.317829 | 68 | py |
Pytorch-implementation-of-SRNet | Pytorch-implementation-of-SRNet-master/model/model.py | """ This module creates SRNet model."""
import torch
from torch import Tensor
from torch import nn
from model.utils import Type1, Type2, Type3, Type4
class Srnet(nn.Module):
    """SRNet steganalysis model: Type1/2/3/4 layer groups + linear head."""

    def __init__(self) -> None:
        """Build the four layer groups and the classification head."""
        super().__init__()
        self.type1s = nn.Sequential(Type1(1, 64), Type1(64, 16))
        # Five residual blocks that keep the channel count at 16.
        self.type2s = nn.Sequential(*(Type2(16, 16) for _ in range(5)))
        # Down-sampling blocks; each halves the spatial resolution.
        stages = [(16, 16), (16, 64), (64, 128), (128, 256)]
        self.type3s = nn.Sequential(*(Type3(c_in, c_out) for c_in, c_out in stages))
        self.type4 = Type4(256, 512)
        self.dense = nn.Linear(512, 2)
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, inp: Tensor) -> Tensor:
        """Return class log-probabilities for input images.

        Args:
            inp (Tensor): input image tensor of shape (Batch, 1, 256, 256)

        Returns:
            Tensor: Log-softmax scores of shape (Batch, 2)
        """
        features = self.type4(self.type3s(self.type2s(self.type1s(inp))))
        flat = features.view(features.size(0), -1)
        return self.softmax(self.dense(flat))
if __name__ == "__main__":
    # Smoke test: one grayscale 256x256 image should yield (1, 2) scores.
    image = torch.randn((1, 1, 256, 256))
    net = Srnet()
    print(net(image).shape)
| 1,424 | 26.403846 | 74 | py |
Seq-Att-Affect | Seq-Att-Affect-master/utils.py | import numpy as np
import re
import cv2
from operator import truediv
import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
from pathlib import Path
#import tensorflow as tf
import random
import csv
#from config import *
from scipy.integrate.quadrature import simps
import math
from scipy.stats import multivariate_normal
import os
from random import randint
import glob
from scipy.integrate import simps
from PIL import Image,ImageFilter,ImageEnhance
from math import isnan
import torch
def weights_init_uniform_rule(m):
    """Uniformly initialise a layer with bound 1/sqrt(fan_in), in place.

    Linear layers get U(-y, y) weights (y = 1/sqrt(in_features)) and zero
    bias; Conv2d layers get U(-y, y) weights (y = 1/sqrt(in_channels)) and
    their bias is left untouched.
    """
    classname = m.__class__.__name__
    if 'Linear' in classname:
        bound = 1.0 / np.sqrt(m.in_features)
        m.weight.data.uniform_(-bound, bound)
        m.bias.data.fill_(0)
    if 'Conv2d' in classname:
        bound = 1.0 / np.sqrt(m.in_channels)
        m.weight.data.uniform_(-bound, bound)
def update_lr_ind(opt, lr):
    """Set the learning rate of every parameter group of ``opt`` to ``lr``."""
    for group in opt.param_groups:
        group['lr'] = lr
def update_lr(lr, optimizer):
    """Set the learning rate of every parameter group of ``optimizer``.

    Note the argument order (lr first) differs from :func:`update_lr_ind`.
    """
    for group in optimizer.param_groups:
        group['lr'] = lr
def reset_grad(optimizer):
    """Clear the accumulated gradients before the next backward pass."""
    optimizer.zero_grad()
def denorm(x):
    """Convert the range from [-1, 1] to [0, 1], clamping any overshoot."""
    return ((x + 1) / 2).clamp_(0, 1)
def gradient_penalty(y, x):
    """Compute gradient penalty: (L2_norm(dy/dx) - 1)**2.

    WGAN-GP style penalty. ``x`` must have ``requires_grad=True`` and ``y``
    must be computed from ``x``, otherwise ``autograd.grad`` raises.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Seed gradient of ones so grad() returns dy/dx summed over outputs.
    weight = torch.ones(y.size()).to(device)
    # create_graph=True keeps the penalty differentiable so it can be
    # back-propagated through during discriminator training.
    dydx = torch.autograd.grad(outputs=y,
                               inputs=x,
                               grad_outputs=weight,
                               retain_graph=True,
                               create_graph=True,
                               only_inputs=True)[0]
    # Flatten per-sample gradients and take their L2 norm.
    dydx = dydx.view(dydx.size(0), -1)
    dydx_l2norm = torch.sqrt(torch.sum(dydx**2, dim=1))
    # Penalise deviation of the gradient norm from 1, averaged over batch.
    return torch.mean((dydx_l2norm-1)**2)
def label2onehot(labels, dim):
    """Convert label indices to one-hot vectors.

    Args:
        labels: 1-D tensor of class indices, length B.
        dim: number of classes (width of the one-hot vectors).

    Returns:
        Float tensor of shape (B, dim) with a single 1 per row.
    """
    batch_size = labels.size(0)
    onehot = torch.zeros(batch_size, dim)
    onehot[torch.arange(batch_size), labels.long()] = 1
    return onehot
def print_network(model, name):
    """Print the model, its name, and its total parameter count."""
    num_params = sum(p.numel() for p in model.parameters())
    print(model)
    print(name)
    print("The number of parameters: {}".format(num_params))
def worker_init_fn(worker_id):
    """Give each DataLoader worker a distinct, reproducible numpy seed."""
    base_seed = np.random.get_state()[1][0]
    np.random.seed(base_seed + worker_id)
def OpenCVtoPIL(opencv_image=None):
    """Convert a BGR OpenCV ndarray into an RGB PIL image."""
    return Image.fromarray(cv2.cvtColor(opencv_image, cv2.COLOR_BGR2RGB))
def PILtoOpenCV(pil_image=None):
    """Convert an RGB PIL image into a BGR OpenCV ndarray (a fresh copy)."""
    rgb_array = np.array(pil_image)
    # Reverse the channel axis (RGB -> BGR); copy() makes it contiguous.
    return rgb_array[:, :, ::-1].copy()
def checkDirMake(directory):
    """Create ``directory`` (including parents) if it does not already exist.

    Uses ``exist_ok=True`` instead of the previous ``os.path.exists`` guard,
    which was racy when several workers started at the same time (the
    directory could appear between the check and the makedirs call).
    """
    import os
    os.makedirs(directory, exist_ok=True)
def convertToOneHot(vector, num_classes=None):
    """
    Converts an input 1-D vector of integers into an output
    2-D array of one-hot vectors, where an i'th input value
    of j will set a '1' in the i'th row, j'th column of the
    output array.

    Example:
        v = np.array((1, 0, 4))
        one_hot_v = convertToOneHot(v)
        print one_hot_v

        [[0 1 0 0 0]
         [1 0 0 0 0]
         [0 0 0 0 1]]
    """
    assert isinstance(vector, np.ndarray)
    assert len(vector) > 0
    if num_classes is None:
        num_classes = np.max(vector) + 1
    else:
        assert num_classes > 0
        # Must be strictly greater than the largest label: the previous
        # ``num_classes >= np.max(vector)`` admitted num_classes == max,
        # which then failed with an IndexError on the assignment below.
        assert num_classes >= np.max(vector) + 1
    result = np.zeros(shape=(len(vector), num_classes))
    result[np.arange(len(vector)), vector] = 1
    return result.astype(int)
#print (convertToOneHot(np.array([7]),num_classes = 8))
def readCSV(fileName):
    """Read a results CSV and return the second comma-field of each row.

    Rows containing the word 'frame' (the header) are skipped. Each kept
    row contributes a one-element list ``[float(second_field)]``.

    Returns:
        list[list[float]] for ``.csv`` file names, ``None`` otherwise.
    """
    if '.csv' not in fileName:
        return None
    values = []
    with open(fileName, 'r') as csvFile:
        # delimiter=';' keeps comma-separated content in row[0].
        for row in csv.reader(csvFile, delimiter=';'):
            if 'frame' not in row[0]:
                values.append([float(row[0].split(',')[1])])
    return values
def generalNoise(tImageB=None, noiseType=0, noiseParam=0):
    """Apply one of several degradations to a PIL image.

    Args:
        tImageB: input PIL image.
        noiseType: 0 = identity, 1 = downsample/upsample, 2 = Gaussian blur,
            3 = additive Gaussian noise, 4 = brightness change,
            5 = grayscale (replicated to 3 channels).
        noiseParam: strength parameter; its meaning depends on ``noiseType``
            (halving count, blur radius, noise variance, brightness factor).

    Returns:
        The degraded PIL image.
    """
    if noiseType == 0:
        # Identity: return the image unchanged.
        tImageB = tImageB
    elif noiseType == 1:  # downsample
        oWidth, oHeight = tImageB.size
        for i in range(int(noiseParam)):  # Scale down (/2) blurLevel times
            width, height = tImageB.size
            tImageB = tImageB.resize((width//2, height//2))
        # Scale back up to the original size, losing high frequencies.
        tImageB = tImageB.resize((oWidth, oHeight))
    elif noiseType == 2:  # Gaussian blur
        tImageB = tImageB.filter(ImageFilter.GaussianBlur(noiseParam))
    elif noiseType == 3:  # Gaussian noise
        # Round-trip through OpenCV (BGR) because addNoise works on ndarrays.
        opencvImage = cv2.cvtColor(np.array(tImageB), cv2.COLOR_RGB2BGR)
        opencvImage = addNoise(opencvImage, var=noiseParam)
        pilImage = cv2.cvtColor(opencvImage, cv2.COLOR_BGR2RGB)
        tImageB = Image.fromarray(pilImage)
    elif noiseType == 4:  # Brightness
        # noiseParam is the enhancement factor (1.0 = unchanged).
        e = ImageEnhance.Brightness(tImageB)
        tImageB = e.enhance(noiseParam)
        # NOTE(review): the blocks below are abandoned manual-brightness
        # experiments kept for reference; they are never executed.
        '''print(opencvImage)
        print(test)'''
        '''for i in range(0,opencvImage.shape[0]):
            for j in range(0,opencvImage.shape[1]):
                opencvImage[i,j,0] = round(opencvImage[i,j,0] * noiseParam)
                opencvImage[i,j,1] = round(opencvImage[i,j,1] * noiseParam)
                opencvImage[i,j,2] = round(opencvImage[i,j,2] * noiseParam)
        '''
    elif noiseType == 5:
        # Grayscale, replicated back to a 3-channel RGB image.
        tImageB = tImageB.convert('L')
        np_img = np.array(tImageB, dtype=np.uint8)
        np_img = np.dstack([np_img, np_img, np_img])
        tImageB = Image.fromarray(np_img, 'RGB')
    return tImageB
def addNoise(image, noise_type="gauss", var=.01):
    """
    Generate noise for a given image based on the required noise type.

    Input parameters:
        image: ndarray of shape (rows, cols, channels).
        noise_type: string
            'gauss'    additive Gaussian noise with variance ``var``
            'poisson'  Poisson-distribution based noise
            's&p'      Salt and Pepper noise, pixels forced to 0 or 255
            'speckle'  multiplicative noise: out = image + n*image,
                       where n is standard-normal noise
        var: float, variance of the Gaussian noise ('gauss' mode only).

    Returns an ndarray; unknown noise types return the input unchanged.
    The input array is never modified in place.
    """
    row, col, ch = image.shape
    if noise_type == "gauss":
        mean = 0.0
        sigma = var ** 0.5
        # Noise is drawn in [0, 1] units, so scale to the 8-bit range.
        gauss = np.random.normal(mean, sigma, (row, col, ch))
        noisy = image + gauss * 255
        return noisy.astype('uint8')
    elif noise_type == "s&p":
        s_vs_p = 0.5
        amount = 0.09
        # Copy so the caller's array is not clobbered (the previous version
        # aliased the input and mutated it in place).
        out = image.copy()
        # Generate Salt '255' noise.
        num_salt = np.ceil(amount * image.size * s_vs_p)
        # tuple(...) — indexing with a *list* of index arrays was deprecated
        # fancy indexing and raises in modern numpy.
        coords = tuple(np.random.randint(0, i - 1, int(num_salt))
                       for i in image.shape)
        out[coords] = 255
        # Generate Pepper '0' noise.
        num_pepper = np.ceil(amount * image.size * (1. - s_vs_p))
        coords = tuple(np.random.randint(0, i - 1, int(num_pepper))
                       for i in image.shape)
        out[coords] = 0
        return out
    elif noise_type == "poisson":
        vals = len(np.unique(image))
        vals = 2 ** np.ceil(np.log2(vals))
        noisy = np.random.poisson(image * vals) / float(vals)
        return noisy
    elif noise_type == "speckle":
        gauss = np.random.randn(row, col, ch).reshape(row, col, ch)
        noisy = image + image * gauss
        return noisy
    else:
        return image
class UnNormalize(object):
    """Inverse of torchvision-style Normalize: t = t * std + mean, in place."""

    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, tensor):
        """
        Args:
            tensor (Tensor): Tensor image of size (C, H, W) to be
                de-normalized channel by channel, in place.

        Returns:
            Tensor: the same tensor, de-normalized.
        """
        # Reverses t.sub_(m).div_(s) applied by Normalize.
        for channel, mean_c, std_c in zip(tensor, self.mean, self.std):
            channel.mul_(std_c).add_(mean_c)
        return tensor
def imageLandmarking(img, ldmrk, isPIL=True, inputGT=None):
    """Draw 68 predicted landmarks (green) and optional ground truth (red).

    Args:
        img: PIL image (when ``isPIL``) or an OpenCV BGR ndarray.
        ldmrk: flat sequence laid out as [x0..x67, y0..y67].
        isPIL: whether ``img`` must first be converted from PIL to OpenCV.
        inputGT: optional ground-truth landmarks in the same flat layout.

    Returns:
        OpenCV BGR image with landmark circles drawn on a copy.
    """
    if isPIL:
        # convert the image to the opencv format
        print(img)  # NOTE(review): debug print left in; consider removing.
        theImage = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
    else:
        theImage = img.copy()
    for y in range(68):
        # Landmark y is stored as (ldmrk[y], ldmrk[y + 68]).
        cv2.circle(theImage, (int(ldmrk[y]), int(ldmrk[y+68])), 2, (0, 255, 0))
        if inputGT is not None:
            cv2.circle(theImage, (int(inputGT[y]), int(inputGT[y+68])), 2, (0, 0, 255))
    return theImage
def unnormalizedToCV(input=[], customNormalize=None):
    """Undo (0.5, 0.5, 0.5) normalisation on a batch of CHW tensors and
    convert each to a uint8 BGR OpenCV image.

    Args:
        input: batch tensor of shape (B, C, H, W). NOTE(review): mutable
            default argument — callers are expected always to pass it.
        customNormalize: when given, it is *added* to the raw HWC array
            instead of applying the UnNormalize transform.

    Returns:
        list of (H, W, C) uint8 BGR ndarrays.
    """
    output = []
    #unorm = UnNormalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
    unorm = UnNormalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
    for i in range(input.shape[0]):
        # Un-normalize, convert to numpy, scale to 0-255, move channels last.
        if customNormalize is None:
            theImage = (unorm(input[i]).numpy()*255).transpose((1, 2, 0))
        else:
            theImage = (input[i].numpy().transpose(1, 2, 0) + customNormalize)
        # Cast to uint8 and swap RGB -> BGR for OpenCV.
        theImage = cv2.cvtColor(theImage.astype(np.uint8), cv2.COLOR_RGB2BGR)
        output.append(theImage)
    return output
def unnormalizedAndLandmark(input=[], inputPred=[], inputGT=None,
                            customNormalize=None, ldmarkNumber=68):
    # input is unnormalized [batch_size, channel, height, width] tensor from pytorch
    # inputPred / inputGT are [batch_size, 2 * ldmarkNumber] tensor landmarks
    # Output is [batch_size, height, width, channel] BGR, 0-255 intensity
    # opencv list of landmarked images (predictions green, ground truth red).
    output = []
    inputPred = inputPred.numpy()
    if inputGT is not None:
        inputGT = inputGT.numpy()
    #unorm = UnNormalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
    unorm = UnNormalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
    for i in range(inputPred.shape[0]):
        # Un-normalize, convert to numpy and scale to 0-255.
        if customNormalize is None:
            theImage = (unorm(input[i]).numpy()*255).transpose((1, 2, 0))
        else:
            theImage = (input[i].numpy().transpose(1, 2, 0) + customNormalize)
        # Then transpose to height, width, channel; cast to int; RGB -> BGR.
        theImage = cv2.cvtColor(theImage.astype(np.uint8), cv2.COLOR_RGB2BGR)
        # Now draw the landmarks; scale() clamps huge/NaN coordinates so
        # cv2.circle does not receive invalid integers.
        for y in range(ldmarkNumber):
            cv2.circle(theImage, (int(scale(inputPred[i, y])), int(scale(inputPred[i, y+ldmarkNumber]))), 2, (0, 255, 0))
            if inputGT is not None:
                cv2.circle(theImage, (int(scale(inputGT[i, y])), int(scale(inputGT[i, y+ldmarkNumber]))), 2, (0, 0, 255))
        output.append(theImage)
    return output
def scale(input):
    """Clamp a coordinate into [-99999, 99999]; NaN is mapped to 0."""
    if isnan(input):
        return 0
    if input > 99999:
        return 99999
    if input < -99999:
        return -99999
    return input
def plotImages(input=[], title=None, n_row=4, n_col=4, fromOpenCV=True,
               fileName=None, show=False):
    # Function to plot an n_row x n_col grid of images.
    # Given [n_row*n_col, image_height, image_width, channel] input and an
    # optional title list of the same length. With fromOpenCV=True the
    # images are converted from BGR to RGB before display.
    fig = plt.figure()
    for i in range(n_row * n_col):
        ax = fig.add_subplot(n_row, n_col, i+1)
        if title is not None:
            ax.set_title(title[i])
        if fromOpenCV:
            plt.imshow(cv2.cvtColor(input[i], cv2.COLOR_BGR2RGB))
        else:
            plt.imshow(input[i])
    # Optionally save the whole grid to disk and/or show it interactively.
    if fileName:
        plt.savefig(fileName)
    if show:
        plt.show()
def calc_bb_IOU(boxA, boxB):
    """Intersection-over-union of two [x1, y1, x2, y2] boxes.

    Coordinates are treated as inclusive pixel indices, hence the ``+ 1``
    in every width/height computation.
    """
    # Corners of the intersection rectangle.
    inter_x1 = max(boxA[0], boxB[0])
    inter_y1 = max(boxA[1], boxB[1])
    inter_x2 = min(boxA[2], boxB[2])
    inter_y2 = min(boxA[3], boxB[3])
    # Clamp to zero when the boxes do not overlap.
    inter_area = max(0, inter_x2 - inter_x1 + 1) * max(0, inter_y2 - inter_y1 + 1)
    area_a = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
    area_b = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
    # Union = sum of areas minus the doubly-counted intersection.
    return inter_area / float(area_a + area_b - inter_area)
def read_kp_file(filename, flatten=False):
    """Parse a 300-W style ``.pts`` landmark file into an (N, 2) array.

    The first three lines (version, n_points, '{') and the final '}' line
    are skipped. With ``flatten=True`` the array is flattened column-major,
    i.e. all x coordinates followed by all y coordinates.

    Returns ``None`` when the file name does not contain 'pts'.
    """
    points = []
    if 'pts' in filename:
        with open(filename) as handle:
            rows = [re.split(r'\t+', line.strip()) for line in handle]
        skip = {0, 1, 2, len(rows) - 1}
        for idx, row in enumerate(rows):
            if idx not in skip:
                points.append([float(tok) for tok in row[0].split()])
        arr = np.asarray(points)
        return arr.flatten('F') if flatten else arr
def read_kp_file_text(filename):
    """Parse a ``.txt`` landmark file into a flat [x..., y...] array.

    The first two lines and the last line are skipped; the remaining
    whitespace-separated numbers alternate x, y, x, y, ... across all
    data lines. Returns ``None`` for non-txt file names.
    """
    if 'txt' not in filename:
        return None
    xs, ys = [], []
    with open(filename) as handle:
        rows = [re.split(r'\t+', line.strip()) for line in handle]
    skip = {0, 1, len(rows) - 1}
    toggle = 0  # even -> x coordinate, odd -> y coordinate
    for idx, row in enumerate(rows):
        if idx in skip:
            continue
        for token in row[0].split():
            (xs if toggle % 2 == 0 else ys).append(float(token))
            toggle += 1
    return np.concatenate((xs, ys))
def read_bb_file(filename):
    """Parse bounding-box coordinates from a ``.pts``-style file.

    Same layout as :func:`read_kp_file` (three header lines and one footer
    line are skipped); returns the rows as a 2-D float array.
    """
    boxes = []
    if 'pts' in filename:
        with open(filename) as handle:
            rows = [re.split(r'\t+', line.strip()) for line in handle]
        skip = {0, 1, 2, len(rows) - 1}
        for idx, row in enumerate(rows):
            if idx not in skip:
                boxes.append([float(tok) for tok in row[0].split()])
    return np.asarray(boxes)
def errCalcNoisy(catTesting=1, localize=False, t_dir="300W-Test/01_Indoor/",
                 name='Re3A', is3D=False, ext=".txt", makeFlag=False):
    """Aggregate per-frame error values from result ``.txt`` files, write a
    CED table via :func:`makeErrTxt`, and plot it via :func:`plot_results`.

    NOTE(review): relies on the module-level ``curDir`` which is defined in
    the commented-out ``from config import *`` — this raises NameError
    unless config is imported. Also identical to :func:`errCalc`; the two
    should be merged.
    """
    catTesting = catTesting;  # no-op self-assignment, kept as-is
    l_error = []
    if localize is False:  # image dir: 300VW per-category results
        if is3D:
            dir = curDir + 'images/300VW-Test_M/cat'+str(catTesting)+'/'
        else:
            dir = curDir + 'images/300VW-Test/cat'+str(catTesting)+'/'
    else:
        dir = curDir + t_dir + '/'
    # Each result file holds one error value per line.
    list_txt = glob.glob1(dir, "*"+ext)
    for x in list_txt:
        print(("Opening " + dir+x))
        file = open(dir+x)
        for line in file:
            #print float(line)
            l_error.append(float(line))
        file.close()
    all_err = np.array(l_error)
    if makeFlag:
        # Second pass: read per-line (detected, tracked) flag pairs.
        list_txt = glob.glob1(dir, "*"+ext)
        l_tr = []
        l_d = []
        for x in list_txt:
            print(("Opening " + dir+x))
            file = open(dir+x)
            for line in file:
                data = [float(j) for j in line.split()]
                l_tr.append(float(data[1]))
                l_d.append(float(data[0]))
            file.close()
    if localize is False:
        fileName = "src/result_compared/cat"+str(catTesting)+"/"
        aboveT = makeErrTxt(all_err, fileName=fileName+name+".txt", threshold=.08, lim=1.1005)
        if makeFlag:
            # Fraction of frames re-tracked / detected, written next to the CED.
            l_tr = np.asarray(l_tr);
            l_d = np.asarray(l_d);
            f = open(curDir+fileName+"flag.txt", 'w')
            am_r = truediv(len(l_tr[np.where(l_tr > 0)]), len(l_tr));
            am_d = truediv(len(l_d[np.where(l_d == 0)]), len(l_d));
            f.write("%.4f %.4f\n" % (am_r, am_d));
            f.close()
        print(("Above T ", name, " : "+str(aboveT)))
        plot_results(catTesting, resFolder='src/result_compared/cat'+str(catTesting), addition=[name], is3D=is3D)
    else:  # error dir: 300W indoor/outdoor results
        arrName = ['src/result_compared/300W/Indoor', 'src/result_compared/300W/Outdoor', 'src/result_compared/300W/InOut']
        aboveT = makeErrTxt(all_err, fileName=arrName[catTesting]+"/"+name+".txt", threshold=.08, lim=.35005, step=.0005)
        print(("Above T ", name, " : "+str(aboveT)))
        plot_results(catTesting+4, resFolder=arrName[catTesting], addition=[name], is3D=is3D)
    return all_err
#print(("All error : "+str(all_err)))
def errCalc(catTesting=1, localize=False, t_dir="300W-Test/01_Indoor/",
            name='Re3A', is3D=False, ext=".txt", makeFlag=False):
    """Aggregate per-frame error values from result ``.txt`` files, write a
    CED table via :func:`makeErrTxt`, and plot it via :func:`plot_results`.

    NOTE(review): byte-for-byte duplicate of :func:`errCalcNoisy`; the two
    should be merged. Relies on the module-level ``curDir`` from the
    commented-out ``from config import *`` — NameError without it.
    """
    catTesting = catTesting;  # no-op self-assignment, kept as-is
    l_error = []
    if localize is False:  # image dir: 300VW per-category results
        if is3D:
            dir = curDir + 'images/300VW-Test_M/cat'+str(catTesting)+'/'
        else:
            dir = curDir + 'images/300VW-Test/cat'+str(catTesting)+'/'
    else:
        dir = curDir + t_dir + '/'
    # Each result file holds one error value per line.
    list_txt = glob.glob1(dir, "*"+ext)
    for x in list_txt:
        print(("Opening " + dir+x))
        file = open(dir+x)
        for line in file:
            #print float(line)
            l_error.append(float(line))
        file.close()
    all_err = np.array(l_error)
    if makeFlag:
        # Second pass: read per-line (detected, tracked) flag pairs.
        list_txt = glob.glob1(dir, "*"+ext)
        l_tr = []
        l_d = []
        for x in list_txt:
            print(("Opening " + dir+x))
            file = open(dir+x)
            for line in file:
                data = [float(j) for j in line.split()]
                l_tr.append(float(data[1]))
                l_d.append(float(data[0]))
            file.close()
    if localize is False:
        fileName = "src/result_compared/cat"+str(catTesting)+"/"
        aboveT = makeErrTxt(all_err, fileName=fileName+name+".txt", threshold=.08, lim=1.1005)
        if makeFlag:
            # Fraction of frames re-tracked / detected, written next to the CED.
            l_tr = np.asarray(l_tr);
            l_d = np.asarray(l_d);
            f = open(curDir+fileName+"flag.txt", 'w')
            am_r = truediv(len(l_tr[np.where(l_tr > 0)]), len(l_tr));
            am_d = truediv(len(l_d[np.where(l_d == 0)]), len(l_d));
            f.write("%.4f %.4f\n" % (am_r, am_d));
            f.close()
        print(("Above T ", name, " : "+str(aboveT)))
        plot_results(catTesting, resFolder='src/result_compared/cat'+str(catTesting), addition=[name], is3D=is3D)
    else:  # error dir: 300W indoor/outdoor results
        arrName = ['src/result_compared/300W/Indoor', 'src/result_compared/300W/Outdoor', 'src/result_compared/300W/InOut']
        aboveT = makeErrTxt(all_err, fileName=arrName[catTesting]+"/"+name+".txt", threshold=.08, lim=.35005, step=.0005)
        print(("Above T ", name, " : "+str(aboveT)))
        plot_results(catTesting+4, resFolder=arrName[catTesting], addition=[name], is3D=is3D)
    return all_err
def makeErrTxt(error, fileName='result_compared/Decky.txt', threshold=.08,
               lim=.35005, step=.0001):
    """Write a 300-W style cumulative-error-distribution (CED) table.

    For each error bin the fraction of samples below that bin is written;
    the same value is duplicated into all six result columns.

    Returns:
        float: fraction of samples whose error exceeds ``threshold``
        (the failure rate).

    NOTE(review): uses the module-level ``curDir`` which comes from the
    commented-out ``from config import *``.
    """
    print("Making errr")
    bin = np.arange(0, lim, step)  # 0.35005/0.0005 for 300W, 1.1005 for 300VW
    # Create the result file in the 300-W submission format.
    f = open(curDir+fileName, 'w')
    f.write('300W Challenge 2013 Result\n');
    f.write('Participant: Decky.\n');
    f.write('-----------------------------------------------------------\n');
    f.write('Bin 68_all 68_indoor 68_outdoor 51_all 51_indoor 51_outdoor\n');
    for i in range(len(bin)):
        # Fraction of samples with error below the current bin edge.
        err = truediv(len(error[np.where(error < bin[i])]), len(error))
        f.write("%.4f %.4f %.4f %.4f %.4f %.4f %.4f\n" % (bin[i], err, err, err, err, err, err));
    f.close()
    # Failure rate: fraction of samples above the threshold.
    err_above = truediv(len(error[np.where(error > threshold)]), len(error));
    print((error[np.where(error > threshold)]))
    return err_above
def plot_results(version, resFolder='result_compared', x_limit=0.08, colors=None, markers=None, linewidth=3,
                 fontsize=12, figure_size=(11, 6), addition=None, is3D=False, All=False):
    """
    Method that generates the 300W Faces In-The-Wild Challenge (300-W) results
    in the form of Cumulative Error Distributions (CED) curves. The function
    renders the indoor, outdoor and indoor + outdoor results based on both 68
    and 51 landmark points in 6 different figures.

    Please cite:
    C. Sagonas, E. Antonakos, G. Tzimiropoulos, S. Zafeiriou, M. Pantic. "300
    Faces In-The-Wild Challenge: Database and Results", Image and Vision
    Computing, 2015.

    Parameters
    ----------
    version : int
        1-3 select 300VW categories; 4-6 select 300W indoor/outdoor/both.
    x_limit : float, optional
        The maximum value of the horizontal axis with the errors.
    colors : list of colors or None, optional
        The colors of the lines; if None, sampled from the jet colormap.
    markers : unused (kept for interface compatibility).
    linewidth : float, optional
        The width of the rendered lines.
    fontsize : int, optional
        The font size that is applied on the axes and the legend.
    figure_size : (float, float) or None, optional
        The size of the figure in inches.
    addition : list of str, optional
        Extra participant names appended to the built-in list.

    NOTE(review): for version in [4, 5, 6] the ``mapName`` list is never
    populated, so the legend lookup below raises NameError; only versions
    1-3 are currently functional. ``curDir`` comes from the commented-out
    config import.
    """
    if not is3D:
        title = "300VW 2D "
    else:
        title = "300VW 3DA-2D "
    # Check version: pick the default participant list and figure title.
    if version == 1:
        participants = ['Dlssvm_Cfss', 'MD_CFSS', 'Mdnet_DlibERT', 'Meem_Cfss', 'Spot_Cfss']
        if not All:
            title += 'category 1'
    elif version == 2:
        participants = ['ccot_cfss', 'MD_CFSS', 'spot_cfss', 'srdcf_cfss']
        if not All:
            title += 'category 2'
    elif version == 3:
        participants = ['ccot_cfss', 'MD_CFSS', 'meem_cfss', 'srdcf_cfss', 'staple_cfss']
        if not All:
            title += 'category 3'
    elif version in [4, 5, 6]:
        if is3D:
            print("in if")
            participants = []
            l_participants = ['Re3A_3D', 'Re3A_C_3D', 'FA_3D']
            for z in l_participants:
                if z not in participants:
                    participants.append(z)
        else:
            print("in else")
            participants = ['Baltrusaitis', 'Hasan', 'Jaiswal', 'Milborrow', 'Yan', 'Zhou']
            #participants = []
            participants.append('Re3A')
            participants.append('Re3A_C')
            participants.append('FA')
        arrName = ['Indoor', 'Outdoor', 'Indoor + Outdoor']
        if not All:
            title = arrName[version - 4]
    else:
        raise ValueError('version must be either 1 or 2')
    if All:
        title += " All Category "
    # For the 300VW categories the participant list above is replaced by the
    # method-comparison lists below (participants[i] is the result file name,
    # mapName[i] the legend label, colors[i] the curve colour).
    if version in [1, 2, 3]:
        participants = []
        mapName = []
        if is3D:
            participants.append('RT_MT_3D')
            participants.append('RT_2_3D')
            participants.append('RT_4_3D')
            participants.append('RT_8_3D')
            participants.append('RT_16_3D')
            participants.append('RT_32_3D')
            participants.append('FA_MD_3D')
            participants.append('FA_MT_3D')
            participants.append('3DFFA_MD_3D')
            participants.append('3DFFA_MT_3D')
            mapName.append('FLL_MT_3D')
            mapName.append('CRCN_2_3D')
            mapName.append('CRCN_4_3D')
            mapName.append('CRCN_8_3D')
            mapName.append('CRCN_16_3D')
            mapName.append('CRCN_32_3D')
            mapName.append('FA_MD_3D')
            mapName.append('FA_MT_3D')
            mapName.append('3DFFA_MD_3D')
            mapName.append('3DFFA_MT_3D')
            colors = ['b', 'red', 'orange', 'yellow', 'yellow', 'yellow', 'green', 'brown', 'k', 'purple']
        else:
            participants.append('RT_MT')
            participants.append('RT_2')
            participants.append('RT_4')
            participants.append('RT_8')
            participants.append('RT_16')
            participants.append('RT_32')
            participants.append('YANG')
            participants.append('MD_CFSS')
            participants.append('ME_CFSS')
            mapName.append('FLL_MT')
            mapName.append('CRCN_2')
            mapName.append('CRCN_4')
            mapName.append('CRCN_8')
            mapName.append('CRCN_16')
            mapName.append('CRCN_32')
            mapName.append('YANG')
            mapName.append('MD_CFSS')
            mapName.append('ME_CFSS')
            colors = ['b', 'red', 'orange', 'yellow', 'yellow', 'yellow', 'g', 'brown', 'k']
    # Append caller-supplied extra result names.
    if addition is not None:
        for i in addition:
            if i not in participants:
                participants.append(i)
    # Initialize lists
    ced68 = []
    ced68_indoor = []
    ced68_outdoor = []
    ced51 = []
    ced51_indoor = []
    ced51_outdoor = []
    legend_entries = []
    # Load results
    results_folder = curDir + resFolder
    i = 0
    for f in participants:
        # Pick the table column: 1 = all, 2 = indoor, 3 = outdoor.
        if 'Re3A' in f or version in [1, 2, 3, 6]:
            index = 1
        elif version == 4:  # indoor
            index = 2;
        elif version == 5:  # outdoor
            index = 3;
        filename = f + '.txt'
        # Skip the 4 header lines of the makeErrTxt format.
        tmp = np.loadtxt(str(Path(results_folder) / filename), skiprows=4)
        print(str(Path(results_folder) / filename))
        # Get CED values
        bins = tmp[:, 0]
        ced68.append(tmp[:, index])
        '''ced68_indoor.append(tmp[:, 2])
        ced68_outdoor.append(tmp[:, 3])
        ced51.append(tmp[:, 4])
        ced51_indoor.append(tmp[:, 5])
        ced51_outdoor.append(tmp[:, 6])'''
        # Update legend entries (NOTE(review): NameError for version 4-6).
        legend_entries.append(mapName[i])
        i += 1
    print(bins, x_limit)
    # Find the bin index matching x_limit (offset compensates for the
    # half-step shift of the bin edges written by makeErrTxt).
    if version < 4:
        idx = [x[0] for x in np.where(bins == x_limit+.0001)]  # .0810
    else:
        idx = [x[0] for x in np.where(bins == x_limit+.005)]  # .0810
    real_bins = bins[:idx[0]]
    print(idx, real_bins)
    for i in range(len(ced68)):
        real_ced = ced68[i][:idx[0]]
        # Area under the CED curve, normalised by the x-axis span.
        AUC = str(round(simps(real_ced, real_bins) * (1/x_limit), 5))
        # Failure rate: 1 - proportion of images below the last bin.
        FR = str(round(1. - real_ced[-1], 5))
        print(legend_entries[i] + " : " + AUC + " FR : " + FR)
        #legend_entries[i]+=" [AUC : "+AUC+"]"#+"] [FR : "+FR+"]"
    # 68 points, indoor + outdoor
    _plot_curves(bins, ced68, legend_entries, title, x_limit=x_limit,
                 colors=colors, linewidth=linewidth, fontsize=fontsize,
                 figure_size=figure_size)
    '''# 68 points, indoor
    title = 'Indoor, 68 points'
    _plot_curves(bins, ced68_indoor, legend_entries, title, x_limit=x_limit,
                 colors=colors, linewidth=linewidth, fontsize=fontsize,
                 figure_size=figure_size)
    # 68 points, outdoor
    title = 'Outdoor, 68 points'
    _plot_curves(bins, ced68_outdoor, legend_entries, title, x_limit=x_limit,
                 colors=colors, linewidth=linewidth, fontsize=fontsize,
                 figure_size=figure_size)
    # 51 points, indoor + outdoor
    title = 'Indoor + Outdoor, 51 points'
    _plot_curves(bins, ced51, legend_entries, title, x_limit=x_limit,
                 colors=colors, linewidth=linewidth, fontsize=fontsize,
                 figure_size=figure_size)
    # 51 points, indoor
    title = 'Indoor, 51 points'
    _plot_curves(bins, ced51_indoor, legend_entries, title, x_limit=x_limit,
                 colors=colors, linewidth=linewidth, fontsize=fontsize,
                 figure_size=figure_size)
    # 51 points, outdoor
    title = 'Outdoor, 51 points'
    _plot_curves(bins, ced51_outdoor, legend_entries, title, x_limit=x_limit,
                 colors=colors, linewidth=linewidth, fontsize=fontsize,
                 figure_size=figure_size)'''
def _plot_curves(bins, ced_values, legend_entries, title, x_limit=0.08,
                 colors=None, linewidth=3, fontsize=12, figure_size=None):
    """Render one figure of CED curves (helper for :func:`plot_results`)."""
    # number of curves
    n_curves = len(ced_values)
    # if no colors are provided, sample them from the jet colormap
    if colors is None:
        cm = plt.get_cmap('jet')
        colors = [cm(1.*i/n_curves)[:3] for i in range(n_curves)]
    # plot all curves
    fig = plt.figure()
    ax = plt.gca()
    for i, y in enumerate(ced_values):
        plt.plot(bins, y, color=colors[i],
                 linestyle='-',
                 linewidth=linewidth,
                 label=legend_entries[i])
    # legend (loc=4 is lower-right)
    ax.legend(prop={'size': fontsize}, loc=4)
    # axes tick labels
    for l in (ax.get_xticklabels() + ax.get_yticklabels()):
        l.set_fontsize(fontsize)
    ax.set_xlabel('Normalized Point-to-Point Error', fontsize=fontsize)
    ax.set_ylabel('Images Proportion', fontsize=fontsize)
    ax.set_title(title, fontsize=fontsize)
    # set axes limits: errors on [0, x_limit], proportions on [0, 1]
    ax.set_xlim([0., x_limit])
    ax.set_ylim([0., 1.])
    ax.set_yticks(np.arange(0., 1.1, 0.1))
    # grid
    plt.grid('on', linestyle='--', linewidth=0.5)
    # figure size
    if figure_size is not None:
        fig.set_size_inches(np.asarray(figure_size))
    plt.show()
def make_heatmap(image_name, t_image, add, y_batch, isRandom=True,
                 percent_heatmap=.1, percent_heatmap_e=.05):
    """Build (or load) a single-channel landmark heatmap for an image.

    Gaussian blobs are stamped at each of the 68 landmark positions in
    ``y_batch`` (layout [x0..x67, y0..y67]); eye landmarks (36-47) use the
    smaller ``percent_heatmap_e`` spread. Cached as ``.npy`` next to the
    image under a ``heatmap-<add>`` sibling directory.

    NOTE(review): depends on ``get_bb``, ``get_list_heatmap`` and ``n_o``
    which are not defined in this module (presumably from the commented-out
    ``from config import *``) — confirm before use.
    """
    tBase = os.path.basename(image_name)
    tName, tExt = os.path.splitext(tBase)
    theDir = os.path.dirname(image_name)+"/../heatmap-"+add+"/"
    if not os.path.exists(theDir):
        os.makedirs(theDir)
    fName = theDir+tName+".npy"
    try:
        b_channel, g_channel, r_channel = t_image[:, :, 0], t_image[:, :, 1], t_image[:, :, 2]
    except:
        # NOTE(review): bare except hides real errors; only logs the name.
        print(image_name)
    if os.path.isfile(fName) and isRandom:
        # Reuse a previously generated heatmap.
        newChannel = np.load(fName)
        print("using saved npy")
    else:
        print("make npy "+add)
        newChannel = b_channel.copy(); newChannel[:] = 0
        y_t = y_batch
        # Bounding box of the landmarks, optionally jittered by up to ±25%.
        if isRandom:
            t0, t1, t2, t3 = get_bb(y_t[0:int(n_o//2)], y_t[int(n_o//2):], 68, False,
                                    random.uniform(-.25, .25),
                                    random.uniform(-.25, .25),
                                    random.uniform(-.25, .25),
                                    random.uniform(-.25, .25),
                                    random.uniform(-.25, .25))
        else:
            t0, t1, t2, t3 = get_bb(y_t[0:int(n_o//2)], y_t[int(n_o//2):], 68, False)
        # Gaussian stamp offsets/values for normal and eye landmarks.
        l_cd, rv = get_list_heatmap(0, None, t2-t0, t3-t1, percent_heatmap)
        l_cd_e, rv_e = get_list_heatmap(0, None, t2-t0, t3-t1, percent_heatmap_e)
        height, width, _ = t_image.shape
        scaler = 255/np.max(rv)
        for iter in range(68):
            # BUG(review): ``if random:`` tests the *random module* (always
            # truthy), so the jitter branch below is unreachable; this was
            # almost certainly meant to be ``if isRandom:``.
            if random:
                ix, iy = int(y_t[iter]), int(y_t[iter+68])
            else:
                ix, iy = int(y_t[iter])+randint(0, 2), int(y_t[iter+68])+randint(0, 2)
            # Eye landmarks (36-47) get the tighter Gaussian.
            if iter in range(36, 48):
                l_cd_t = l_cd_e
                rv_t = rv_e
            else:
                l_cd_t = l_cd
                rv_t = rv
            # Stamp the blob, keeping the max where blobs overlap.
            for iter2 in range(len(l_cd_t)):
                value = int(rv_t[iter2]*scaler)
                if newChannel[inBound(iy+l_cd_t[iter2][0], 0, height-1), inBound(ix + l_cd_t[iter2][1], 0, width-1)] < value:
                    newChannel[inBound(iy+l_cd_t[iter2][0], 0, height-1), inBound(ix + l_cd_t[iter2][1], 0, width-1)] = int(rv_t[iter2]*scaler)
        #np.save(fName,newChannel)
    return newChannel
def get_enlarged_bb(the_kp = None, div_x = 2, div_y = 2, images = None,is_bb = False, displacement = 0,
                    displacementxy = None,n_points = 68):
    """Enlarge a landmark bounding box by a fraction of its size, clipped to the image.

    the_kp : flat landmark vector [x..., y...] (or a ready [x1,y1,x2,y2] box
             when is_bb is True).
    div_x/div_y : the box grows by width/div_x and height/div_y on each side.
    images : image whose shape bounds the enlarged box.
    Returns (t, l_x, l_y, x1, y1, x_min, y_min, x2, y2) where t is the base
    box, l_x/l_y the margins, and (x1,y1)-(x2,y2) the clipped enlarged box
    (x_min/y_min are aliases of x1/y1).
    """
    if not is_bb :
        if displacementxy is not None :
            t = get_bb(x_list = the_kp[:n_points],y_list = the_kp[n_points:],
                adding_xmin=displacementxy,adding_xmax=displacementxy,
                adding_ymin=displacementxy,adding_ymax=displacementxy)
        else :
            t = get_bb(x_list = the_kp[:n_points],y_list = the_kp[n_points:],length = n_points,adding = displacement)
    else :
        t = the_kp
    l_x = (t[2]-t[0])/div_x
    l_y = (t[3]-t[1])/div_y
    x1 = int(max(t[0] - l_x,0))
    y1 = int(max(t[1] - l_y,0))
    x_min = x1; y_min = y1;
    #print tImage.shape
    x2 = int(min(t[2] + l_x,images.shape[1]))
    y2 = int(min(t[3] + l_y,images.shape[0]))
    return t,l_x,l_y,x1,y1,x_min,y_min,x2,y2
def inBoundN(input,min,max):
    """Clamp *input* into [min, max] without converting its type."""
    below = input < min
    above = input > max
    return min if below else (max if above else input)
def inBound(input,min,max):
    """Clamp *input* into [min, max] and truncate the result to int."""
    if input < min:
        clipped = min
    elif input > max:
        clipped = max
    else:
        clipped = input
    return int(clipped)
def inBound_tf(input,min,max):
    """Clamp to [min, max] and truncate to int (identical to inBound; the
    _tf suffix notwithstanding, this is plain Python)."""
    return int(min) if input < min else (int(max) if input > max else int(input))
def eval(input):
    """Return *input* floored at zero (negatives become 0).

    NOTE: the name shadows the built-in eval(); kept unchanged for
    interface compatibility with existing callers.
    """
    return 0 if input < 0 else input
def ClipIfNotNone(grad):
    """Clip a TensorFlow gradient tensor to [-1, 1]; pass None gradients through."""
    if grad is None:
        return grad
    return tf.clip_by_value(grad, -1, 1)
def initialize_uninitialized_global_variables(sess):
    """
    Only initializes the variables of a TensorFlow session that were not
    already initialized.
    :param sess: the TensorFlow session
    :return: None
    """
    # List all global variables
    global_vars = tf.global_variables()
    # Find initialized status for all variables (one sess.run for the batch)
    is_var_init = [tf.is_variable_initialized(var) for var in global_vars]
    is_initialized = sess.run(is_var_init)
    # List all variables that were not initialized previously
    not_initialized_vars = [var for (var, init) in
                            zip(global_vars, is_initialized) if not init]
    # Initialize all uninitialized variables found, if any
    if len(not_initialized_vars):
        sess.run(tf.variables_initializer(not_initialized_vars))
def addPadd(im) :
    """Pad an image with black borders to a square of its longest side.

    Returns (padded_image, add_x, add_y) where add_x/add_y is the offset the
    original content was shifted by (left pad when portrait, top pad when
    landscape) -- useful for translating landmark coordinates.
    """
    #im = cv2.imread("./test-frontal.png")
    height, width, channels =im.shape
    desired_size = np.max(np.array([height,width]))
    add_x,add_y = 0,0
    old_size = im.shape[:2] # old_size is in (height, width) format
    # ratio is 1.0 here since desired_size == max(old_size); resize is a no-op
    ratio = float(desired_size)/max(old_size)
    new_size = tuple([int(x*ratio) for x in old_size])
    # new_size should be in (width, height) format
    im = cv2.resize(im, (new_size[1], new_size[0]))
    delta_w = desired_size - new_size[1]
    delta_h = desired_size - new_size[0]
    top, bottom = delta_h//2, delta_h-(delta_h//2)
    left, right = delta_w//2, delta_w-(delta_w//2)
    if height > width : #so shift x
        add_x = left
    else:
        add_y = top
    color = [0, 0, 0]
    new_im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT,
        value=color)
    #print top,bottom,left,right
    '''cv2.imshow("image", new_im)
    cv2.waitKey(0)
    cv2.destroyAllWindows()'''
    return new_im,add_x,add_y
def transformation(input, gt, type, info,length = 68 ):
    """Apply a data-augmentation transform to an image and its landmarks.

    input  : HxWxC image (numpy array).
    gt     : flat landmark vector [x0..x{n-1}, y0..y{n-1}].
    type   : 1 = flip (info: 0 vertical, 1 horizontal),
             2 = rotate by `info` degrees about the image center,
             3 = overlay a regular grid of occluder patches (info unused).
    length : number of landmark points (68, or 84 when > 68).
    Returns [transformed_image, transformed_landmarks].

    NOTE(review): `input` and `type` shadow Python built-ins; names kept
    for interface compatibility. Returns None for any other `type` value.
    """
    # Left/right landmark correspondence for horizontal mirroring (68-point
    # iBUG layout): mapping[i] = [i, mirrored_index].
    mapping =[
    [0,16],
    [1,15],
    [2,14],
    [3,13],
    [4,12],
    [5,11],
    [6,10],
    [7,9],
    [8,8],
    [9,7],
    [10,6],
    [11,5],
    [12,4],
    [13,3],
    [14,2],
    [15,1],
    [16,0],
    [17,26],
    [18,25],
    [19,24],
    [20,23],
    [21,22],
    [22,21],
    [23,20],
    [24,19],
    [25,18],
    [26,17],
    [27,27],
    [28,28],
    [29,29],
    [30,30],
    [31,35],
    [32,34],
    [33,33],
    [34,32],
    [35,31],
    [36,45],
    [37,44],
    [38,43],
    [39,42],
    [40,47],
    [41,46],
    [42,39],
    [43,38],
    [44,37],
    [45,36],
    [46,41],
    [47,40],
    [48,54],
    [49,53],
    [50,52],
    [51,51],
    [52,50],
    [53,49],
    [54,48],
    [55,59],
    [56,58],
    [57,57],
    [58,56],
    [59,55],
    [60,64],
    [61,63],
    [62,62],
    [63,61],
    [64,60],
    [65,67],
    [66,66],
    [67,65],
    ]
    # Same correspondence table for the 84-point layout.
    mapping84 =[
    [0,32],
    [1,31],
    [2,30],
    [3,29],
    [4,28],
    [5,27],
    [6,26],
    [7,25],
    [8,24],
    [9,23],
    [10,22],
    [11,21],
    [12,20],
    [13,19],
    [14,18],
    [15,17],
    [16,16],
    [17,15],
    [18,14],
    [19,13],
    [20,12],
    [21,11],
    [22,10],
    [23,9],
    [24,8],
    [25,7],
    [26,6],
    [27,5],
    [28,4],
    [29,3],
    [30,2],
    [31,1],
    [32,0],
    [33,42],
    [34,41],
    [35,40],
    [36,39],
    [37,38],
    [38,37],
    [39,36],
    [40,35],
    [41,34],
    [42,33],
    [43,46],
    [44,45],
    [45,44],
    [46,43],
    [47,51],
    [48,50],
    [49,49],
    [50,48],
    [51,47],
    [52,57],
    [53,56],
    [54,55],
    [55,54],
    [56,53],
    [57,52],
    [58,63],
    [59,62],
    [60,61],
    [61,60],
    [62,59],
    [63,58],
    [64,70],
    [65,69],
    [66,68],
    [67,67],
    [68,66],
    [69,65],
    [70,64],
    [71,75],
    [72,74],
    [73,73],
    [74,72],
    [75,71],
    [76,80],
    [77,79],
    [78,78],
    [79,77],
    [80,76],
    [81,83],
    [82,82],
    [83,81],
    ]
    if length > 68:
        mapping = np.asarray(mapping84)
    else :
        mapping = np.asarray(mapping)
    if type == 1 :
        #print("Flippping") #info is 0,1
        gt_o = gt.copy()
        height, width,_ = input.shape
        if info == 0 : #vertical
            #print("Flipping vertically ^v")
            output = cv2.flip(input,0)
            # Mirror only the y coordinates about the horizontal mid-line;
            # point identities are unchanged by a vertical flip here.
            for i in range(length) :
                if gt_o[i+length] > (height/2) : #y
                    gt_o[i+length] = height/2 - (gt[i+length] -(height/2))
                if gt_o[i+length] < (height/2) : #y
                    gt_o[i+length] = height/2 + ((height/2)-gt[i+length])
        elif info == 1 : #horizontal
            t_map = mapping[:,1]
            #gt_o_t = gt.copy()
            #print("Flipping Horizontally <- -> ")
            #return np.fliplr(input)
            output = cv2.flip(input,1)
            # Mirror x about the vertical mid-line AND re-index points so
            # left/right facial features swap identities.
            for i in range(length) :
                if gt[i] > (width/2) : #x
                    #gt_o_t[i] = (width/2) - (gt[i] - (width/2))
                    gt_o[t_map[i]] = (width/2) - (gt[i] - (width/2))
                if gt[i] < (width/2) : #x
                    #gt_o_t[i] = (width/2) + ((width/2) - gt[i])
                    gt_o[t_map[i]] = (width/2) + ((width/2) - gt[i])
                #get the new index
                #gt_o[t_map[i]] = gt_o_t[i]
                gt_o[t_map[i]+length] = gt[i+length]
        #needs to be transformed.
        return [output,gt_o]
    elif type == 2 :
        #print("Rotate") # info is 1,2,3
        #output = np.rot90(input,info)
        rows,cols,_ = input.shape
        M = cv2.getRotationMatrix2D((cols/2,rows/2),info,1)
        output = cv2.warpAffine(input,M,(cols,rows))
        # Rotate the (center-relative) landmarks by the same angle, then
        # translate back to image coordinates.
        gt_o = np.array([gt[:length]-(cols/2),gt[length:]-(rows/2)])
        theta = np.radians(-info)
        c, s = np.cos(theta), np.sin(theta)
        R = np.array(((c,-s), (s, c)))
        gt_o = np.dot(R,gt_o)
        gt_o = np.concatenate((gt_o[0]+(cols/2),gt_o[1]+(rows/2)),axis = 0)
        '''
        print R.shape, gt_o.shape
        print gt_o.shape'''
        return [output,gt_o]
    elif type == 3 : #info is 0 to 1
        #print("Occlusion")
        output = input.copy()
        gt_o = gt.copy()
        # Each grid cell gets a white patch covering 50% of its width and
        # 40% of its height; landmarks are left untouched.
        lengthW = 0.5
        lengthH = 0.4
        s_row = 15
        s_col = 7
        imHeight,imWidth,_ = input.shape
        #Now filling the occluder
        l_w = imHeight//s_row
        l_h = imWidth//s_col
        for ix in range(s_row):
            for jx in range(s_col):
                #print ix,jx,l_w,l_h
                #y1:y2, x1:x2
                output[ix*l_w:ix*l_w+int(l_w*lengthH) ,jx*l_h:jx*l_h+int(l_h*lengthW) ] = np.full([int(l_w*lengthH),int(l_h*lengthW),3],255)
        return [output,gt_o]
def calcListNormalizedDistance(pred,gt):
    '''
    Mean point-to-point error per image, normalized by the ground-truth
    inter-ocular distance.

    input :
        pred : array (num_images, num_points, dims)
        gt   : array (num_images, num_points, dims)
    returns : array of num_images normalized errors

    Fixes over the original:
    - the normalization distance mixed pred and gt endpoints
      (pred[i,36] - gt[i,45]); it now uses ground truth only, consistent
      with calcNormalizedDistance below;
    - num_points was read from shape[2] (the coordinate axis) instead of
      shape[1];
    - per-point errors are proper Euclidean norms (summed over dims), so
      each err[i] is a scalar.
    '''
    pred = np.asarray(pred)
    gt = np.asarray(gt)
    err = np.zeros(len(pred))
    num_points = pred.shape[1]
    for i in range(len(pred)) :
        # Outer-eye-corner distance for 68-point layout, eye/nose pair otherwise.
        if num_points == 68 :
            i_d = np.sqrt(np.sum(np.square(gt[i,36] - gt[i,45])))
        else :
            i_d = np.sqrt(np.sum(np.square(gt[i,19] - gt[i,28])))
        point_err = np.sqrt(np.sum(np.square(pred[i] - gt[i]), axis=-1))
        err[i] = np.sum(point_err)/(num_points * i_d)
    return err
def calcNormalizedDistance(pred,gt):
    '''
    Mean point-to-point error for one image, normalized by the ground-truth
    inter-ocular distance.

    input :
        pred : flat array [x..., y...] of 2*num_points values
        gt   : flat array of the same layout
    Uses gt points 36/45 (outer eye corners) for the 68-point layout,
    points 19/28 otherwise.
    '''
    n = pred.shape[0]
    half = n//2
    # Normalization distance from ground truth only.
    if n == 68*2 :
        i_d = np.sqrt(np.square(gt[36] - gt[45]) + np.square(gt[36+68] - gt[45+68]))
    else :
        i_d = np.sqrt(np.square(gt[19] - gt[28]) + np.square(gt[19+68] - gt[28+68]))
    total = 0
    for px, py, gx, gy in zip(pred[:half], pred[half:2*half], gt[:half], gt[half:2*half]):
        total += np.sqrt(np.square(px - gx) + np.square(py - gy))
    return total/(half * i_d)
#assumes p_a and p_b are both positive numbers that sum to 100
def myRand(a, p_a, b, p_b):
    """Return a with probability ~p_a/100, otherwise b (p_b is implied)."""
    draw = random.uniform(0, 100)
    if draw < p_a:
        return a
    return b
def calcLandmarkErrorListTF(pred,gt):
    """TensorFlow landmark error, normalized by the GT bounding-box diagonal.

    pred, gt : tensors of shape (batch, seq, 2*num_points) laid out as
               [x..., y...] -- presumably 68 points given the gt[i,z,0:68]
               slicing; TODO confirm for other layouts.
    Returns a scalar tensor: the mean per-point normalized error over the
    whole batch and sequence. Builds graph ops per (batch, seq) element, so
    shapes must be static.
    """
    all_err = []
    batch = pred.get_shape()[0]
    seq = pred.get_shape()[1]
    for i in range(batch) :
        for z in range(seq):
            bb = get_bb_tf(gt[i,z,0:68],gt[i,z,68:])
            width = tf.abs(bb[2] - bb[0])
            height = tf.abs(bb[3] - bb[1])
            # Diagonal of the ground-truth bounding box is the normalizer.
            gt_bb = tf.sqrt(tf.square(width) + tf.square(height))
            num_points = pred.get_shape()[2]
            num_points_norm = num_points//2
            sum = []
            for j in range(num_points_norm) :
                sum.append( tf.sqrt(tf.square(pred[i,z,j]-gt[i,z,j])+tf.square(pred[i,z,j+num_points_norm]-gt[i,z,j+num_points_norm])))
            err = tf.divide(tf.stack(sum),gt_bb*num_points_norm)
            all_err.append(err)
    return tf.reduce_mean(tf.stack(all_err))
def calcLandmarkError(pred,gt): #for 300VW
    '''
    Mean point-to-point error for one image, normalized by the diagonal of
    the ground-truth bounding box (IJCV / 300VW protocol).

    input :
        pred : flat array [x..., y...] of 2*num_points values
        gt   : flat array of the same layout (bounding box taken from its
               first 68 x and y values)
    '''
    num_points = pred.shape[0]
    half = num_points//2
    box = get_bb(gt[:68],gt[68:])
    width = np.abs(box[2] - box[0])
    height = np.abs(box[3] - box[1])
    # Normalizer: ground-truth bounding-box diagonal.
    diag = math.sqrt((width*width) + (height*height))
    total = 0
    for j in range(half) :
        dx = pred[j] - gt[j]
        dy = pred[j+half] - gt[j+half]
        total += math.sqrt(dx*dx + dy*dy)
    return total/(half*diag)
def showGates(tg = None, batch_index_to_see = 0, n_to_see = 64, n_neurons = 1024,toShow = False, toSave = False, fileName = "gates.jpg"):
    """Visualize LSTM gate activations over time as a grid of heatmaps.

    tg : presumably shaped [seq, 6, batch, n_neurons] with axis 1 ordered as
         (input gate, new input, forget gate, output gate, cell state,
         output) -- TODO confirm against the producer of this array.
    batch_index_to_see : which batch element to plot.
    n_to_see / n_neurons : n_neurons is split into n_neurons // n_to_see row
         strips of n_to_see neurons each; one figure row per strip, one
         column per gate.
    """
    #Total figure : 1024/64 data per image : 16 row per gate then *6 gate : 96
    # BUG FIX: use integer division -- the original "/" yields a float under
    # Python 3, and range()/add_subplot() then raise TypeError.
    t_f_row = n_neurons // n_to_see
    n_column = 6
    fig = plt.figure()
    for p_i in range(t_f_row) :
        inputGate = tg[:,0,batch_index_to_see,p_i * n_to_see:p_i*n_to_see+n_to_see] #all sequence, gate 1, batch 0
        newInputGate= tg[:,1,batch_index_to_see,p_i * n_to_see:p_i*n_to_see+n_to_see] #all sequence, gate 2, batch 0
        forgetGate = tg[:,2,batch_index_to_see,p_i * n_to_see:p_i*n_to_see+n_to_see] #all sequence, gate 3, batch 0
        outputGate = tg[:,3,batch_index_to_see,p_i * n_to_see:p_i*n_to_see+n_to_see] #all sequence, gate 4, batch 0
        cellState = tg[:,4,batch_index_to_see,p_i * n_to_see:p_i*n_to_see+n_to_see]
        outputState = tg[:,5,batch_index_to_see,p_i * n_to_see:p_i*n_to_see+n_to_see]
        # Titles only on the first strip so they are not repeated per row.
        ax = fig.add_subplot(t_f_row,n_column,p_i*(n_column) + 1)
        if p_i == 0 :
            ax.set_title('Input Gate')
        plt.imshow(inputGate,vmin=0,vmax=1)
        ax = fig.add_subplot(t_f_row,n_column,p_i*(n_column) + 2)
        if p_i == 0 :
            ax.set_title('New Input Gate')
        plt.imshow(newInputGate,vmin=0,vmax=1)
        ax = fig.add_subplot(t_f_row,n_column,p_i*(n_column) + 3)
        if p_i == 0 :
            ax.set_title('Forget Gate')
        plt.imshow(forgetGate,vmin=0,vmax=1)
        ax = fig.add_subplot(t_f_row,n_column,p_i*(n_column) + 4)
        if p_i == 0 :
            ax.set_title('Output Gate')
        plt.imshow(outputGate,vmin=0,vmax=1)
        ax = fig.add_subplot(t_f_row,n_column,p_i*(n_column) + 5)
        if p_i == 0 :
            ax.set_title('Cell State')
        plt.imshow(cellState,vmin=0,vmax=1)
        ax = fig.add_subplot(t_f_row,n_column,p_i*(n_column) + 6)
        if p_i == 0 :
            ax.set_title('Output')
        plt.imshow(outputState,vmin=0,vmax=1)
    #plt.colorbar(orientation='vertical')
    if toShow :
        plt.show()
    if toSave :
        fig.savefig(fileName)
def get_list_heatmap(center,cov,image_size_x,image_size_y,percent_radius,exact_radius = None) :
    """Enumerate an elliptical footprint of offsets and their Gaussian weights.

    center         : center coordinate used for both axes of the ellipse.
    cov            : if not None, used as the (isotropic) covariance of the
                     Gaussian; also reused as the radius when exact_radius
                     is given.
    image_size_x/y : the ellipse radii are percent_radius of these sizes
                     (floored at 1 pixel).
    Returns (l_cd, rv): an array of [x, y] offsets covering the ellipse and
    the matching multivariate-normal densities centered at `center`.
    """
    radius_x = int(image_size_x * percent_radius)
    radius_y = int(image_size_y * percent_radius)
    #print(radius_x,radius_y)
    l_cd = []
    t_radius_x = radius_x
    t_radius_y = radius_y
    # Guard against zero-size footprints for tiny boxes.
    if t_radius_x <= 0 :
        t_radius_x = 1
    if t_radius_y <= 0 :
        t_radius_y = 1
    if exact_radius is not None :
        t_radius_x = cov
        t_radius_y = cov
    #print(t_radius_x,t_radius_y,"radius")
    # Scan the ellipse column by column; yspan is the half-height at x.
    for x in range(center-t_radius_x,center+t_radius_x) :
        yspan = t_radius_y*math.sin(math.acos(inBoundN((center-x)/t_radius_y,-1,1)));
        for y in range (int(center-yspan),int(center+yspan)) :
            l_cd.append([x,y])
    l_cd = np.asarray(l_cd)
    mean = [center,center]
    if cov is None :
        rv = multivariate_normal.pdf(l_cd,mean = mean, cov = [t_radius_x,t_radius_y])
    else :
        rv = multivariate_normal.pdf(l_cd,mean = mean, cov = [cov,cov])
    return l_cd,rv
def get_bb(x_list, y_list, length = 68,swap = False,adding = 0,adding_xmin=None, adding_xmax = None,adding_ymin = None, adding_ymax = None,show=False):
    """Integer bounding box of the first *length* landmark coordinates.

    Returns [xMin, yMin, xMax, yMax] optionally expanded by `adding` (a
    fraction of the box size) on every side, or by the individual
    adding_* fractions when adding_xmin is given (then all four are
    expected). With swap=True returns [xMin, xMax, yMin, yMax] unexpanded.
    """
    if show :
        print(x_list, y_list)
    xMin, yMin = 999999, 9999999
    xMax, yMax = -9999999, -99999999
    for idx in range(length):
        vx, vy = x_list[idx], y_list[idx]
        if vx < xMin:
            xMin = int(vx)
        if vx > xMax:
            xMax = int(vx)
        if vy < yMin:
            yMin = int(vy)
        if vy > yMax:
            yMax = int(vy)
    l_x = xMax - xMin
    l_y = yMax - yMin
    if swap :
        return [xMin,xMax,yMin,yMax]
    if adding_xmin is None:
        box = [xMin-adding*l_x,yMin-adding*l_y,xMax+adding*l_x,yMax+adding*l_y]
        if show :
            print("return ",box)
        return box
    return [xMin+adding_xmin*l_x,yMin+adding_ymin*l_y,xMax+adding_xmax*l_x,yMax+adding_ymax*l_y]
def get_bb_tf(x_list, y_list, length = 68,adding = 0, axMin = None, axMax = None, ayMin = None, ayMax = None):
    """TensorFlow version of get_bb: bounding box of the first *length* points.

    Returns the tuple (xMin, yMin, xMax, yMax) as scalar tensors, expanded
    by `adding` (fraction of box size) on every side, or by the individual
    axMin/axMax/ayMin/ayMax fractions when axMin is given (then all four
    are expected). Unlike get_bb, coordinates are not truncated to int.
    """
    xMin = tf.constant(999999.0);xMax = tf.constant(-9999999.0);yMin = tf.constant(9999999.0);yMax = tf.constant(-99999999.0);
    for i in range(length): #x
        xMin = tf.minimum(x_list[i],xMin)
        xMax = tf.maximum(x_list[i],xMax)
        yMin = tf.minimum(y_list[i],yMin)
        yMax = tf.maximum(y_list[i],yMax)
    l_x = xMax - xMin
    l_y = yMax - yMin
    #adding ranging from 0 to 1
    if axMin is None :
        return xMin-adding*l_x,yMin-adding*l_y,xMax+adding*l_x,yMax+adding*l_y
    else :
        return xMin+axMin*l_x,yMin+ayMin*l_y,xMax+axMax*l_x,yMax+ayMax*l_y
def padding(image):
    """Pad *image* with black borders to a square of its longest side,
    keeping the content centered."""
    h, w, _ = image.shape
    longest = max(h, w)
    top = bottom = left = right = 0
    if h < longest:
        diff = longest - h
        top = diff // 2
        bottom = diff - top
    elif w < longest:
        diff = longest - w
        left = diff // 2
        right = diff - left
    black = [0, 0, 0]
    return cv2.copyMakeBorder(image, top, bottom, left, right, cv2.BORDER_CONSTANT, value=black)
def get_bb_face(seq_size=2,synthetic = False,path= "images/bb/"):
    """Collect image paths and face bounding boxes from a dataset directory.

    Expects per-sequence folders containing a .txt annotation file (one
    comma-separated line per frame) and an image sub-folder. When synthetic
    is False the annotation is interpreted as [x, y, w, h] and converted to
    [x1, y1, x2, y2]; otherwise it is taken as-is.

    Returns [list_images, list_ground_truth]: when seq_size is given, both
    are chunked into consecutive groups of seq_size frames; otherwise they
    are grouped per folder.
    """
    list_gt = []
    list_labels = []
    list_labels_t = []
    for f in file_walker.walk(curDir +path):
        #print(f.name, f.full_path) # Name is without extension
        if f.isDirectory: # Check if object is directory
            for sub_f in f.walk():
                if sub_f.isFile:
                    if('txt' in sub_f.full_path):
                        #print(sub_f.name, sub_f.full_path) #this is the groundtruth
                        list_labels_t.append(sub_f.full_path)
                if sub_f.isDirectory: # Check if object is directory
                    list_img = []
                    for sub_sub_f in sub_f.walk(): #this is the image
                        list_img.append(sub_sub_f.full_path)
                    list_gt.append(sorted(list_img))
    # Sort so images and label files pair up by folder order.
    list_gt = sorted(list_gt)
    list_labels_t = sorted(list_labels_t)
    for lbl in list_labels_t :
        with open(lbl) as file:
            x = [re.split(r',+',l.strip()) for l in file]
            y = [ list(map(int, i)) for i in x]
            list_labels.append(y)
    if seq_size is not None :
        list_images = []
        list_ground_truth = []
        for i in range(0,len(list_gt)):
            counter = 0
            for j in range(0,int(len(list_gt[i])/seq_size)):
                temp = []
                temp2 = []
                for z in range(counter,counter+seq_size):
                    temp.append(list_gt[i][z])
                    if not synthetic :
                        # [x, y, w, h] -> [x1, y1, x2, y2]
                        temp2.append([list_labels[i][z][0],list_labels[i][z][1],list_labels[i][z][0]+list_labels[i][z][2],list_labels[i][z][1]+list_labels[i][z][3]])
                    else :
                        temp2.append([list_labels[i][z][0],list_labels[i][z][1],list_labels[i][z][2],list_labels[i][z][3]])
                counter+=seq_size
                #print counter
                list_images.append(temp)
                list_ground_truth.append(temp2)
    else :
        list_images = []
        list_ground_truth = []
        for i in range(0,len(list_gt)): #per folder
            temp = []
            temp2 = []
            for j in range(0,len(list_gt[i])):#per number of seq * number of data/seq_siz
                temp.append(list_gt[i][j])
                if not synthetic :
                    temp2.append([list_labels[i][j][0],list_labels[i][j][1],list_labels[i][j][0]+list_labels[i][j][2],list_labels[i][j][1]+list_labels[i][j][3]])
                else :
                    temp2.append([list_labels[i][j][0],list_labels[i][j][1],list_labels[i][j][2],list_labels[i][j][3]])
            list_images.append(temp)
            list_ground_truth.append(temp2)
    return[list_images,list_ground_truth]#2d list of allsize, seqlength, (1 for image,6 for bb)
def makeGIF(files,filename):
    """Save a list of BGR frames (OpenCV convention) to *filename* as a GIF."""
    import imageio  # imported lazily so the module loads without imageio installed
    image = []
    for i in files :
        cv2_im = cv2.cvtColor(i,cv2.COLOR_BGR2RGB)  # imageio expects RGB
        image.append(cv2_im)
    #pil_im = Image.fromarray(cv2_im)
    #print np.asarray(image).shape
    imageio.mimsave(filename,image,'GIF')
def get_kp_face_temp(seq_size=None,data_list = ["300VW-Train"],per_folder = False,n_skip = 1,is3D = False,is84 = False, dir_name = None,theCurDir = None):
    """Collect image paths and landmark ground truth from dataset folders.

    Walks theCurDir (or curDir + "images/") expecting per-sequence folders
    with an 'img' sub-folder and an annotation sub-folder ('annot',
    'annot2' for 3D, 'annot84' for the 84-point layout, or dir_name).
    Landmarks are read from .pts files and flattened column-major
    ('F': all x then all y).

    Returns (list_images, list_ground_truth, stats) where stats is
    [mean_w, mean_h, min_w, max_w, min_h, max_h] (only meaningful in the
    flat-list branch; sizes are no longer gathered, so the values are 0 /
    sentinels). Grouping: sequences of seq_size when given, per folder when
    per_folder, otherwise one flat list.

    NOTE(review): unlike get_kp_face, the data_list entry is never appended
    to theDir, so the same directory is walked once per entry -- confirm
    this is intended.
    """
    list_gt = []
    list_labels_t = []
    list_labels = []
    if theCurDir is not None :
        theDir = theCurDir
    else :
        theDir = curDir + "images/"
    counter_image = 0
    i = 0
    # Pick the annotation sub-directory matching the requested landmark flavour.
    if dir_name is not None :
        annot_name = dir_name
    else :
        if is84 :
            annot_name = 'annot84'
        elif is3D :
            annot_name = 'annot2'
        else :
            annot_name = 'annot'
    for data in data_list :
        print(("Opening "+data))
        for f in file_walker.walk(theDir):
            if f.isDirectory: # Check if object is directory
                print((f.name, f.full_path)) # Name is without extension
                for sub_f in f.walk():
                    if sub_f.isDirectory: # Check if object is directory
                        list_dta = []
                        for sub_sub_f in sub_f.walk(): #this is the data
                            if(".npy" not in sub_sub_f.full_path):
                                list_dta.append(sub_sub_f.full_path)
                        if(sub_f.name == annot_name) : #If that's annot, add to labels_t
                            list_labels_t.append(sorted(list_dta))
                        elif(sub_f.name == 'img'): #Else it is the image
                            list_gt.append(sorted(list_dta))
                            counter_image+=len(list_dta)
    print("Now opening keylabels")
    for lbl in list_labels_t :
        lbl_68 = [] #Per folder
        for lbl_sub in lbl :
            print(lbl_sub)
            if ('pts' in lbl_sub) :
                x = []
                with open(lbl_sub) as file:
                    data2 = [re.split(r'\t+',l.strip()) for l in file]
                    # Skip the pts header (3 lines) and the trailing '}' line.
                    for i in range(len(data2)) :
                        if(i not in [0,1,2,len(data2)-1]):
                            x.append([ float(j) for j in data2[i][0].split()] )
                lbl_68.append(x) #1 record
        list_labels.append(lbl_68)
    list_images = []
    max_width = max_height = -9999
    min_width = min_height = 9999
    mean_width = mean_height = 0
    print(("Total data : "+str(counter_image)))
    print("Now partitioning data if required")
    if seq_size is not None :
        # Chunk into sequences of seq_size frames, taking every n_skip-th frame.
        list_ground_truth = np.zeros([int(counter_image/(seq_size*n_skip)),seq_size,136])
        indexer = 0;
        for i in range(0,len(list_gt)): #For each dataset
            counter = 0
            for j in range(0,int(len(list_gt[i])/(seq_size*n_skip))): #for number of data/batchsize
                temp = []
                temp2 = np.zeros([seq_size,136])
                i_temp = 0
                for z in range(counter,counter+(seq_size*n_skip),n_skip):#1 to seq_size
                    temp.append(list_gt[i][z])
                    temp2[i_temp] = np.array(list_labels[i][z]).flatten('F')
                    i_temp+=1
                list_images.append(temp)
                list_ground_truth[indexer] = temp2
                indexer += 1
                counter+=seq_size*n_skip
    else :
        if per_folder : #divide per folder
            print("Per folder")
            list_ground_truth = []
            indexer = 0;
            for i in range(0,len(list_gt)): #For each dataset
                temp = []
                temp2 = []
                for j in range(0,len(list_gt[i]),n_skip): #for number of data/batchsize
                    temp.append(list_gt[i][j])
                    temp2.append(np.array(list_labels[i][j]).flatten('F'))
                list_images.append(temp)
                list_ground_truth.append(temp2)
        else : #make as one long list, for localisation
            if dir_name is not None :
                list_ground_truth = np.zeros([counter_image,204])
            elif is84:
                list_ground_truth = np.zeros([counter_image,168])
            else :
                list_ground_truth = np.zeros([counter_image,136])
            indexer = 0;
            for i in range(0,len(list_gt)): #For each dataset
                for j in range(0,len(list_gt[i]),n_skip): #for number of data
                    # PERF FIX: removed an unused cv2.imread() here that loaded
                    # every image from disk only to discard it.
                    list_images.append(list_gt[i][j])
                    list_ground_truth[indexer] = np.array(list_labels[i][j]).flatten('F')
                    indexer += 1
            mean_width/= indexer
            mean_height/= indexer
    return list_images,list_ground_truth,[mean_width,mean_height, min_width,max_width, min_height, max_height]
def get_kp_face(seq_size=None,data_list = ["300VW-Train"],per_folder = False,n_skip = 1,is3D = False,is84 = False, dir_name = None,theCurDir = None):
    """Collect image paths and landmark ground truth from named datasets.

    For each entry of data_list, walks <theDir>/<data>/ expecting
    per-sequence folders with an 'img' sub-folder and an annotation
    sub-folder ('annot', 'annot2' for 3D, 'annot84' for the 84-point
    layout, or dir_name). Landmarks are read from .pts files and flattened
    column-major ('F': all x then all y).

    Returns (list_images, list_ground_truth, stats) with stats =
    [mean_w, mean_h, min_w, max_w, min_h, max_h] (sizes are no longer
    gathered, so the values are 0 / sentinels). Grouping: sequences of
    seq_size when given, per folder when per_folder, otherwise one flat
    list.
    """
    list_gt = []
    list_labels_t = []
    list_labels = []
    if theCurDir is not None :
        theDir = theCurDir
    else :
        theDir = curDir + "images/"
    counter_image = 0
    i = 0
    # Pick the annotation sub-directory matching the requested landmark flavour.
    if dir_name is not None :
        annot_name = dir_name
    else :
        if is84 :
            annot_name = 'annot84'
        elif is3D :
            annot_name = 'annot2'
        else :
            annot_name = 'annot'
    for data in data_list :
        print(("Opening "+data))
        # BUG FIX: the original read `theDir++data+"/"`, i.e. unary plus
        # applied to a str, which raises TypeError at runtime.
        for f in file_walker.walk(theDir + data + "/"):
            if f.isDirectory: # Check if object is directory
                print((f.name, f.full_path)) # Name is without extension
                for sub_f in f.walk():
                    if sub_f.isDirectory: # Check if object is directory
                        list_dta = []
                        for sub_sub_f in sub_f.walk(): #this is the data
                            if(".npy" not in sub_sub_f.full_path):
                                list_dta.append(sub_sub_f.full_path)
                        if(sub_f.name == annot_name) : #If that's annot, add to labels_t
                            list_labels_t.append(sorted(list_dta))
                        elif(sub_f.name == 'img'): #Else it is the image
                            list_gt.append(sorted(list_dta))
                            counter_image+=len(list_dta)
    print("Now opening keylabels")
    for lbl in list_labels_t :
        lbl_68 = [] #Per folder
        for lbl_sub in lbl :
            print(lbl_sub)
            if ('pts' in lbl_sub) :
                x = []
                with open(lbl_sub) as file:
                    data2 = [re.split(r'\t+',l.strip()) for l in file]
                    # Skip the pts header (3 lines) and the trailing '}' line.
                    for i in range(len(data2)) :
                        if(i not in [0,1,2,len(data2)-1]):
                            x.append([ float(j) for j in data2[i][0].split()] )
                lbl_68.append(x) #1 record
        list_labels.append(lbl_68)
    list_images = []
    max_width = max_height = -9999
    min_width = min_height = 9999
    mean_width = mean_height = 0
    print(("Total data : "+str(counter_image)))
    print("Now partitioning data if required")
    if seq_size is not None :
        # Chunk into sequences of seq_size frames, taking every n_skip-th frame.
        list_ground_truth = np.zeros([int(counter_image/(seq_size*n_skip)),seq_size,136])
        indexer = 0;
        for i in range(0,len(list_gt)): #For each dataset
            counter = 0
            for j in range(0,int(len(list_gt[i])/(seq_size*n_skip))): #for number of data/batchsize
                temp = []
                temp2 = np.zeros([seq_size,136])
                i_temp = 0
                for z in range(counter,counter+(seq_size*n_skip),n_skip):#1 to seq_size
                    temp.append(list_gt[i][z])
                    temp2[i_temp] = np.array(list_labels[i][z]).flatten('F')
                    i_temp+=1
                list_images.append(temp)
                list_ground_truth[indexer] = temp2
                indexer += 1
                counter+=seq_size*n_skip
    else :
        if per_folder : #divide per folder
            print("Per folder")
            list_ground_truth = []
            indexer = 0;
            for i in range(0,len(list_gt)): #For each dataset
                temp = []
                temp2 = []
                for j in range(0,len(list_gt[i]),n_skip): #for number of data/batchsize
                    temp.append(list_gt[i][j])
                    temp2.append(np.array(list_labels[i][j]).flatten('F'))
                list_images.append(temp)
                list_ground_truth.append(temp2)
        else : #make as one long list, for localisation
            if dir_name is not None :
                list_ground_truth = np.zeros([counter_image,204])
            elif is84:
                list_ground_truth = np.zeros([counter_image,168])
            else :
                list_ground_truth = np.zeros([counter_image,136])
            indexer = 0;
            for i in range(0,len(list_gt)): #For each dataset
                for j in range(0,len(list_gt[i]),n_skip): #for number of data
                    # PERF FIX: removed an unused cv2.imread() here that loaded
                    # every image from disk only to discard it.
                    list_images.append(list_gt[i][j])
                    list_ground_truth[indexer] = np.array(list_labels[i][j]).flatten('F')
                    indexer += 1
            mean_width/= indexer
            mean_height/= indexer
    return list_images,list_ground_truth,[mean_width,mean_height, min_width,max_width, min_height, max_height]
def get_kp_face_localize(seq_size=None,data = "300W/01_Indoor"):
    """Collect image paths and landmarks for localization, duplicating each
    frame into a length-2 pseudo-sequence.

    Walks <curDir>/images/<data>/ expecting 'img' and 'annot' (.pts)
    sub-folders. Returns (list_images, list_ground_truth, stats); entries
    are nested per folder when seq_size is None, flat otherwise, and every
    entry is the frame repeated twice. stats values are never updated here
    (0 / sentinels).
    """
    list_gt = []
    list_labels_t = []
    list_labels = []
    counter_image = 0
    i = 0
    print(("Opening "+data))
    for f in file_walker.walk(curDir + "images/"+data+"/"):
        print((f.name, f.full_path)) # Name is without extension
        if f.isDirectory: # Check if object is directory
            for sub_f in f.walk():
                if sub_f.isDirectory: # Check if object is directory
                    list_dta = []
                    #print sub_f.name
                    for sub_sub_f in sub_f.walk(): #this is the data
                        list_dta.append(sub_sub_f.full_path)
                    if(sub_f.name == 'annot') : #If that's annot, add to labels_t
                        list_labels_t.append(sorted(list_dta))
                    elif(sub_f.name == 'img'): #Else it is the image
                        list_gt.append(sorted(list_dta))
                        counter_image+=len(list_dta)
    '''
    print len(list_gt[2])
    print len(list_labels_t[2])
    '''
    print("Now opening keylabels")
    for lbl in list_labels_t :
        #print lbl
        lbl_68 = [] #Per folder
        for lbl_sub in lbl :
            #print lbl_sub
            if ('pts' in lbl_sub) :
                x = []
                with open(lbl_sub) as file:
                    data = [re.split(r'\t+',l.strip()) for l in file]
                    #print data
                    # Skip the pts header (3 lines) and the trailing '}' line.
                    for i in range(len(data)) :
                        if(i not in [0,1,2,len(data)-1]):
                            x.append([ float(j) for j in data[i][0].split()] )
                #y = [ list(map(int, i)) for i in x]
                #print len(x)
                lbl_68.append(x) #1 record
        list_labels.append(lbl_68)
    #print len(list_gt[2]) #dim : numfolder, num_data
    #print len(list_labels[2]) #dim : num_folder, num_data, 68
    list_images = []
    list_ground_truth = []
    max_width = max_height = -9999
    min_width = min_height = 9999
    mean_width = mean_height = 0
    print(("Total data : "+str(counter_image)))
    print("Now partitioning data if required")
    indexer = 0;
    if seq_size is None :
        for i in range(0,len(list_gt)): #For each dataset
            temp = []
            temp2 = []
            for j in range(0,len(list_gt[i])): #for number of data/batchsize
                t_temp = []
                t_temp2 = []
                # Duplicate each frame so it can stand in for a 2-frame sequence.
                for k in range (2) :
                    t_temp.append(list_gt[i][j])
                    t_temp2.append(np.array(list_labels[i][j]).flatten('F'))
                temp.append(t_temp)
                temp2.append(t_temp2)
            list_images.append(temp)
            list_ground_truth.append(temp2)
    else :
        for i in range(0,len(list_gt)): #For each dataset
            for j in range(0,len(list_gt[i])): #for number of data/batchsize
                t_temp = []
                t_temp2 = []
                for k in range (2) :
                    t_temp.append(list_gt[i][j])
                    t_temp2.append(np.array(list_labels[i][j]).flatten('F'))
                list_images.append(t_temp)
                list_ground_truth.append(t_temp2)
    #print list_images
    return list_images,list_ground_truth,[mean_width,mean_height, min_width,max_width, min_height, max_height]
def write_kp_file(finalTargetL,arr,length = 68):
    """Write landmarks to a pts-format file.

    arr holds [x0..x{length-1}, y0..y{length-1}]; each output line is
    "x y" between '{' and '}' delimiters.
    """
    with open(finalTargetL, 'w') as out:
        out.write('version: 1\n')
        out.write('n_points: ' + str(length) + '\n')
        out.write('{\n')
        for idx in range(length):
            out.write(str(arr[idx]) + ' ' + str(arr[idx + length]) + '\n')
        out.write('}')
def writeLdmarkFile(fileName, ldmark):
    """Write a 68-point landmark vector [x0..x67, y0..y67] to a pts-format file."""
    with open(fileName, 'w') as out:
        out.write('version: 1\n')
        out.write('n_points: 68\n')
        out.write('{\n')
        for idx in range(68):
            out.write(str(ldmark[idx]) + ' ' + str(ldmark[idx + 68]) + '\n')
        out.write('}')
    return
def test():
    """Smoke-test for the quadrant assignment logic (see toQuadrant).

    Prints the per-quadrant indicator vectors and the combined 0-based
    quadrant indices for a fixed set of sample points.
    """
    import numpy as np
    # BUG FIX: the sample array was commented out while still referenced,
    # so calling test() raised NameError. Restored from the original comment.
    z = np.array(((1, 1), (-1, 1), (-1, -1), (1, -1), (1, 1)))
    v = z[:, 0]
    a = z[:, 1]
    vc = (v > 0).astype(int)
    ac = (a > 0).astype(int)
    q0 = (vc + ac > 1).astype(int) * 1
    vc = (v < 0).astype(int)
    ac = (a > 0).astype(int)
    q1 = (vc + ac > 1).astype(int) * 2
    vc = (v < 0).astype(int)
    ac = (a < 0).astype(int)
    q2 = (vc + ac > 1).astype(int) * 3
    vc = (v > 0).astype(int)
    ac = (a < 0).astype(int)
    q3 = (vc + ac > 1).astype(int) * 4
    qtotal = (q0 + q1 + q2 + q3) - 1
    print(q0, q1, q2, q3)
    print(qtotal)
def toQuadrant(z):
    """Map 2-D points to quadrant indices.

    z : array of shape (n, 2) holding (v, a) pairs.
    Returns an int array: 0 for (+,+), 1 for (-,+), 2 for (-,-), 3 for (+,-).
    Points with a zero coordinate belong to no quadrant and map to -1.
    """
    v = z[:, 0]
    a = z[:, 1]
    pos_v = (v > 0).astype(int)
    neg_v = (v < 0).astype(int)
    pos_a = (a > 0).astype(int)
    neg_a = (a < 0).astype(int)
    q0 = (pos_v + pos_a > 1).astype(int) * 1
    q1 = (neg_v + pos_a > 1).astype(int) * 2
    q2 = (neg_v + neg_a > 1).astype(int) * 3
    q3 = (pos_v + neg_a > 1).astype(int) * 4
    return (q0 + q1 + q2 + q3) - 1
#test() | 78,260 | 31.676827 | 204 | py |
Seq-Att-Affect | Seq-Att-Affect-master/model.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from operator import truediv
class Combiner(nn.Module):
    """Per-frame affect regressor reusing the discriminator layout.

    Five stride-2 4x4 convolutions with LeakyReLU activations feed a
    single linear head that emits ``c_dim`` values.  The flatten in
    ``forward`` assumes the conv stack ends at a 1x1 spatial map
    (i.e. a 32x32 input) -- TODO confirm against the caller.
    """
    def __init__(self, image_size=128, conv_dim=64, c_dim=5, repeat_num=4, inputC = 3):
        super(Combiner, self).__init__()
        self.tanh = nn.Tanh()
        self.relu = nn.ReLU()
        self.lrelu = nn.LeakyReLU(0.01)
        self.conv1 = nn.Conv2d(inputC, conv_dim, kernel_size=4, stride=2, padding=1)
        # Dead code kept from an earlier 112x112 variant of this class.
        '''curr_dim = conv_dim
        self.conv21 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.conv22 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        kernel_size = int(inputC / np.power(2, repeat_num))
        print('kernelsize : ',kernel_size)
        self.conv31 = nn.Conv2d(curr_dim, 1, kernel_size=3, stride=1, padding=1, bias=False)
        self.conv32 = nn.Conv2d(curr_dim, c_dim, kernel_size=kernel_size, bias=False)
        self.linear1 = nn.Linear(46, 112)'''
        # Each conv halves the spatial size and doubles the channels
        # (final channel count: 16*conv_dim).
        curr_dim = conv_dim
        self.conv21 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.conv22 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.conv23 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.conv24 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        kernel_size = int(inputC / np.power(2, repeat_num))
        # Linear head: (N, 16*conv_dim) -> (N, c_dim).
        self.l1 = nn.Linear(curr_dim, c_dim)
        #self.conv31 = nn.Conv2d(curr_dim, 1, kernel_size=3, stride=1, padding=1, bias=False)
        #self.conv32 = nn.Conv2d(curr_dim, c_dim, kernel_size=kernel_size, bias=False)
    ################################################################
    def forward(self, x, s = None, z = None ):
        """Return the (N, c_dim) prediction for a batch of images.

        ``s`` and ``z`` are accepted for interface parity with the
        sibling combiner classes but are ignored here.
        """
        '''debug = False
        if s is not None :
            s2 = self.tanh(self.linear1(s))
            #print(s2)
            #print(s2.shape)
            s2 = torch.unsqueeze(torch.unsqueeze(s2,1),2)
            #print(s2.shape,s2.size(0))
            #s2 = s2.repeat(1, 1, x.size(2), x.size(3))
            s2 = s2.expand(s2.size(0),1,112,112)
            #print(s2.shape,x.shape)
            #print(x)
            x = torch.cat([x, s2], dim=1)
        x1 = self.lrelu(self.conv1(x))
        #print('x1s',x1.shape)
        if z is not None :
            #print('combining')
            x21 = self.lrelu(self.conv21(x1))+z
        else :
            x21 = self.lrelu(self.conv21(x1))
        h = self.lrelu(self.conv22(x21))
        #h = self.lrelu(self.conv23(x22))
        if debug :
            print('D-x',x.shape)
            print('D-x0',x1.shape)
            print('x1',x1.shape)
            print('x21',x21.shape)
            #print('x22',x22.shape)
            print('xh',h.shape)
        #h = self.main(x)
        out_src = self.conv31(h)
        tmp = self.conv32(h)
        out_cls = tmp
        return out_src, out_cls.view(out_cls.size(0), out_cls.size(1))'''
        debug = False
        #print(x.shape)
        x1 = self.lrelu(self.conv1(x))
        x21 = self.lrelu(self.conv21(x1))
        x22 = self.lrelu(self.conv22(x21))
        x23 = self.lrelu(self.conv23(x22))
        x24 = self.lrelu(self.conv24(x23))
        # Flatten to (N, C); only valid when the spatial map is 1x1.
        x25 = x24.view(x24.size(0),x24.size(1))
        if debug :
            print(x.shape)
            print(x1.shape)
            print(x21.shape)
            print(x22.shape)
            print(x23.shape)
            print(x24.shape)
        '''print(x24.shape)
        h = self.lrelu(self.conv25(x24))'''
        out = self.l1(x25)
        return out
        #h = self.main(x)
        '''out_src = self.conv31(h)
        tmp = self.conv32(h)
        out_cls = tmp
        return out_src, out_cls.view(out_cls.size(0), out_cls.size(1))'''
class CombinerSeq(nn.Module):
    """Sequence-level affect regressor: conv encoder plus an LSTM head.

    Each ``forward`` call consumes one frame per sequence in the batch
    and advances the persistent LSTM state; call ``initialize`` once
    per new batch of sequences to reset that state.
    """
    def __init__(self, image_size=128, conv_dim=64, c_dim=5, repeat_num=4, inputC = 3,lstmNeuron = 512,seq_length = 2,batch_length = 10):
        super(CombinerSeq, self).__init__()
        self.tanh = nn.Tanh()
        self.relu = nn.ReLU()
        self.lrelu = nn.LeakyReLU(0.01)
        self.btl = batch_length
        self.sql = seq_length
        self.dim_out = c_dim
        self.conv1 = nn.Conv2d(inputC, conv_dim, kernel_size=4, stride=2, padding=1)
        # Down-sampling stack: each conv halves the spatial size and
        # doubles the channel count (final channels: 16*conv_dim).
        curr_dim = conv_dim
        self.conv21 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.conv22 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.conv23 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.conv24 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.lstmNeuron = lstmNeuron
        # Single-layer LSTM over per-frame features, then a linear head.
        self.l1 = nn.LSTM(curr_dim, self.lstmNeuron)
        self.l2 = nn.Linear(self.lstmNeuron, self.dim_out)
        self.initialize(self.btl)
    ################################################################
    def forward(self, x, s = None, z = None ):
        """Encode one frame per sequence; return (N, c_dim) outputs.

        ``s`` and ``z`` are unused, kept for interface parity with the
        other combiner variants.  The flatten below assumes the conv
        stack reduces the input to a 1x1 spatial map -- TODO confirm.
        """
        debug = False
        #print(x.shape)
        x1 = self.lrelu(self.conv1(x))
        x21 = self.lrelu(self.conv21(x1))
        x22 = self.lrelu(self.conv22(x21))
        x23 = self.lrelu(self.conv23(x22))
        x24 = self.lrelu(self.conv24(x23))
        x25 = x24.view(x24.size(0),x24.size(1))
        if debug :
            print(x.shape)
            print(x1.shape)
            print(x21.shape)
            print(x22.shape)
            print(x23.shape)
            print(x24.shape)
        # LSTM expects (seq_len, batch, features); one time step per call.
        x25 = torch.unsqueeze(x25,0)
        x3,self.linear1_hdn = self.l1(x25,self.linear1_hdn)
        out = self.l2(torch.squeeze(x3,0))
        return out;
    def initialize(self,batch_size = 10):
        # Reset the persistent (h, c) LSTM state; allocates on CUDA.
        self.linear1_hdn = (torch.zeros(1, batch_size, self.lstmNeuron).cuda(),torch.zeros(1, batch_size, self.lstmNeuron).cuda())
class CombinerSeqAttReplace(nn.Module):
    """Sequence combiner whose attention result is meant to REPLACE the
    LSTM hidden state (contrast with CombinerSeqAtt, which concatenates
    it to the LSTM input).

    ``prev_h`` in ``forward`` is a list of earlier hidden states; a
    learned additive attention scores each against the current hidden
    state.  With ``useCH`` set, both h and c participate in scoring.
    """
    def __init__(self, image_size=128, conv_dim=64, c_dim=5, repeat_num=4, inputC = 3,
                lstmNeuron = 512,seq_length = 2,batch_length = 10, useCH=0):
        super(CombinerSeqAttReplace, self).__init__()
        self.tanh = nn.Tanh()
        self.relu = nn.ReLU()
        self.lrelu = nn.LeakyReLU(0.01)
        self.btl = batch_length
        self.sql = seq_length
        self.dim_out = c_dim
        self.useCH = useCH
        self.conv1 = nn.Conv2d(inputC, conv_dim, kernel_size=4, stride=2, padding=1)
        # Down-sampling stack: halves spatial size, doubles channels.
        curr_dim = conv_dim
        self.conv21 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.conv22 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.conv23 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.conv24 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.lstmNeuron = lstmNeuron
        # Attention scorer over [current state ; candidate state].
        if self.useCH :
            self.attn = nn.Linear(2*(self.lstmNeuron + self.lstmNeuron), 1)
        else :
            self.attn = nn.Linear(self.lstmNeuron + self.lstmNeuron, 1)
        self.lstm1 = nn.LSTM(curr_dim, self.lstmNeuron)
        self.l2 = nn.Linear(self.lstmNeuron, self.dim_out)
        self.initialize(self.btl)
    ################################################################
    def forward(self, x, s = None, z = None, prev_h = None, ret_w=False ):
        """One sequence step; returns (N, c_dim), plus the attention
        weights when ``ret_w`` is set (empty list if ``prev_h`` is None).
        """
        debug = False
        #print(x.shape)
        x1 = self.lrelu(self.conv1(x))
        x21 = self.lrelu(self.conv21(x1))
        x22 = self.lrelu(self.conv22(x21))
        x23 = self.lrelu(self.conv23(x22))
        x24 = self.lrelu(self.conv24(x23))
        x25 = x24.view(x24.size(0),x24.size(1))
        if debug :
            print(x.shape)
            print(x1.shape)
            print(x21.shape)
            print(x22.shape)
            print(x23.shape)
            print(x24.shape)
        #x25 = torch.unsqueeze(x25,0)
        input_lstm = torch.unsqueeze(x25,0)
        normalized_weights = []
        #Now add the attention if required
        if prev_h is not None :
            prev_h = torch.stack(prev_h)
            if debug :
                print('prevh',prev_h.shape)
            weights = []
            # Score each stored state against the current LSTM state.
            for i in range(len(prev_h)):
                if debug :
                    print(self.lstm1_hdn[0][0].shape)
                    print(prev_h[0].shape)
                if self.useCH :
                    curHidden = torch.cat((self.lstm1_hdn[0][0],self.lstm1_hdn[1][0]),1)
                else :
                    curHidden = self.lstm1_hdn[0][0]
                weights.append(self.attn(torch.cat((curHidden,
                                prev_h[i]), dim = 1)))
            normalized_weights = F.softmax(torch.cat(weights, 1), 1)
            if debug :
                print(normalized_weights.shape)
            if self.useCH :
                # Weighted blend of stored (h, c) pairs.
                attn_applied = torch.bmm(normalized_weights.unsqueeze(1),
                            prev_h.view(prev_h.shape[1], -1, self.lstmNeuron*2))
                attn_applied = attn_applied.squeeze(1)
                h = attn_applied[:,:self.lstmNeuron].unsqueeze(0)
                # NOTE(review): `c` uses unsqueeze(1) while `h` uses
                # unsqueeze(0); nn.LSTM expects both as (1, N, H) --
                # this looks like a bug, confirm before relying on useCH.
                c = attn_applied[:,self.lstmNeuron:].unsqueeze(1)
                #print(h.shape,c.shape)
                new_hidden =(h,c)
            else :
                attn_applied = torch.bmm(normalized_weights.unsqueeze(1),
                            prev_h.view(prev_h.shape[1], -1, self.lstmNeuron))
                #print(attn_applied.shape,self.lstm1_hdn[1].shape)
                attn_applied = attn_applied.squeeze(1)
                new_hidden = (attn_applied.unsqueeze(0),self.lstm1_hdn[1])
            ########################
            if debug :
                print('attn applied',attn_applied.shape)
                print('x25',x25.shape)
            if debug :
                print(input_lstm.shape)
            # NOTE(review): `new_hidden` is computed above but never
            # passed to the LSTM -- the attended state is discarded and
            # self.lstm1_hdn is used instead.  The class name suggests
            # new_hidden was intended here; confirm the training intent.
            x3,self.lstm1_hdn = self.lstm1(input_lstm,self.lstm1_hdn)
            out = self.l2(torch.squeeze(x3,0))
        else :
            if debug :
                print('x25 shape : ',x25.shape)
            x3,self.lstm1_hdn = self.lstm1(input_lstm,self.lstm1_hdn)
            out = self.l2(torch.squeeze(x3,0))
        if ret_w :
            return out,normalized_weights;
        else :
            return out;
    def initialize(self,batch_size = 10):
        # Reset the persistent (h, c) LSTM state; allocates on CUDA.
        self.lstm1_hdn = (torch.zeros(1, batch_size, self.lstmNeuron).cuda(),torch.zeros(1, batch_size, self.lstmNeuron).cuda())
class CombinerSeqAtt(nn.Module):
    """Sequence combiner with attention over previous hidden states.

    Unlike CombinerSeqAttReplace, the attended state is CONCATENATED
    to the conv features and fed as the LSTM input (the LSTM input
    size is enlarged accordingly in __init__).  When no history is
    given, a zero vector stands in for the attention output so the
    input width stays constant.
    """
    def __init__(self, image_size=128, conv_dim=64, c_dim=5, repeat_num=4, inputC = 3,
                lstmNeuron = 512,seq_length = 2,batch_length = 10, useCH=0):
        super(CombinerSeqAtt, self).__init__()
        self.tanh = nn.Tanh()
        self.relu = nn.ReLU()
        self.lrelu = nn.LeakyReLU(0.01)
        self.btl = batch_length
        self.sql = seq_length
        self.dim_out = c_dim
        self.useCH = useCH
        self.conv1 = nn.Conv2d(inputC, conv_dim, kernel_size=4, stride=2, padding=1)
        # Down-sampling stack: halves spatial size, doubles channels.
        curr_dim = conv_dim
        self.conv21 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.conv22 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.conv23 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.conv24 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.lstmNeuron = lstmNeuron
        # LSTM input = conv features + attended state (h, or h||c when useCH).
        if self.useCH :
            self.attn = nn.Linear(2*(self.lstmNeuron + self.lstmNeuron), 1)
            self.lstm1 = nn.LSTM(curr_dim+self.lstmNeuron+self.lstmNeuron, self.lstmNeuron)
        else :
            self.attn = nn.Linear(self.lstmNeuron + self.lstmNeuron, 1)
            self.lstm1 = nn.LSTM(curr_dim+self.lstmNeuron, self.lstmNeuron)
        self.l2 = nn.Linear(self.lstmNeuron, self.dim_out)
        self.initialize(self.btl)
    ################################################################
    def forward(self, x, s = None, z = None, prev_h = None,ret_w=False ):
        """One sequence step; returns (N, c_dim), plus the attention
        weights when ``ret_w`` is set (empty list if ``prev_h`` is None).
        """
        debug = False
        #print(x.shape)
        x1 = self.lrelu(self.conv1(x))
        x21 = self.lrelu(self.conv21(x1))
        x22 = self.lrelu(self.conv22(x21))
        x23 = self.lrelu(self.conv23(x22))
        x24 = self.lrelu(self.conv24(x23))
        x25 = x24.view(x24.size(0),x24.size(1))
        if debug :
            print(x.shape)
            print(x1.shape)
            print(x21.shape)
            print(x22.shape)
            print(x23.shape)
            print(x24.shape)
        #x25 = torch.unsqueeze(x25,0)
        normalized_weights = []
        #Now add the attention if required
        if prev_h is not None :
            prev_h = torch.stack(prev_h)
            if debug :
                print('prevh',prev_h.shape)
            weights = []
            # Score each stored state against the current LSTM state.
            for i in range(len(prev_h)):
                if debug :
                    print(self.lstm1_hdn[0][0].shape)
                    print(prev_h[0].shape)
                if self.useCH :
                    curHidden = torch.cat((self.lstm1_hdn[0][0],self.lstm1_hdn[1][0]),1)
                else :
                    curHidden = self.lstm1_hdn[0][0]
                weights.append(self.attn(torch.cat((curHidden,
                                prev_h[i]), dim = 1)))
            normalized_weights = F.softmax(torch.cat(weights, 1), 1)
            if debug :
                print(normalized_weights.shape)
            # Weighted sum of the stored states per batch element.
            if self.useCH :
                attn_applied = torch.bmm(normalized_weights.unsqueeze(1),
                            prev_h.view(prev_h.shape[1], -1, self.lstmNeuron*2))
            else :
                attn_applied = torch.bmm(normalized_weights.unsqueeze(1),
                            prev_h.view(prev_h.shape[1], -1, self.lstmNeuron))
            ########################
            if debug :
                print('attn applied',attn_applied.shape)
                print('x25',x25.shape)
            input_lstm = torch.cat((x25,attn_applied.squeeze(1)),dim=1)
            if debug :
                print(input_lstm.shape)
            input_lstm = torch.unsqueeze(input_lstm,0)
            x3,self.lstm1_hdn = self.lstm1(input_lstm,self.lstm1_hdn)
            out = self.l2(torch.squeeze(x3,0))
        else :
            if debug :
                print('x25 shape : ',x25.shape)
            # No history yet: pad with zeros so the LSTM input width matches.
            if self.useCH:
                input_lstm = torch.cat((x25,torch.zeros(x25.shape[0],self.lstmNeuron*2).cuda()),dim=1)
            else :
                input_lstm = torch.cat((x25,torch.zeros(x25.shape[0],self.lstmNeuron).cuda()),dim=1)
            input_lstm = torch.unsqueeze(input_lstm,0)
            x3,self.lstm1_hdn = self.lstm1(input_lstm,self.lstm1_hdn)
            out = self.l2(torch.squeeze(x3,0))
        if ret_w:
            return out,normalized_weights;
        else :
            return out;
    def initialize(self,batch_size = 10):
        # Reset the persistent (h, c) LSTM state; allocates on CUDA.
        self.lstm1_hdn = (torch.zeros(1, batch_size, self.lstmNeuron).cuda(),torch.zeros(1, batch_size, self.lstmNeuron).cuda())
class CombinerSeqL(nn.Module):
    """Sequence combiner with two stacked LSTMs instead of one.

    Same conv encoder as CombinerSeq; the per-frame feature passes
    through two chained single-layer LSTMs (each with its own
    persistent state) before the linear output head.
    """
    def __init__(self, image_size=128, conv_dim=64, c_dim=5, repeat_num=4, inputC = 3,lstmNeuron = 512,seq_length = 2,batch_length = 10):
        super(CombinerSeqL, self).__init__()
        self.tanh = nn.Tanh()
        self.relu = nn.ReLU()
        self.lrelu = nn.LeakyReLU(0.01)
        self.btl = batch_length
        self.sql = seq_length
        self.dim_out = c_dim
        self.conv1 = nn.Conv2d(inputC, conv_dim, kernel_size=4, stride=2, padding=1)
        # Down-sampling stack: halves spatial size, doubles channels.
        curr_dim = conv_dim
        self.conv21 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.conv22 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.conv23 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.conv24 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.lstmNeuron = lstmNeuron
        # Two chained LSTMs, then the linear head.
        self.ls1 = nn.LSTM(curr_dim, self.lstmNeuron)
        self.ls2 = nn.LSTM(self.lstmNeuron, self.lstmNeuron)
        self.l1 = nn.Linear(self.lstmNeuron, self.dim_out)
        self.initialize(self.btl)
    ################################################################
    def forward(self, x, s = None, z = None ):
        """Encode one frame per sequence; return (N, c_dim) outputs.

        ``s`` and ``z`` are unused, kept for interface parity with the
        other combiner variants.
        """
        debug = False
        #print(x.shape)
        x1 = self.lrelu(self.conv1(x))
        x21 = self.lrelu(self.conv21(x1))
        x22 = self.lrelu(self.conv22(x21))
        x23 = self.lrelu(self.conv23(x22))
        x24 = self.lrelu(self.conv24(x23))
        x25 = x24.view(x24.size(0),x24.size(1))
        if debug :
            print(x.shape)
            print(x1.shape)
            print(x21.shape)
            print(x22.shape)
            print(x23.shape)
            print(x24.shape)
        # One LSTM time step per call; both layers keep running state.
        x25 = torch.unsqueeze(x25,0)
        x3,self.linear1_hdn = self.ls1(x25,self.linear1_hdn)
        x3,self.linear2_hdn = self.ls2(x3,self.linear2_hdn)
        out = self.l1(torch.squeeze(x3,0))
        return out;
    def initialize(self,batch_size = 10):
        # Reset both persistent (h, c) LSTM states; allocates on CUDA.
        self.linear1_hdn = (torch.zeros(1, batch_size, self.lstmNeuron).cuda(),torch.zeros(1, batch_size, self.lstmNeuron).cuda())
        self.linear2_hdn = (torch.zeros(1, batch_size, self.lstmNeuron).cuda(),torch.zeros(1, batch_size, self.lstmNeuron).cuda())
'''
def __init__(self, image_size=128, conv_dim=64, c_dim=5, repeat_num=6):
super(DiscriminatorM, self).__init__()
self.tanh = nn.Tanh()
self.relu = nn.ReLU()
self.lrelu = nn.LeakyReLU(0.01)
self.conv1 = nn.Conv2d(3, conv_dim, kernel_size=4, stride=2, padding=1)
curr_dim = conv_dim
self.conv21 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
curr_dim = curr_dim * 2
self.conv22 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
curr_dim = curr_dim * 2
self.conv23 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
curr_dim = curr_dim * 2
self.conv24 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
curr_dim = curr_dim * 2
self.conv25 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
curr_dim = curr_dim * 2
kernel_size = int(image_size / np.power(2, repeat_num))
self.conv31 = nn.Conv2d(curr_dim, 1, kernel_size=3, stride=1, padding=1, bias=False)
self.conv32 = nn.Conv2d(curr_dim, c_dim, kernel_size=kernel_size, bias=False)
self.linear41 = nn.Linear(4, c_dim)
def forward(self, x):
x1 = self.lrelu(self.conv1(x))
x21 = self.lrelu(self.conv21(x1))
x22 = self.lrelu(self.conv22(x21))
x23 = self.lrelu(self.conv23(x22))
x24 = self.lrelu(self.conv24(x23))
h = self.lrelu(self.conv25(x24))
#h = self.main(x)
out_src = self.conv31(h)
tmp = self.conv32(h)
out_cls = tmp
return out_src, out_cls.view(out_cls.size(0), out_cls.size(1))'''
class CombiningBottleNeckO(nn.Module):
    """Regression head over the generator's bottleneck feature map.

    Three strided convs compress the bottleneck, then linear layers
    produce ``dim_out`` values.  With ``toCombine`` set, a scalar per
    sample (``y``, e.g. a rough estimate from D) is tiled to 512 dims
    and concatenated before the final linear layer.
    NOTE: duplicated by CombiningBottleNeck below (which only adds an
    unused SELU); prefer consolidating them.
    """
    def __init__(self, dim_in, dim_out,toCombine = False):
        '''
        Inputs this head is designed to combine :
            1. the z of G
            2. the prev results
            3. the rough estimation from D
        combinations :
            1. series of 2d conv
            2. linear with previous
        '''
        super(CombiningBottleNeckO, self).__init__()
        '''#from generator
        self.conv1 = nn.Conv2d(3+c_dim, conv_dim, kernel_size=7, stride=1, padding=3, bias=False)
        self.i1 = nn.InstanceNorm2d(conv_dim, affine=True, track_running_stats=True)
        self.relu = nn.ReLU(inplace=True)
        layers.append(nn.Conv2d(3+c_dim, conv_dim, kernel_size=7, stride=1, padding=3, bias=False))
        layers.append(nn.InstanceNorm2d(conv_dim, affine=True, track_running_stats=True))
        layers.append(nn.ReLU(inplace=True))
        layers.append(nn.Conv2d(curr_dim, 3, kernel_size=7, stride=1, padding=3, bias=False))
        layers.append(nn.Tanh())
        #from discriminator
        layers.append(nn.Conv2d(3, conv_dim, kernel_size=4, stride=2, padding=1))
        layers.append(nn.LeakyReLU(0.01))
        self.conv1 = nn.Conv2d(curr_dim, 1, kernel_size=3, stride=1, padding=1, bias=False)
        self.conv2 = nn.Conv2d(curr_dim, c_dim, kernel_size=kernel_size, bias=False)
        ######################'''
        self.relu = nn.ReLU(inplace=True)
        self.lrelu = nn.LeakyReLU(0.01)
        #8x8, 32/4 = 8 . 32x32x256
        self.conv1 = nn.Conv2d(dim_in, 512, kernel_size=8, stride=4, padding=2, bias=False)
        #4x4, 8/2 = 4
        self.conv2 = nn.Conv2d(512, 1024, kernel_size=6, stride=2, padding=1, bias=False)
        #4x4, 4/2 = 4
        #3x3, 4/2 = 2~1
        self.conv3 = nn.Conv2d(1024, 2048, kernel_size=4, stride=2, padding=1, bias=False)
        self.toCombine = toCombine
        # linear2 input is 1024 either way: linear1 output, or
        # linear11 output (512) concatenated with the tiled y (512).
        if self.toCombine :
            self.linear11 = nn.Linear(2048, 512)
        else :
            self.linear1 = nn.Linear(2048, 1024)
        self.linear2 = nn.Linear(1024, dim_out)
        '''self.main = nn.Sequential(
            nn.Conv2d(dim_in, dim_out, kernel_size=3, stride=1, padding=1, bias=False),
            nn.InstanceNorm2d(dim_out, affine=True, track_running_stats=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(dim_out, dim_out, kernel_size=3, stride=1, padding=1, bias=False),
            nn.InstanceNorm2d(dim_out, affine=True, track_running_stats=True))
        self.conv1 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        self.conv2 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)'''
    def forward(self, x,y = None):
        """Return (N, dim_out); ``y`` (one scalar per sample) is
        required when the module was built with toCombine=True.
        """
        #print(x.shape)
        x1 = self.lrelu(self.conv1(x))
        #print(x1.shape)
        x2 = self.lrelu(self.conv2(x1))
        #print(x2.shape)
        x3 = self.lrelu(self.conv3(x2))
        #print(x3.shape)
        x3 = x3.view(x3.size(0),-1)
        #print(x3.shape)
        #x4 = self.relu(self.linear1(x3))
        if not self.toCombine :
            x4 = self.lrelu(self.linear1(x3))
        else :
            x41 = self.lrelu(self.linear11(x3))
            #x42 = torch.tensor(y)
            # Tile the scalar y across 512 dims so it can be concatenated.
            x42 = y.view(y.size(0),1)
            #print('x42 : ',x42.shape)
            #1, 1, x.size(2), x.size(3)
            #print(x41.size(1))
            x42 = x42.repeat(1,x41.size(1)).float()
            #print(x41.shape,x42.shape)
            x4 = torch.cat((x41,x42),1)
        x5 = self.linear2(x4)
        #print(x4.shape)
        return x5
        #return x + self.main(x)
class CombiningBottleNeck(nn.Module):
    """Regression head over the generator's bottleneck feature map.

    Identical to CombiningBottleNeckO except for an extra SELU module
    (``self.selu``) that is never used in ``forward``.  Three strided
    convs compress the bottleneck, then linear layers emit ``dim_out``
    values; with ``toCombine`` set, a per-sample scalar ``y`` is tiled
    to 512 dims and concatenated before the final linear layer.
    """
    def __init__(self, dim_in, dim_out,toCombine = False):
        '''
        Inputs this head is designed to combine :
            1. the z of G
            2. the prev results
            3. the rough estimation from D
        combinations :
            1. series of 2d conv
            2. linear with previous
        '''
        super(CombiningBottleNeck, self).__init__()
        '''#from generator
        self.conv1 = nn.Conv2d(3+c_dim, conv_dim, kernel_size=7, stride=1, padding=3, bias=False)
        self.i1 = nn.InstanceNorm2d(conv_dim, affine=True, track_running_stats=True)
        self.relu = nn.ReLU(inplace=True)
        layers.append(nn.Conv2d(3+c_dim, conv_dim, kernel_size=7, stride=1, padding=3, bias=False))
        layers.append(nn.InstanceNorm2d(conv_dim, affine=True, track_running_stats=True))
        layers.append(nn.ReLU(inplace=True))
        layers.append(nn.Conv2d(curr_dim, 3, kernel_size=7, stride=1, padding=3, bias=False))
        layers.append(nn.Tanh())
        #from discriminator
        layers.append(nn.Conv2d(3, conv_dim, kernel_size=4, stride=2, padding=1))
        layers.append(nn.LeakyReLU(0.01))
        self.conv1 = nn.Conv2d(curr_dim, 1, kernel_size=3, stride=1, padding=1, bias=False)
        self.conv2 = nn.Conv2d(curr_dim, c_dim, kernel_size=kernel_size, bias=False)
        ######################'''
        self.relu = nn.ReLU(inplace=True)
        self.lrelu = nn.LeakyReLU(0.01)
        self.selu = nn.SELU()
        #8x8, 32/4 = 8 . 32x32x256
        self.conv1 = nn.Conv2d(dim_in, 512, kernel_size=8, stride=4, padding=2, bias=False)
        #4x4, 8/2 = 4
        self.conv2 = nn.Conv2d(512, 1024, kernel_size=6, stride=2, padding=1, bias=False)
        #4x4, 4/2 = 4
        #3x3, 4/2 = 2~1
        self.conv3 = nn.Conv2d(1024, 2048, kernel_size=4, stride=2, padding=1, bias=False)
        self.toCombine = toCombine
        # linear2 input is 1024 either way: linear1 output, or
        # linear11 output (512) concatenated with the tiled y (512).
        if self.toCombine :
            self.linear11 = nn.Linear(2048, 512)
        else :
            self.linear1 = nn.Linear(2048, 1024)
        self.linear2 = nn.Linear(1024, dim_out)
        '''self.main = nn.Sequential(
            nn.Conv2d(dim_in, dim_out, kernel_size=3, stride=1, padding=1, bias=False),
            nn.InstanceNorm2d(dim_out, affine=True, track_running_stats=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(dim_out, dim_out, kernel_size=3, stride=1, padding=1, bias=False),
            nn.InstanceNorm2d(dim_out, affine=True, track_running_stats=True))
        self.conv1 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        self.conv2 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)'''
    def forward(self, x,y = None):
        """Return (N, dim_out); ``y`` (one scalar per sample) is
        required when the module was built with toCombine=True.
        """
        #print(x.shape)
        x1 = self.lrelu(self.conv1(x))
        #print(x1.shape)
        x2 = self.lrelu(self.conv2(x1))
        #print(x2.shape)
        x3 = self.lrelu(self.conv3(x2))
        #print(x3.shape)
        x3 = x3.view(x3.size(0),-1)
        #print(x3.shape)
        #x4 = self.relu(self.linear1(x3))
        if not self.toCombine :
            x4 = self.lrelu(self.linear1(x3))
        else :
            x41 = self.lrelu(self.linear11(x3))
            #x42 = torch.tensor(y)
            # Tile the scalar y across 512 dims so it can be concatenated.
            x42 = y.view(y.size(0),1)
            #print('x42 : ',x42.shape)
            #1, 1, x.size(2), x.size(3)
            #print(x41.size(1))
            x42 = x42.repeat(1,x41.size(1)).float()
            #print(x41.shape,x42.shape)
            x4 = torch.cat((x41,x42),1)
        x5 = self.linear2(x4)
        #print(x4.shape)
        return x5
        #return x + self.main(x)
class CombiningBottleNeckSeq(nn.Module):
    """Sequential (LSTM-based) regression head over the bottleneck.

    Same conv compression as CombiningBottleNeck, but the combined
    feature is fed one step at a time through a persistent LSTM
    (``linear2``), then two linear layers.  ``toCombine`` mixes in a
    per-sample scalar ``y``; ``withPrev`` additionally mixes in the
    previous prediction ``y_prev``.  Call ``initialize`` to reset the
    LSTM state for a new batch of sequences.
    """
    def __init__(self, dim_in, dim_out,toCombine = False,batch_length = 10, seq_length = 2, withPrev = False,reduced = False,
                lstmNeuron = 512):
        '''
        Inputs this head is designed to combine :
            1. the z of G
            2. the prev results
            3. the rough estimation from D
        combinations :
            1. series of 2d conv
            2. linear with previous
        '''
        super(CombiningBottleNeckSeq, self).__init__()
        self.relu = nn.ReLU(inplace=True)
        self.lrelu = nn.LeakyReLU(0.01)
        self.btl = batch_length
        self.sql = seq_length
        self.dim_out = dim_out
        self.lstmNeuron = lstmNeuron
        #8x8, 32/4 = 8 . 32x32x256
        self.conv1 = nn.Conv2d(dim_in, 512, kernel_size=8, stride=4, padding=2, bias=False)
        #4x4, 8/2 = 4
        self.conv2 = nn.Conv2d(512, 1024, kernel_size=6, stride=2, padding=1, bias=False)
        #4x4, 4/2 = 4
        #3x3, 4/2 = 2~1
        self.conv3 = nn.Conv2d(1024, 2048, kernel_size=4, stride=2, padding=1, bias=False)
        self.toCombine = toCombine
        if self.toCombine :
            self.linear11 = nn.Linear(2048, 512)
        else :
            self.linear1 = nn.Linear(2048, 1024)
        self.withPrev = withPrev
        if self.withPrev : #this is to use the prev result
            self.linear2p = nn.Linear(1024, 512)
        # Despite the name, linear2 is the LSTM; linear3/linear4 taper
        # the hidden state down to dim_out.
        self.linear2 = nn.LSTM(1024, self.lstmNeuron)
        self.linear3 = nn.Linear(self.lstmNeuron, int(truediv(self.lstmNeuron,2)))
        self.linear4 = nn.Linear(int(truediv(self.lstmNeuron,2)), dim_out)
        self.initialize(self.btl)
    def forward(self, x,y = None, y_prev = None):
        """One sequence step; returns (N, dim_out).

        ``y`` is required with toCombine=True; ``y_prev`` is required
        with withPrev=True (both one scalar per sample).
        """
        #print(x.shape)
        x1 = self.lrelu(self.conv1(x))
        #print(x1.shape)
        x2 = self.lrelu(self.conv2(x1))
        #print(x2.shape)
        x3 = self.lrelu(self.conv3(x2))
        #print(x3.shape)
        x3 = x3.view(x3.size(0),-1)
        #print(x3.shape)
        #x4 = self.relu(self.linear1(x3))
        if not self.toCombine :
            x4 = self.relu(self.linear1(x3))
        else :
            x41 = self.relu(self.linear11(x3))
            #x42 = torch.tensor(y)
            # Tile the scalar y across 512 dims so it can be concatenated.
            x42 = y.view(y.size(0),1)
            #print('x42 : ',x42.shape)
            #1, 1, x.size(2), x.size(3)
            #print(x41.size(1))
            x42 = x42.repeat(1,x41.size(1)).float()
            #print(x41.shape,x42.shape)
            x4 = torch.cat((x41,x42),1)
        if self.withPrev: # y_prev is not None :
            # Compress to 512, then concatenate the tiled previous output.
            x4 = self.relu(self.linear2p(x4))
            x43 = y_prev.view(y_prev.size(0),1)
            x43 = x43.repeat(1,x4.size(1)).float()
            #print(x43.shape,x4.shape,'shape')
            x4 = torch.cat((x43,x4),1)
        #The input dimensions are (seq_len, batch, input_size).
        #print(x4.shape,self.linear2_hdn[0].shape,self.linear2_hdn[1].shape)
        x4 = torch.unsqueeze(x4,0)
        #print('x4 shape ',x4.shape,self.linear2_hdn[0].shape,self.linear2_hdn[1].shape)
        x4,self.linear2_hdn = self.linear2(x4,self.linear2_hdn)
        x5 = self.lrelu(self.linear3(torch.squeeze(x4,0)))
        x6 = self.linear4(x5)
        #print(x4.shape)
        return x6
        #return x + self.main(x)
    def initialize(self,batch_size = 10):
        #self.linear2Hidden = (torch.randn(1, 1, 3),
        #torch.randn(1, 1, 3))
        # Reset the persistent (h, c) LSTM state; allocates on CUDA.
        self.linear2_hdn = (torch.zeros(1, batch_size, self.lstmNeuron).cuda(),torch.zeros(1, batch_size, self.lstmNeuron).cuda())
class ResidualBlock(nn.Module):
    """Two-conv residual unit with instance normalization.

    When ``use_skip`` is true the input is added back onto the
    transformed features (classic residual connection); otherwise the
    transformed features are returned on their own.
    """
    def __init__(self, dim_in, dim_out, use_skip = True):
        super(ResidualBlock, self).__init__()
        # Conv -> IN -> ReLU -> Conv -> IN, all shape-preserving (3x3, pad 1).
        transform = [
            nn.Conv2d(dim_in, dim_out, kernel_size=3, stride=1, padding=1, bias=False),
            nn.InstanceNorm2d(dim_out, affine=True, track_running_stats=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(dim_out, dim_out, kernel_size=3, stride=1, padding=1, bias=False),
            nn.InstanceNorm2d(dim_out, affine=True, track_running_stats=True),
        ]
        self.main = nn.Sequential(*transform)
        self.use_skip = use_skip
    def forward(self, x):
        """Apply the block; adds the identity shortcut when enabled."""
        residual = self.main(x)
        return x + residual if self.use_skip else residual
class GeneratorM(nn.Module):
    """StarGAN-style generator: down-sample, residual bottleneck, up-sample.

    The label vector ``c`` (if given) is tiled spatially and
    concatenated to the image channels before the first conv.  With
    ``compressLatent`` the middle residual block is replaced by a
    512-dim linear bottleneck; the hard-coded reshape to
    (N, 256, 32, 32) there ties this path to conv_dim=64 and a
    128x128 input -- TODO confirm against the caller.
    """
    def __init__(self, conv_dim=64, c_dim=5, repeat_num=6, use_skip = True, compressLatent = False):
        super(GeneratorM, self).__init__()
        self.compressLatent = compressLatent
        self.conv1 = nn.Conv2d(3+c_dim, conv_dim, kernel_size=7, stride=1, padding=3, bias=False)
        self.i1 = nn.InstanceNorm2d(conv_dim, affine=True, track_running_stats=True)
        self.relu = nn.ReLU(inplace=True)
        # Down-sampling layers.
        curr_dim = conv_dim
        self.conv21 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1, bias=False)
        self.i21 = nn.InstanceNorm2d(curr_dim*2, affine=True, track_running_stats=True)
        curr_dim = curr_dim * 2
        self.conv22 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1, bias=False)
        self.i22 = nn.InstanceNorm2d(curr_dim*2, affine=True, track_running_stats=True)
        curr_dim = curr_dim * 2
        #Bottleneck layers
        self.conv31 = ResidualBlock(dim_in=curr_dim, dim_out=curr_dim,use_skip = True)
        self.conv32 = ResidualBlock(dim_in=curr_dim, dim_out=curr_dim,use_skip = True)
        #self.conv34 = ResidualBlock(dim_in=curr_dim, dim_out=curr_dim,use_skip = use_skip)
        #Latent space if required
        if self.compressLatent :
            self.linear331 = nn.Linear(262144, 512)
            self.linear332 = nn.Linear(512, 262144)
        else :
            self.conv33 = ResidualBlock(dim_in=curr_dim, dim_out=curr_dim,use_skip = use_skip)
        self.conv34 = ResidualBlock(dim_in=curr_dim, dim_out=curr_dim,use_skip = True)
        self.conv35 = ResidualBlock(dim_in=curr_dim, dim_out=curr_dim,use_skip = True)
        #self.conv36 = ResidualBlock(dim_in=curr_dim, dim_out=curr_dim,use_skip = True)
        #Upsampling layers
        self.conv41 = nn.ConvTranspose2d(curr_dim, curr_dim//2, kernel_size=4, stride=2, padding=1, bias=False)
        self.i41 = nn.InstanceNorm2d(curr_dim//2, affine=True, track_running_stats=True)
        curr_dim = curr_dim // 2
        self.conv42 = nn.ConvTranspose2d(curr_dim, curr_dim//2, kernel_size=4, stride=2, padding=1, bias=False)
        self.i42 = nn.InstanceNorm2d(curr_dim//2, affine=True, track_running_stats=True)
        curr_dim = curr_dim // 2
        #Last Layer
        self.conv51 = nn.Conv2d(curr_dim, 3, kernel_size=7, stride=1, padding=3, bias=False)
        self.tanh = nn.Tanh()
        ##################################################################################
        '''
        layers = []
        layers.append(nn.Conv2d(3+c_dim, conv_dim, kernel_size=7, stride=1, padding=3, bias=False))
        layers.append(nn.InstanceNorm2d(conv_dim, affine=True, track_running_stats=True))
        layers.append(nn.ReLU(inplace=True))
        # Down-sampling layers.
        curr_dim = conv_dim
        for i in range(2):
            layers.append(nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1, bias=False))
            layers.append(nn.InstanceNorm2d(curr_dim*2, affine=True, track_running_stats=True))
            layers.append(nn.ReLU(inplace=True))
            curr_dim = curr_dim * 2
        # Bottleneck layers.
        for i in range(repeat_num/2):
            layers.append(ResidualBlock(dim_in=curr_dim, dim_out=curr_dim,use_skip = True))
        layers.append(ResidualBlock(dim_in=curr_dim, dim_out=curr_dim,use_skip = False))
        for i in range(repeat_num/2):
            layers.append(ResidualBlock(dim_in=curr_dim, dim_out=curr_dim,use_skip = True))
        # Up-sampling layers.
        for i in range(2):
            layers.append(nn.ConvTranspose2d(curr_dim, curr_dim//2, kernel_size=4, stride=2, padding=1, bias=False))
            layers.append(nn.InstanceNorm2d(curr_dim//2, affine=True, track_running_stats=True))
            layers.append(nn.ReLU(inplace=True))
            curr_dim = curr_dim // 2
        layers.append(nn.Conv2d(curr_dim, 3, kernel_size=7, stride=1, padding=3, bias=False))
        layers.append(nn.Tanh())
        self.main = nn.Sequential(*layers)
        '''
    def forward(self, x, c = None,returnInter = False):
        """Translate ``x``; returns the tanh image, plus the middle
        bottleneck activation when ``returnInter`` is set.
        """
        # Replicate spatially and concatenate domain information.
        # Note that this type of label conditioning does not work at all if we use reflection padding in Conv2d.
        # This is because instance normalization ignores the shifting (or bias) effect.
        if c is not None :
            c = c.view(c.size(0), c.size(1), 1, 1)
            c = c.repeat(1, 1, x.size(2), x.size(3))
            x = torch.cat([x, c], dim=1)
        x1 = self.relu(self.i1(self.conv1(x)))
        x21 = self.relu(self.i21(self.conv21(x1)))
        x22 = self.relu(self.i22(self.conv22(x21)))
        x31 = self.conv31(x22)
        x32 = self.conv32(x31)
        if self.compressLatent :
            # Linear bottleneck; reshape is hard-coded to (N, 256, 32, 32).
            z = self.relu(self.linear331(x32.view(x32.size(0), -1)))
            x33 = self.relu(self.linear332(z)).view(z.size(0), 256,32,32)
            #print(' z shape : ',z.shape)
        else :
            x33 = self.conv33(x32)
        x34 = self.conv34(x33)
        x35 = self.conv35(x34)
        #x37 = self.conv37(x36)
        x41 = self.relu(self.i41(self.conv41(x35)))
        x42 = self.relu(self.i42(self.conv42(x41)))
        x51 = self.tanh(self.conv51(x42))
        #return self.main(x)
        if returnInter :
            return x51, x33
        else :
            return x51
class DiscriminatorM112(nn.Module):
    """PatchGAN-style discriminator sized for 112x112 inputs.

    Returns the real/fake source map (``conv31``) and the flattened
    ``c_dim`` classification output (``conv32``).  ``linear41`` is
    constructed but never used in ``forward``; the ``printH`` debug
    branch is disabled via ``and False``.
    """
    def __init__(self, image_size=112, conv_dim=64, c_dim=5, repeat_num=6):
        super(DiscriminatorM112, self).__init__()
        self.tanh = nn.Tanh()
        self.relu = nn.ReLU()
        self.lrelu = nn.LeakyReLU(0.01)
        self.conv1 = nn.Conv2d(3, conv_dim, kernel_size=4, stride=2, padding=1)
        # Down-sampling stack: halves spatial size, doubles channels.
        curr_dim = conv_dim
        self.conv21 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.conv22 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.conv23 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.conv24 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.conv25 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        # 112 / 2**6 -> kernel_size 1 for the classification conv.
        kernel_size = int(image_size / np.power(2, repeat_num))
        self.conv31 = nn.Conv2d(curr_dim, 1, kernel_size=3, stride=1, padding=1, bias=False)
        self.conv32 = nn.Conv2d(curr_dim, c_dim, kernel_size=kernel_size, bias=False)
        self.linear41 = nn.Linear(4, c_dim)
    ################################################################
    def forward(self, x,printH = False):
        """Return (out_src patch map, (N, c_dim) class logits)."""
        x1 = self.lrelu(self.conv1(x))
        x21 = self.lrelu(self.conv21(x1))
        x22 = self.lrelu(self.conv22(x21))
        x23 = self.lrelu(self.conv23(x22))
        x24 = self.lrelu(self.conv24(x23))
        h = self.lrelu(self.conv25(x24))
        #h = self.main(x)
        out_src = self.conv31(h)
        tmp = self.conv32(h)
        # Dead branch: `and False` disables this debug print entirely.
        if printH and False :
            print('tmp : ',tmp[:2])
        out_cls = tmp
        return out_src, out_cls.view(out_cls.size(0), out_cls.size(1))
class GeneratorMZ(nn.Module):
    """Reduced generator exposing its single-residual-block latent.

    Same encoder/decoder shape as GeneratorM but with one bottleneck
    block (``conv3``); with ``returnInter`` the forward pass also
    returns that 2-D latent so other modules (e.g. the combiners)
    can consume it.
    """
    def __init__(self, conv_dim=64, c_dim=5, repeat_num=6, use_skip = True, compressLatent = False):
        super(GeneratorMZ, self).__init__()
        self.compressLatent = compressLatent
        self.conv1 = nn.Conv2d(3+c_dim, conv_dim, kernel_size=7, stride=1, padding=3, bias=False)
        self.i1 = nn.InstanceNorm2d(conv_dim, affine=True, track_running_stats=True)
        self.relu = nn.ReLU(inplace=True)
        # Down-sampling layers.
        curr_dim = conv_dim
        self.conv21 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1, bias=False)
        self.i21 = nn.InstanceNorm2d(curr_dim*2, affine=True, track_running_stats=True)
        curr_dim = curr_dim * 2
        self.conv22 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1, bias=False)
        self.i22 = nn.InstanceNorm2d(curr_dim*2, affine=True, track_running_stats=True)
        curr_dim = curr_dim * 2
        #Bottleneck layers
        self.conv3 = ResidualBlock(dim_in=curr_dim, dim_out=curr_dim,use_skip = use_skip)
        #Upsampling layers
        self.conv41 = nn.ConvTranspose2d(curr_dim, curr_dim//2, kernel_size=4, stride=2, padding=1, bias=False)
        self.i41 = nn.InstanceNorm2d(curr_dim//2, affine=True, track_running_stats=True)
        curr_dim = curr_dim // 2
        self.conv42 = nn.ConvTranspose2d(curr_dim, curr_dim//2, kernel_size=4, stride=2, padding=1, bias=False)
        self.i42 = nn.InstanceNorm2d(curr_dim//2, affine=True, track_running_stats=True)
        curr_dim = curr_dim // 2
        #Last Layer
        self.conv51 = nn.Conv2d(curr_dim, 3, kernel_size=7, stride=1, padding=3, bias=False)
        self.tanh = nn.Tanh()
    def forward(self, x, c = None,returnInter = False):
        """Translate ``x``; optionally also return the bottleneck map.

        ``c`` (if given) is tiled spatially and concatenated to the
        input channels before the first conv.
        """
        # Replicate spatially and concatenate domain information.
        # Note that this type of label conditioning does not work at all if we use reflection padding in Conv2d.
        # This is because instance normalization ignores the shifting (or bias) effect.
        if c is not None :
            c = c.view(c.size(0), c.size(1), 1, 1)
            c = c.repeat(1, 1, x.size(2), x.size(3))
            x = torch.cat([x, c], dim=1)
        debug = False
        x1 = self.relu(self.i1(self.conv1(x)))
        x21 = self.relu(self.i21(self.conv21(x1)))
        x22 = self.relu(self.i22(self.conv22(x21)))
        '''x31 = self.conv31(x22)
        x32 = self.conv32(x31) #2d latent
        x33 = self.conv33(x32)'''
        x3 = self.conv3(x22) #2d latent
        x41 = self.relu(self.i41(self.conv41(x3)))
        x42 = self.relu(self.i42(self.conv42(x41)))
        x51 = self.tanh(self.conv51(x42))
        if debug :
            print('G-x0',x.shape)
            print('x1',x1.shape)
            print('x21',x21.shape)
            print('x22',x22.shape)
            '''print('x31',x31.shape)
            print('x32',x32.shape)
            print('x33',x33.shape)'''
            #print('x31',x31.shape)
            print('x3',x3.shape)
            #print('x33',x33.shape)
            print('x41',x41.shape)
            print('x42',x42.shape)
            print('x51',x51.shape)
        #return self.main(x)
        if returnInter :
            #return x51, x33
            return x51, x3
        else :
            return x51
class DiscriminatorMZ(nn.Module):
    """Patch discriminator that accepts an external latent ``z`` and an
    auxiliary side vector ``s`` (broadcast as one extra input channel).
    """

    def __init__(self, image_size=128, conv_dim=64, c_dim=5, repeat_num=6, inputC = 3):
        super(DiscriminatorMZ, self).__init__()
        self.tanh = nn.Tanh()
        self.relu = nn.ReLU()
        self.lrelu = nn.LeakyReLU(0.01)
        # Six stride-2 conv stages; channels double at each stage.
        self.conv1 = nn.Conv2d(inputC, conv_dim, kernel_size=4, stride=2, padding=1)
        curr_dim = conv_dim
        self.conv21 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.conv22 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.conv23 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        # NOTE(review): padding=2 here (siblings use 1) changes the feature
        # size at this stage — presumably intentional; confirm with training.
        self.conv24 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=2)
        curr_dim = curr_dim * 2
        self.conv25 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        # Classifier kernel spans the remaining feature map.
        kernel_size = int(image_size / np.power(2, repeat_num))
        self.conv31 = nn.Conv2d(curr_dim, 1, kernel_size=3, stride=1, padding=1, bias=False)
        self.conv32 = nn.Conv2d(curr_dim, c_dim, kernel_size=kernel_size, bias=False)
        # Projects a 46-dim side vector to one 112-wide row (see forward()).
        self.linear1 = nn.Linear(46, 112)
    ################################################################
    def forward(self, x, s = None, z = None ):
        """Return ``(patch real/fake map, (B, c_dim) class logits)``.

        ``s``: optional 46-dim side vector, appended as one 112x112 channel
        (assumes 112x112 inputs in that case — TODO confirm with callers).
        ``z``: optional latent added to the activations of stage 2.
        """
        if s is not None :
            side = self.tanh(self.linear1(s))
            side = torch.unsqueeze(torch.unsqueeze(side,1),2)
            side = side.expand(side.size(0),1,112,112)
            x = torch.cat([x, side], dim=1)
        feat = self.lrelu(self.conv1(x))
        feat = self.lrelu(self.conv21(feat))
        if z is not None :
            feat = feat + z
        feat = self.lrelu(self.conv22(feat))
        feat = self.lrelu(self.conv23(feat))
        feat = self.lrelu(self.conv24(feat))
        h = self.lrelu(self.conv25(feat))
        out_src = self.conv31(h)
        out_cls = self.conv32(h)
        return out_src, out_cls.view(out_cls.size(0), out_cls.size(1))
class DiscriminatorM(nn.Module):
    """PatchGAN discriminator with explicit per-stage conv layers.

    The unrolled equivalent of ``Discriminator`` below: six stride-2
    LeakyReLU convs, then a patch real/fake head (``conv31``) and a
    ``c_dim``-way classification head (``conv32``).
    """

    def __init__(self, image_size=128, conv_dim=64, c_dim=5, repeat_num=6):
        super(DiscriminatorM, self).__init__()
        self.tanh = nn.Tanh()
        self.relu = nn.ReLU()
        self.lrelu = nn.LeakyReLU(0.01)
        self.conv1 = nn.Conv2d(3, conv_dim, kernel_size=4, stride=2, padding=1)
        curr_dim = conv_dim
        self.conv21 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.conv22 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.conv23 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.conv24 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.conv25 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        # Classifier kernel spans the whole remaining feature map.
        kernel_size = int(image_size / np.power(2, repeat_num))
        self.conv31 = nn.Conv2d(curr_dim, 1, kernel_size=3, stride=1, padding=1, bias=False)
        self.conv32 = nn.Conv2d(curr_dim, c_dim, kernel_size=kernel_size, bias=False)
        # Unused in forward(); kept so existing checkpoints still load.
        self.linear41 = nn.Linear(4, c_dim)

    def forward(self, x):
        """Return ``(patch real/fake map, (B, c_dim) class logits)``."""
        feat = self.lrelu(self.conv1(x))
        for stage in (self.conv21, self.conv22, self.conv23, self.conv24, self.conv25):
            feat = self.lrelu(stage(feat))
        out_src = self.conv31(feat)
        logits = self.conv32(feat)
        return out_src, logits.view(logits.size(0), logits.size(1))
class DiscriminatorMST(nn.Module):
    """Single-task regressor: emits two scalars per image (named A and V in
    the code — presumably arousal/valence; confirm with callers).

    With ``asDiscriminator=True`` it additionally produces a patch
    real/fake map through an extra 3x3 head (``conv30``).
    """

    def __init__(self, image_size=128, conv_dim=64, c_dim=5, repeat_num=6, asDiscriminator = False):
        super(DiscriminatorMST, self).__init__()
        self.tanh = nn.Tanh()
        self.relu = nn.ReLU()
        self.lrelu = nn.LeakyReLU(0.01)
        self.asD = asDiscriminator
        self.conv1 = nn.Conv2d(3, conv_dim, kernel_size=4, stride=2, padding=1)
        curr_dim = conv_dim
        self.conv21 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.conv22 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.conv23 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.conv24 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.conv25 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        kernel_size = int(image_size / np.power(2, repeat_num))
        # One full-field conv per regression target.
        self.conv31 = nn.Conv2d(curr_dim, 1, kernel_size=kernel_size, bias=False)
        self.conv32 = nn.Conv2d(curr_dim, 1, kernel_size=kernel_size, bias=False)
        if asDiscriminator :
            self.conv30 = nn.Conv2d(curr_dim, 1, kernel_size=3, stride=1, padding=1, bias=False)

    def forward(self, x,printH = False):
        """Return (B, 2) predictions; plus a patch map when ``asD`` is set."""
        feat = self.lrelu(self.conv1(x))
        for stage in (self.conv21, self.conv22, self.conv23, self.conv24, self.conv25):
            feat = self.lrelu(stage(feat))
        out_A = self.conv31(feat)
        out_V = self.conv32(feat)
        outs = torch.cat((out_A.view(out_A.size(0), out_A.size(1)),
                          out_V.view(out_V.size(0), out_V.size(1))), 1)
        if self.asD :
            return self.conv30(feat), outs
        return outs
class GeneratorMZR(nn.Module):
    """Generator network with internal Z, heavily reduced.
    128x128

    NOTE(review): currently an exact structural duplicate of GeneratorMZ
    (stem -> two down-samplings -> one residual bottleneck -> two
    up-samplings -> tanh head), kept as a separate class.
    """

    def __init__(self, conv_dim=64, c_dim=5, repeat_num=6, use_skip = True, compressLatent = False):
        super(GeneratorMZR, self).__init__()
        self.compressLatent = compressLatent
        self.conv1 = nn.Conv2d(3+c_dim, conv_dim, kernel_size=7, stride=1, padding=3, bias=False)
        self.i1 = nn.InstanceNorm2d(conv_dim, affine=True, track_running_stats=True)
        self.relu = nn.ReLU(inplace=True)
        # Down-sampling: two stride-2 stages, channels doubling.
        curr_dim = conv_dim
        self.conv21 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1, bias=False)
        self.i21 = nn.InstanceNorm2d(curr_dim*2, affine=True, track_running_stats=True)
        curr_dim = curr_dim * 2
        self.conv22 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1, bias=False)
        self.i22 = nn.InstanceNorm2d(curr_dim*2, affine=True, track_running_stats=True)
        curr_dim = curr_dim * 2
        # Single residual bottleneck.
        self.conv3 = ResidualBlock(dim_in=curr_dim, dim_out=curr_dim,use_skip = use_skip)
        # Up-sampling: mirror of the encoder.
        self.conv41 = nn.ConvTranspose2d(curr_dim, curr_dim//2, kernel_size=4, stride=2, padding=1, bias=False)
        self.i41 = nn.InstanceNorm2d(curr_dim//2, affine=True, track_running_stats=True)
        curr_dim = curr_dim // 2
        self.conv42 = nn.ConvTranspose2d(curr_dim, curr_dim//2, kernel_size=4, stride=2, padding=1, bias=False)
        self.i42 = nn.InstanceNorm2d(curr_dim//2, affine=True, track_running_stats=True)
        curr_dim = curr_dim // 2
        # Output head.
        self.conv51 = nn.Conv2d(curr_dim, 3, kernel_size=7, stride=1, padding=3, bias=False)
        self.tanh = nn.Tanh()

    def forward(self, x, c = None,returnInter = False):
        """Generate an image in [-1, 1]; also returns the 2-D latent when
        ``returnInter`` is True.
        """
        # Label conditioning: tile spatially, concatenate as extra channels.
        # (Does not work with reflection padding — instance norm removes
        # the shift/bias the label would otherwise contribute.)
        if c is not None :
            lbl = c.view(c.size(0), c.size(1), 1, 1).repeat(1, 1, x.size(2), x.size(3))
            x = torch.cat([x, lbl], dim=1)
        h = self.relu(self.i1(self.conv1(x)))
        h = self.relu(self.i21(self.conv21(h)))
        h = self.relu(self.i22(self.conv22(h)))
        z2d = self.conv3(h)  # 2-D latent
        h = self.relu(self.i41(self.conv41(z2d)))
        h = self.relu(self.i42(self.conv42(h)))
        img = self.tanh(self.conv51(h))
        return (img, z2d) if returnInter else img
class DiscriminatorMZR(nn.Module):
    """Discriminator network with with external z and S. Greatly reduced version
    128x128

    Four stride-2 LeakyReLU conv stages, a 3x3 patch real/fake head
    (``conv31``) and a full-field ``c_dim``-way classifier (``conv32``).
    Optional inputs: ``s`` — a 46-dim side vector broadcast as one extra
    112x112 channel (assumes 112x112 images in that case — TODO confirm);
    ``z`` — a latent added to the stage-2 activations.
    """
    def __init__(self, image_size=128, conv_dim=64, c_dim=5, repeat_num=4, inputC = 3):
        super(DiscriminatorMZR, self).__init__()
        self.tanh = nn.Tanh()
        self.relu = nn.ReLU()
        self.lrelu = nn.LeakyReLU(0.01)
        self.conv1 = nn.Conv2d(inputC, conv_dim, kernel_size=4, stride=2, padding=1)
        curr_dim = conv_dim
        self.conv21 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.conv22 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.conv23 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        # Classifier kernel spans the whole remaining feature map.
        kernel_size = int(image_size / np.power(2, repeat_num))
        self.conv31 = nn.Conv2d(curr_dim, 1, kernel_size=3, stride=1, padding=1, bias=False)
        self.conv32 = nn.Conv2d(curr_dim, c_dim, kernel_size=kernel_size, bias=False)
        self.linear1 = nn.Linear(46, 112)
    ################################################################
    def forward(self, x, s = None, z = None ):
        """Return ``(patch real/fake map, (B, c_dim) class logits)``."""
        # Fix: debug was left set to True, dumping activation shapes on
        # every forward pass; all sibling discriminators default to False.
        debug = False
        if s is not None :
            s2 = self.tanh(self.linear1(s))
            s2 = torch.unsqueeze(torch.unsqueeze(s2,1),2)
            # Broadcast the projected side vector as one 112x112 channel.
            s2 = s2.expand(s2.size(0),1,112,112)
            x = torch.cat([x, s2], dim=1)
        x1 = self.lrelu(self.conv1(x))
        if z is not None :
            # Inject the external latent after the second stage.
            x21 = self.lrelu(self.conv21(x1))+z
        else :
            x21 = self.lrelu(self.conv21(x1))
        x22 = self.lrelu(self.conv22(x21))
        h = self.lrelu(self.conv23(x22))
        if debug :
            print('D-x0',x1.shape)
            print('x1',x1.shape)
            print('x21',x21.shape)
            print('x22',x22.shape)
            print('xh',h.shape)
        out_src = self.conv31(h)
        tmp = self.conv32(h)
        out_cls = tmp
        return out_src, out_cls.view(out_cls.size(0), out_cls.size(1))
class DiscriminatorMZRL(nn.Module):
    """Discriminator network with with external z and S. Greatly reduced version
    128x128

    Three stride-2 LeakyReLU conv stages, a 3x3 patch real/fake head
    (``conv31``) and a full-field ``c_dim``-way classifier (``conv32``).
    Optional inputs: ``s`` — a 46-dim side vector broadcast as one extra
    112x112 channel (assumes 112x112 images in that case — TODO confirm);
    ``z`` — a latent added to the stage-2 activations.
    """
    def __init__(self, image_size=128, conv_dim=64, c_dim=5, repeat_num=3, inputC = 3):
        super(DiscriminatorMZRL, self).__init__()
        self.tanh = nn.Tanh()
        self.relu = nn.ReLU()
        self.lrelu = nn.LeakyReLU(0.01)
        self.conv1 = nn.Conv2d(inputC, conv_dim, kernel_size=4, stride=2, padding=1)
        curr_dim = conv_dim
        self.conv21 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.conv22 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        # Fix: the classifier kernel must span the remaining feature map,
        # i.e. image_size / 2**repeat_num, like every sibling class.  The
        # original divided the input CHANNEL count `inputC`, which gives
        # kernel_size == 0 for the defaults (inputC=3, repeat_num=3) and an
        # unusable Conv2d.
        kernel_size = int(image_size / np.power(2, repeat_num))
        print('kernelsize : ',kernel_size)
        self.conv31 = nn.Conv2d(curr_dim, 1, kernel_size=3, stride=1, padding=1, bias=False)
        self.conv32 = nn.Conv2d(curr_dim, c_dim, kernel_size=kernel_size, bias=False)
        self.linear1 = nn.Linear(46, 112)
    ################################################################
    def forward(self, x, s = None, z = None ):
        """Return ``(patch real/fake map, (B, c_dim) class logits)``."""
        debug = False
        if s is not None :
            s2 = self.tanh(self.linear1(s))
            s2 = torch.unsqueeze(torch.unsqueeze(s2,1),2)
            # Broadcast the projected side vector as one 112x112 channel.
            s2 = s2.expand(s2.size(0),1,112,112)
            x = torch.cat([x, s2], dim=1)
        x1 = self.lrelu(self.conv1(x))
        if z is not None :
            # Inject the external latent after the second stage.
            x21 = self.lrelu(self.conv21(x1))+z
        else :
            x21 = self.lrelu(self.conv21(x1))
        h = self.lrelu(self.conv22(x21))
        if debug :
            print('D-x',x.shape)
            print('D-x0',x1.shape)
            print('x1',x1.shape)
            print('x21',x21.shape)
            print('xh',h.shape)
        out_src = self.conv31(h)
        tmp = self.conv32(h)
        out_cls = tmp
        return out_src, out_cls.view(out_cls.size(0), out_cls.size(1))
class Generator(nn.Module):
    """StarGAN-style generator: 7x7 stem, two down-samplings, a stack of
    residual blocks, two up-samplings, and a tanh image head.  The label
    vector ``c`` is tiled spatially and concatenated to the input.
    """
    def __init__(self, conv_dim=64, c_dim=5, repeat_num=6, use_skip = True):
        super(Generator, self).__init__()
        layers = []
        layers.append(nn.Conv2d(3+c_dim, conv_dim, kernel_size=7, stride=1, padding=3, bias=False))
        layers.append(nn.InstanceNorm2d(conv_dim, affine=True, track_running_stats=True))
        layers.append(nn.ReLU(inplace=True))
        # Down-sampling layers.
        curr_dim = conv_dim
        for i in range(2):
            layers.append(nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1, bias=False))
            layers.append(nn.InstanceNorm2d(curr_dim*2, affine=True, track_running_stats=True))
            layers.append(nn.ReLU(inplace=True))
            curr_dim = curr_dim * 2
        # Bottleneck: repeat_num//2 skip blocks, one non-skip block, then
        # repeat_num//2 skip blocks again.
        # Fix: range() needs an int — `repeat_num/2` is a float on Python 3
        # and raises "'float' object cannot be interpreted as an integer".
        for i in range(repeat_num // 2):
            layers.append(ResidualBlock(dim_in=curr_dim, dim_out=curr_dim,use_skip = True))
        layers.append(ResidualBlock(dim_in=curr_dim, dim_out=curr_dim,use_skip = False))
        for i in range(repeat_num // 2):
            layers.append(ResidualBlock(dim_in=curr_dim, dim_out=curr_dim,use_skip = True))
        # Up-sampling layers.
        for i in range(2):
            layers.append(nn.ConvTranspose2d(curr_dim, curr_dim//2, kernel_size=4, stride=2, padding=1, bias=False))
            layers.append(nn.InstanceNorm2d(curr_dim//2, affine=True, track_running_stats=True))
            layers.append(nn.ReLU(inplace=True))
            curr_dim = curr_dim // 2
        layers.append(nn.Conv2d(curr_dim, 3, kernel_size=7, stride=1, padding=3, bias=False))
        layers.append(nn.Tanh())
        self.main = nn.Sequential(*layers)
    def forward(self, x, c = None):
        """Generate an image in [-1, 1] from ``x`` (+ optional label ``c``)."""
        # Replicate spatially and concatenate domain information.
        # Note that this type of label conditioning does not work at all if we use reflection padding in Conv2d.
        # This is because instance normalization ignores the shifting (or bias) effect.
        if c is not None :
            c = c.view(c.size(0), c.size(1), 1, 1)
            c = c.repeat(1, 1, x.size(2), x.size(3))
            x = torch.cat([x, c], dim=1)
        return self.main(x)
class Discriminator(nn.Module):
    """Discriminator network with PatchGAN.

    A stack of ``repeat_num`` stride-2 LeakyReLU convs feeds two heads:
    ``conv1`` (patch real/fake map) and ``conv2`` (``c_dim``-way logits,
    optionally squashed through tanh).
    """

    def __init__(self, image_size=128, conv_dim=64, c_dim=5, repeat_num=6):
        super(Discriminator, self).__init__()
        self.tanh = nn.Tanh()
        self.relu = nn.ReLU()
        stages = [nn.Conv2d(3, conv_dim, kernel_size=4, stride=2, padding=1),
                  nn.LeakyReLU(0.01)]
        curr_dim = conv_dim
        for _ in range(1, repeat_num):
            stages.append(nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1))
            stages.append(nn.LeakyReLU(0.01))
            curr_dim = curr_dim * 2
        # Classifier kernel spans the whole remaining feature map.
        kernel_size = int(image_size / np.power(2, repeat_num))
        self.main = nn.Sequential(*stages)
        self.conv1 = nn.Conv2d(curr_dim, 1, kernel_size=3, stride=1, padding=1, bias=False)
        self.conv2 = nn.Conv2d(curr_dim, c_dim, kernel_size=kernel_size, bias=False)
        # Unused in forward(); kept so existing checkpoints still load.
        self.linear1 = nn.Linear(4, c_dim)

    def forward(self, x,useTanh = True,printH = False):
        """Return ``(patch map, (B, c_dim) logits)``; tanh-squash the
        logits unless ``useTanh`` is False.  ``printH`` dumps the raw
        logits for debugging.
        """
        feat = self.main(x)
        out_src = self.conv1(feat)
        logits = self.conv2(feat)
        if printH :
            print('tmp : ',logits[:2])
        out_cls = self.tanh(logits) if useTanh else logits
        return out_src, out_cls.view(out_cls.size(0), out_cls.size(1))
# ---- dataset-concatenation artifact removed; next file: Seq-Att-Affect-master/main_red_test.py ----
import os
import argparse
from solver import Solver
from data_loader import get_loader
from torch.backends import cudnn
from model import Generator,Combiner
from model import Discriminator,DiscriminatorM,DiscriminatorMST, DiscriminatorMZ,\
DiscriminatorMZR, Combiner,CombinerSeq,CombinerSeqL,CombinerSeqAtt,CombinerSeqAttReplace, GeneratorM,DiscriminatorM
from torch.autograd import Variable
from torchvision.utils import save_image
from FacialDataset import AFEWVA,AFEWVAReduced,SEWAFEWReduced, SEWAFEWReducedLatent
from utils import *
import time
import torch.nn.functional as F
import numpy as np
import torch
import datetime
from torchvision import transforms
from torch import nn
from calcMetrix import *
from config import *
import csv
import file_walker
import matplotlib.ticker as ticker
from PIL import Image
from scipy.special import softmax
import matplotlib.gridspec as gridspec
def str2bool(v):
    """Parse a command-line flag string as a boolean.

    Only the (case-insensitive) literal 'true' maps to True; everything
    else is False.

    Fix: the original `v.lower() in ('true')` tested SUBSTRING membership
    in the string 'true' — the parentheses do not make a tuple — so inputs
    like 'ru', 't' or '' wrongly returned True.
    """
    return v.lower() == 'true'
def train_only_comb_seq() -> None:
    """Train the sequential attention combiner on precomputed latents.

    Fully command-line driven (parses its own argparse flags).  Builds
    train/validation splits of SEWAFEWReducedLatent, optimizes an MSE
    (plus optional CCC/ICC) objective over sequences of ``seq_length``
    frames, periodically evaluates MSE/COR/CCC/ICC on the held-out split,
    and checkpoints to ``<curDir>/t-models/<save_name>`` (best model gets
    a ``-best`` suffix).  Progress is appended to ``<curDir>/<save_name>.txt``.

    NOTE(review): relies on module-level names (curDir, calc* metrics,
    update_lr, print_network, weights_init_uniform_rule, truediv,
    worker_init_fn) and requires CUDA (.cuda() calls throughout).
    """
    #64,0,1200 32,1,2000? 32,2,
    os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"]="0"
    import argparse
    parser = argparse.ArgumentParser()
    # ---- command-line configuration ------------------------------------
    parser.add_argument('-split', nargs='?', const=1, type=int, default=0)#0,1,2
    parser.add_argument('-addLoss', nargs='?', const=1, type=int, default=1)#0,1,2
    parser.add_argument('-singleTask', nargs='?', const=1, type=int, default=0)#0,1,2
    parser.add_argument('-trainQuadrant', nargs='?', const=1, type=int, default=0)#0,1,2
    parser.add_argument('-dConv', nargs='?', const=1, type=int, default=64)#64
    parser.add_argument('-batch_size', nargs='?', const=1, type=int, default=4000) #0 is ori, 1 is red
    parser.add_argument('-sewa', nargs='?', const=1, type=int, default=0)#0,1,2
    parser.add_argument('-useWeightNormalization', nargs='?', const=1, type=int, default=1)#0,1,2
    parser.add_argument('-useAll', nargs='?', const=1, type=int, default=1)#0,1,2
    parser.add_argument('-seq_length', nargs='?', const=1, type=int, default=4)#1,2,4,8,16,32
    parser.add_argument('-use_attention', nargs='?', const=1, type=int, default=1)#0,1,2
    parser.add_argument('-use_ch', nargs='?', const=1, type=int, default=1)#0,1,2
    parser.add_argument('-use_h', nargs='?', const=1, type=int, default=1)#0,1,2
    parser.add_argument('-toLoad', nargs='?', const=1, type=int, default=0)#0,1,2
    parser.add_argument('-toUpgrade', nargs='?', const=1, type=int, default=0)#0,1,2
    parser.add_argument('-toAddAttention', nargs='?', const=1, type=int, default=0)#0,1,2
    parser.add_argument('-numIters', nargs='?', const=1, type=int, default=200000)#0,1,2
    args = parser.parse_args()
    split = args.split
    addLoss = args.addLoss
    singleTask = args.singleTask
    isSewa = args.sewa
    useWeight = args.useWeightNormalization
    useAll = args.useAll
    useAtt = args.use_attention
    useCH = args.use_ch
    useH = args.use_h
    trainQuadrant = args.trainQuadrant
    alterQuadrant = True
    #curDir = "/home/deckyal/eclipse-workspace/FaceTracking/"
    # ---- model / training hyper-parameters -----------------------------
    c_dim=2
    image_size=128
    d_conv_dim=args.dConv
    lambda_cls=1
    lambda_rec=10
    lambda_gp=10
    inputC = 3#input channel for discriminator
    toLoad = args.toLoad
    toUpgrade = args.toUpgrade
    toAddAttention = args.toAddAttention
    resume_iters=None #, help='resume training from this step')
    num_iters=args.numIters #, help='number of total iterations for training D')
    num_iters_decay=100000 #, help='number of iterations for decaying lr')
    g_lr=0.0001 #, help='learning rate for G')
    d_lr=0.0001 #, help='learning rate for D')
    n_critic=5 #, help='number of D updates per each G update')
    beta1=0.5 #, help='beta1 for Adam optimizer')
    beta2=0.999 #, help='beta2 for Adam optimizer')
    isVideo = True
    toAlign = False
    seq_length = args.seq_length
    # Effective per-step batch: requested size divided by sequence length.
    batch_size=int(truediv(args.batch_size,seq_length))#500, help='mini-batch size')
    # Test configuration.
    test_iters=200000 #, help='test model from this step')
    # Miscellaneous.
    num_workers=1
    mode='train' #, choices=['train', 'test'])
    use_tensorboard=False
    log_dir='stargan/logs'
    model_save_dir='stargan/models'
    sample_dir='stargan/samples'
    result_dir='stargan/results'
    # Step size.
    log_step=10
    sample_step=1000
    model_save_step=10000
    lr_update_step=100
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # For fast training.
    cudnn.benchmark = True
    if not os.path.exists(model_save_dir):
        os.makedirs(model_save_dir)
    #Split
    #split = 0
    multi_gpu = False
    testSplit = split
    print("Test split " , testSplit)
    # ---- cross-validation splits: train on all folds except testSplit --
    nSplit = 5
    listSplit = []
    for i in range(nSplit):
        if i!=testSplit :
            listSplit.append(i)
    print(listSplit)
    # ---- checkpoint / log naming scheme --------------------------------
    if not isSewa :
        main_name = 'AF-C-'
        d_name = 'AFEW-VA-Fixed'#'AFEW-VA-Fixed'
        dbType = 0
    else :
        main_name = 'SE-C-'
        d_name = 'SEWA'
        dbType = 1
    if useH :
        main_name += 'R-'
    if useCH :
        main_name += 'CH-'
    load_to_add = main_name
    if useAtt :
        main_name += 'A-'
    load_to_add_split = main_name
    mseLoss = nn.MSELoss()
    main_name+=(str(d_conv_dim)+'-')
    load_to_add+=(str(d_conv_dim)+'-')
    load_to_add_split+=(str(d_conv_dim)+'-')
    if trainQuadrant :
        if alterQuadrant :
            main_name+="-QDAL"
            c_dim = 1
        else :
            main_name+="-QD"
            c_dim = 4
    save_name = main_name+str(testSplit)+'-n-'+str(seq_length)
    print('saving name is : ',save_name)
    load_to_add_split = load_to_add_split+str(testSplit)+'-n-'+str(seq_length)
    load_to_add = load_to_add+str(testSplit)+'-n-'+str(seq_length)
    # "Upgrade" checkpoints come from the run with half the sequence length.
    load_prev = main_name+str(testSplit)+'-n-'+str(int(truediv(seq_length,2)))
    err_file = curDir+save_name+".txt"
    # ---- data ----------------------------------------------------------
    transform =transforms.Compose([
            transforms.Resize((image_size,image_size)),
            #transforms.CenterCrop(image_size),
            transforms.ToTensor(),
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
            #transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
            ])
    ID = SEWAFEWReducedLatent([d_name], None, image_size, 1,split=True, nSplit = nSplit ,listSplit=listSplit
            ,isVideo=isVideo, seqLength = seq_length,dbType = dbType, returnQuadrant=trainQuadrant,
            returnWeight = useWeight,useAll = useAll, splitNumber=testSplit)
    dataloader = torch.utils.data.DataLoader(dataset = ID, batch_size = batch_size, shuffle = True,worker_init_fn=worker_init_fn)
    VD = SEWAFEWReducedLatent([d_name], None, image_size, 1,split=True, nSplit = nSplit,listSplit=[testSplit]
            ,isVideo=isVideo, seqLength = seq_length, returnQuadrant=trainQuadrant,dbType = dbType,useAll = useAll)
    dataloaderV = torch.utils.data.DataLoader(dataset = VD, batch_size = batch_size, shuffle = False)
    # ---- model + optimizer ---------------------------------------------
    if not useH:
        model_ft = CombinerSeqAtt(image_size, d_conv_dim, c_dim, 4,64,512,seq_length,batch_size,useCH=useCH)
    else :
        model_ft = CombinerSeqAttReplace(image_size, d_conv_dim, c_dim, 4,64,512,seq_length,batch_size,useCH=useCH)
    d_optimizer = torch.optim.Adam(model_ft.parameters(), d_lr, [beta1, beta2])
    print_network(model_ft, 'D')
    # Checkpoint bootstrap: resume / upgrade from half-length run /
    # retrofit attention onto a no-attention model / fresh init.
    if toLoad:
        print('loading previous model ')
        model_ft.load_state_dict(torch.load(curDir+'t-models/'+save_name))
    elif toUpgrade :
        print('upgrading from previous model ',load_prev)
        model_ft.load_state_dict(torch.load(curDir+'t-models/'+load_prev))
    elif toAddAttention :
        print('adding attention to original model ',load_to_add)
        model_ft.load_state_dict(torch.load(curDir+'t-models/'+load_to_add))
    else :
        model_ft.apply(weights_init_uniform_rule)
    model_ft.to(device)
    d_lr = d_lr
    start_iters = 0
    '''if resume_iters:
        start_iters = resume_iters
        restore_model(resume_iters)'''
    # Start training.
    print('Start training...')
    start_time = time.time()
    f = open(err_file,'w+')
    f.write("err : ")
    f.close()
    #best_model_wts = copy.deepcopy(model.state_dict())
    # Best-so-far metric trackers (MSE lower is better, the rest higher).
    lowest_loss = 99999
    lMSA,lMSV,lCCV,lCCA,lICA,lICV,lCRA, lCRV, total = 9999,9999,-9999, -9999, -9999, -9999, -9999, -9999, -9999
    w,wv,wa = None,None,None
    print('batch size : ',batch_size)
    for i in range(start_iters, num_iters):
        # Re-seed every epoch for fresh shuffling/augmentation randomness.
        random.seed()
        manualSeed = random.randint(1, 10000) # use if you want new results
        random.seed(manualSeed)
        torch.manual_seed(manualSeed)
        print('Epoch {}/{}'.format(i, num_iters - 1))
        print('-'*10)
        running_loss = 0
        model_ft.train()
        for x,(data) in enumerate(dataloader,0) :
            # data: (inputs, labels, landmarks, _, ..., weights at [5])
            # — assumed from usage; TODO confirm against the dataset class.
            rinputs_l, rlabels_l,rldmrk_l,_ = data[0],data[1],data[2],data[3]
            if useWeight :
                w = data[5].cuda()
            ccPred_l = []
            model_ft.initialize(batch_size = rinputs_l.size(0)) #initialize for each seq
            prev_result = None
            d_optimizer.zero_grad()
            cumLoss = 0
            if useAtt :
                l_h = []
            #print('shape of inputs',rinputs_l.shape)
            # Unroll over the sequence; losses accumulate into cumLoss and
            # a single backward/step is done per sequence batch.
            for y in range(seq_length):
                rinputs, rlabels = rinputs_l[:,y].cuda(),rlabels_l[:,y].cuda()
                if useAtt :
                    # Feed the hidden states of previous steps as the
                    # attention memory.
                    if len(l_h) > 0:
                        outputs = model_ft(rinputs,prev_h = l_h)
                    else :
                        outputs = model_ft(rinputs)
                    if useCH :
                        l_h.append(torch.cat((model_ft.lstm1_hdn[0][0],model_ft.lstm1_hdn[1][0]),1))
                    else :
                        l_h.append(model_ft.lstm1_hdn[0][0])
                else :
                    outputs = model_ft(rinputs)
                ccPred_l.append(outputs)
                #loss+=mseLoss(outputs,rlabels)
                loss = calcMSET(outputs,rlabels,w)
                cumLoss+=loss
                if addLoss :
                    # Correlation-style losses (negated: higher metric ==
                    # lower loss) on valence/arousal columns.
                    ov,oa,lv,la = outputs[:,0],outputs[:,1], rlabels[:,0], rlabels[:,1]
                    corV = -calcCORT(ov, lv, wv)
                    corA = -calcCORT(oa, la, wa)
                    cccV = -calcCCCT(ov, lv, wv)
                    cccA = -calcCCCT(oa, la, wa)
                    iccV = -calcICCT(ov, lv, wv)
                    iccA = -calcICCT(oa, la, wa)
                    # lossO = corV+corA+cccV+cccA+iccV+iccA  (cor terms currently disabled)
                    lossO = cccV+cccA+iccV+iccA
                if not addLoss :
                    print("{}/{} loss : {}".format(x,int(len(dataloader.dataset)/batch_size),loss.item()))
                else :
                    print("{}/{} loss : {:.8f}, cor : {:.8f}/{:.8f}, ccc : {:.8f}/{:.8f}, icc : {:.8f}/{:.8f}".format(x,int(len(dataloader.dataset)/batch_size),
                        loss.item(),corV.item(),corA.item(),cccV.item(),cccA.item(),iccV.item(),iccA.item()))
                f = open(err_file,'a')
                if not addLoss :
                    f.write("{}/{} loss : {}\n".format(x,int(len(dataloader.dataset)/batch_size),loss.item()))
                else :
                    f.write("{}/{} loss : {:.3f}, cor : {:.3f}/{:.3f}, ccc : {:.3f}/{:.3f}, icc : {:.3f}/{:.3f}\n".format(x,int(len(dataloader.dataset)/batch_size),
                        loss.item(),corV.item(),corA.item(),cccV.item(),cccA.item(),iccV.item(),iccA.item()))
                f.close()
                if addLoss :
                    cumLoss += lossO
            # One optimizer step per sequence batch.
            cumLoss.backward()
            d_optimizer.step()
        # Decay learning rates.
        if (i+1) % lr_update_step == 0 and (i+1) > 50 : #(num_iters - num_iters_decay):
            d_lr -= (d_lr / float(num_iters_decay))
            update_lr(d_lr,d_optimizer)
            print ('Decayed learning rates, g_lr: {}, d_lr: {}.'.format(g_lr, d_lr))
        # Periodic checkpoint every other epoch.
        if i %2 == 0 :
            if multi_gpu :
                torch.save(model_ft.module.state_dict(),curDir+'t-models/'+save_name)
            else :
                torch.save(model_ft.state_dict(),curDir+'t-models/'+save_name)
        #Deep copy the model_ft
        # ---- validation every 5 epochs ---------------------------------
        if i%5 == 0 :#epoch_loss < lowest_loss :
            # NOTE(review): no-op assignment kept from the original.
            lowest_loss = lowest_loss
            model_ft.eval()
            if True :
                listValO = []
                listAroO = []
                listValL = []
                listAroL = []
                # t* lists catch ragged (non-full) final batches.
                tvo = [];tao=[];tvl = []; tal = [];
                anyDiffer = False
                for x,(data) in enumerate(dataloaderV,0) :
                    rinputs_l, rlabels_l,rldmrk_l = data[0],data[1],data[2]
                    model_ft.initialize(rinputs_l.shape[0])
                    if useAtt :
                        l_h = []
                    with torch.set_grad_enabled(False) :
                        pre_result = None
                        for y in range(seq_length):
                            rinputs, rlabels, rldmrk = rinputs_l[:,y], rlabels_l[:,y],rldmrk_l[:,y]
                            inputs = rinputs.cuda()#to(device)
                            labels = rlabels.cuda()#to(device)
                            '''if useAtt :
                                outputs,the_w = model_ft(inputs,ret_w=True)
                                print(x,',',int(truediv(len(VD),batch_size)),outputs[:2], labels[:2],outputs[:,0].shape[0],outputs.shape)
                                print('w',the_w[:2])
                            else :
                                outputs = model_ft(inputs)
                                print(x,',',int(truediv(len(VD),batch_size)),outputs[:2], labels[:2],outputs[:,0].shape[0],outputs.shape)
                            '''
                            if useAtt :
                                if len(l_h) > 0:
                                    outputs,the_w = model_ft(inputs,prev_h = l_h,ret_w=True)
                                    print('w',the_w[:2])
                                else :
                                    outputs = model_ft(inputs)
                                if useCH :
                                    l_h.append(torch.cat((model_ft.lstm1_hdn[0][0],model_ft.lstm1_hdn[1][0]),1))
                                else :
                                    l_h.append(model_ft.lstm1_hdn[0][0])
                            else :
                                outputs = model_ft(inputs)
                            #print('o shape',outputs.shape)
                            print(x,',',int(truediv(len(VD),batch_size)),outputs[:2], labels[:2],outputs[:,0].shape[0],outputs.shape)
                            if outputs[:,0].shape[0] != batch_size : #in case the batch size is differ, usually at end of iter
                                anyDiffer = True
                                print('differ')
                                tvo.append(outputs[:,0].detach().cpu())
                                tao.append(outputs[:,1].detach().cpu())
                                tvl.append(labels[:,0].detach().cpu())
                                tal.append(labels[:,1].detach().cpu())
                            else :
                                print('equal')
                                listValO.append(outputs[:,0].detach().cpu())
                                listAroO.append(outputs[:,1].detach().cpu())
                                listValL.append(labels[:,0].detach().cpu())
                                listAroL.append(labels[:,1].detach().cpu())
                # Flatten stacked per-batch tensors to 1-D numpy arrays.
                if len(listValO) > 0 :
                    est_V = np.asarray(torch.stack(listValO)).flatten()
                    est_A = np.asarray(torch.stack(listAroO)).flatten()
                    gt_V = np.asarray(torch.stack(listValL)).flatten()
                    gt_A = np.asarray(torch.stack(listAroL)).flatten()
                if anyDiffer :
                    est_Vt = np.asarray(torch.stack(tvo)).flatten()
                    est_At = np.asarray(torch.stack(tao)).flatten()
                    gt_Vt = np.asarray(torch.stack(tvl)).flatten()
                    gt_At = np.asarray(torch.stack(tal)).flatten()
                    #python main_red_test.py -useAll=1 -batch_size=6000 -seq_length=4 -use_attention=1
                    #now concatenate
                    if len(listValO) > 0 :
                        est_V = np.concatenate((est_V,est_Vt))
                        est_A = np.concatenate((est_A,est_At))
                        gt_V = np.concatenate((gt_V,gt_Vt))
                        gt_A = np.concatenate((gt_A,gt_At))
                    else :
                        est_V,est_A,gt_V,gt_A = est_Vt,est_At,gt_Vt,gt_At
                print(est_V.shape, gt_V.shape)
                # Validation metrics (the *2 variants are gt-vs-gt sanity
                # checks, expected to be ~1).
                mseV = calcMSE(est_V, gt_V)
                mseA = calcMSE(est_A, gt_A)
                corV = calcCOR(est_V, gt_V)
                corA = calcCOR(est_A, gt_A)
                iccV = calcICC(est_V, gt_V)
                iccA = calcICC(est_A, gt_A)
                iccV2 = calcICC(gt_V, gt_V)
                iccA2 = calcICC(gt_A, gt_A)
                cccV = calcCCC(est_V, gt_V)
                cccA = calcCCC(est_A, gt_A)
                cccV2 = calcCCC(gt_V, gt_V)
                cccA2 = calcCCC(gt_A, gt_A)
                # Track best-so-far per metric.
                if lMSA > mseA :
                    lMSA = mseA
                if lMSV > mseV :
                    lMSV = mseV
                if corA > lCRA :
                    lCRA = corA
                if corV > lCRV :
                    lCRV = corV
                if cccA > lCCA :
                    lCCA = cccA
                if cccV > lCCV :
                    lCCV = cccV
                if iccA > lICA :
                    lICA = iccA
                if iccV > lICV :
                    lICV = iccV
                # Save the best model by the sum of correlation metrics.
                if (corA+corV+cccA+cccV+iccA+iccV) > total :
                    total = (corA+corV+cccA+cccV+iccA+iccV)
                    if multi_gpu :
                        torch.save(model_ft.module.state_dict(),curDir+'t-models/'+save_name+'-best')
                    else :
                        torch.save(model_ft.state_dict(),curDir+'t-models/'+save_name+'-best')
                print('Best, MSEA : '+str(lMSA)+', CORA : '+str(lCRA)+', CCCA : '+str(lCCA)+', ICCA : '+str(lICA)+ ', MSEV : ' +str(lMSV)+ ', CORV : ' +str(lCRV)+', CCCV : '+str(lCCV) +', ICCV : '+str(lICV)+', Total : '+str(total))
                print('MSEV : ',mseV, ', CORV : ',corV,', CCCV : ',cccV,', CCCV2 : ',cccV2,', ICCV : ',iccV,', ICCV2 : ',iccV2)
                print('MSEA : ',mseA, ', CORA : ',corA,', CCCA : ',cccA,', CCCA2 : ',cccA2,', ICCA : ',iccA,', ICCA2 : ',iccA2)
                f = open(err_file,'a')
                res = 'MSEV : '+str(mseV)+ ', CORV : ' +str(corV)+', CCCV : '+str(cccV) +', ICCV : '+str(iccV)+' \n '
                f.write(res)
                res = 'MSEA : '+str(mseA)+ ', CORA : '+str(corA) +', CCCA : '+str(cccA) +', ICCA : '+str(iccA)+' \n '
                f.write(res)
                res = 'Best, MSEA : '+str(lMSA)+', CORA : '+str(lCRA)+', CCCA : '+str(lCCA)+', ICCA : '+str(lICA)+ ', MSEV : ' +str(lMSV)+ ', CORV : ' +str(lCRV)+', CCCV : '+str(lCCV) +', ICCV : '+str(lICV)+', Total : '+str(total)+' \n '
                f.write(res)
                f.close()
    print('Best val Acc: {:4f}'.format(lowest_loss))
    return
def test_only_comb_seq():
    """Evaluate trained sequence-combiner (C) checkpoints on held-out splits.

    For every sequence length in ``list_seq`` and every cross-validation
    split, this loads the matching best model, runs it over the validation
    loader (optionally feeding previous hidden states through the attention
    mechanism), gathers valence/arousal predictions, and reports
    MSE/COR/CCC/ICC. Per-split metrics are written as .npy/.csv and then
    aggregated over all splits; when ``toSave`` is enabled, per-frame
    predictions, labels and attention weights are also written next to the
    source images under ``dirTarget``.
    """
    #64,0,1200 32,1,2000? 32,2,
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('-split', nargs='?', const=1, type=int, default=0)#0,1,2
    parser.add_argument('-addLoss', nargs='?', const=1, type=int, default=1)#0,1,2
    parser.add_argument('-singleTask', nargs='?', const=1, type=int, default=0)#0,1,2
    parser.add_argument('-trainQuadrant', nargs='?', const=1, type=int, default=0)#0,1,2
    parser.add_argument('-dConv', nargs='?', const=1, type=int, default=64)#64
    parser.add_argument('-batch_size', nargs='?', const=1, type=int, default=6000) #0 is ori, 1 is red
    parser.add_argument('-sewa', nargs='?', const=1, type=int, default=0)#0,1,2
    parser.add_argument('-useWeightNormalization', nargs='?', const=1, type=int, default=1)#0,1,2
    parser.add_argument('-useAll', nargs='?', const=1, type=int, default=1)#0,1,2
    parser.add_argument('-seq_length', nargs='?', const=1, type=int, default=4)#1,2,4,8,16,32
    parser.add_argument('-use_attention', nargs='?', const=1, type=int, default=1)#0,1,2
    parser.add_argument('-use_ch', nargs='?', const=1, type=int, default=1)#0,1,2
    parser.add_argument('-use_h', nargs='?', const=1, type=int, default=0)#0,1,2
    parser.add_argument('-toLoad', nargs='?', const=1, type=int, default=1)#0,1,2
    parser.add_argument('-toUpgrade', nargs='?', const=1, type=int, default=0)#0,1,2
    parser.add_argument('-toAddAttention', nargs='?', const=1, type=int, default=0)#0,1,2
    parser.add_argument('-per', nargs='?', const=1, type=int, default=0)#0,1,2
    parser.add_argument('-numIters', nargs='?', const=1, type=int, default=200000)#0,1,2
    args = parser.parse_args()
    split = args.split
    addLoss = args.addLoss
    singleTask = args.singleTask
    isSewa = args.sewa
    useWeight = args.useWeightNormalization
    useAll = args.useAll
    useAtt = args.use_attention
    useCH = args.use_ch
    useH = args.use_h
    trainQuadrant = args.trainQuadrant
    alterQuadrant = True
    per = args.per
    list_seq = [2,4,8,16,32]#[1]#[0,2,4,8,16,32]
    list_split = range(5)
    listRes = []
    c_dim = 2
    image_size = 128
    d_conv_dim = args.dConv
    inputC = 3#input channel for discriminator
    isVideo = True
    toAlign = False
    toLoad = args.toLoad
    toUpgrade = args.toUpgrade
    toAddAttention = args.toAddAttention
    num_workers = 1
    model_save_dir = 'stargan/models'
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    toRecordRes = True #use to get the metrics on the model's fodler.
    toSave = False #use tosave to save the results to external folder
    dirTarget = "/media/deckyal/INT-450GB/extracted"
    for seq_length in (list_seq):
        root_dir = '/home/deckyal/Desktop/all-models/'
        sp_dir = '0-CH-SeqAfew-att-plus5'
        sq_dir = '/'+str(seq_length)+"/"
        theDir = root_dir+sp_dir+sq_dir
        resDir = theDir+'result/'
        checkDirMake(resDir)
        #seq_length = args.seq_length
        # Keep the total number of frames per batch roughly constant
        # across different sequence lengths.
        batch_size = int(truediv(args.batch_size,seq_length))#500, help='mini-batch size')
        evaluateSplit = True
        listRes = []
        if evaluateSplit:
            for split in list_split:
                testSplit = split
                print("Test split " , testSplit)
                nSplit = 5
                listSplit = []
                for i in range(nSplit):
                    if i != testSplit:
                        listSplit.append(i)
                print(listSplit)
                # Rebuild the checkpoint naming scheme used at training time.
                if not isSewa:
                    main_name = 'AF-C-'
                    d_name = 'AFEW-VA-Fixed'#'AFEW-VA-Fixed'
                    dbType = 0
                else:
                    main_name = 'SE-C-'
                    d_name = 'SEWA'
                    dbType = 1
                if useH:
                    main_name += 'R-'
                if useCH:
                    main_name += 'CH-'
                load_to_add = main_name
                if useAtt:
                    main_name += 'A-'
                load_to_add_split = main_name
                mseLoss = nn.MSELoss()
                main_name += (str(d_conv_dim)+'-')
                load_to_add += (str(d_conv_dim)+'-')
                load_to_add_split += (str(d_conv_dim)+'-')
                if trainQuadrant:
                    if alterQuadrant:
                        main_name += "-QDAL"
                        c_dim = 1
                    else:
                        main_name += "-QD"
                        c_dim = 4
                save_name = main_name+str(testSplit)+'-n-'+str(seq_length)
                save_name_all = main_name+'all-'+str(seq_length)
                print('saving name is : ',save_name)
                VD = SEWAFEWReducedLatent([d_name], None, image_size, 1, split=True, nSplit=nSplit, listSplit=[testSplit]
                                          , isVideo=isVideo, seqLength=seq_length, returnQuadrant=trainQuadrant, dbType=dbType, useAll=useAll, returnFName=toSave)
                dataloaderV = torch.utils.data.DataLoader(dataset=VD, batch_size=batch_size, shuffle=False)
                if not useH:
                    model_ft = CombinerSeqAtt(image_size, d_conv_dim, c_dim, 4,64,512,seq_length,batch_size,useCH=useCH)
                else:
                    model_ft = CombinerSeqAttReplace(image_size, d_conv_dim, c_dim, 4,64,512,seq_length,batch_size,useCH=useCH)
                if toLoad:
                    print('loading previous model ')
                    model_ft.load_state_dict(torch.load(theDir+save_name))
                model_ft.to(device)
                model_ft.eval()
                # Accumulators: listVal*/listAro* hold full-sized batches,
                # tv*/ta* hold the (rare) short final batch.
                listValO = []
                listAroO = []
                listValL = []
                listAroL = []
                tvo = []; tao = []; tvl = []; tal = []
                anyDiffer = False
                print('not eval')
                #model_ft.eval()
                for x,(data) in enumerate(dataloaderV,0):
                    rinputs_l, rlabels_l, rldmrk_l = data[0], data[1], data[2]
                    if toSave:
                        fname_l = data[-1]
                    model_ft.initialize(rinputs_l.shape[0])
                    if useAtt:
                        l_h = []
                    the_w = None
                    with torch.set_grad_enabled(False):
                        pre_result = None
                        # Walk the temporal dimension, feeding the growing
                        # list of previous hidden states to the attention.
                        for y in range(seq_length):
                            rinputs, rlabels, rldmrk = rinputs_l[:,y], rlabels_l[:,y], rldmrk_l[:,y]
                            inputs = rinputs.cuda()#to(device)
                            labels = rlabels.cuda()#to(device)
                            if useAtt:
                                if len(l_h) > 0:
                                    outputs, the_w = model_ft(inputs, prev_h=l_h, ret_w=True)
                                    print('w',the_w[:2])
                                else:
                                    outputs = model_ft(inputs)
                                if useCH:
                                    # Keep both cell and hidden LSTM states.
                                    l_h.append(torch.cat((model_ft.lstm1_hdn[0][0],model_ft.lstm1_hdn[1][0]),1))
                                else:
                                    l_h.append(model_ft.lstm1_hdn[0][0])
                            else:
                                outputs = model_ft(inputs)
                            #print('o shape',outputs.shape)
                            print(x,',',int(truediv(len(VD),batch_size)),outputs[:2], labels[:2],outputs[:,0].shape[0],outputs.shape)
                            if outputs[:,0].shape[0] != batch_size: #in case the batch size is differ, usually at end of iter
                                anyDiffer = True
                                print('differ')
                                tvo.append(outputs[:,0].detach().cpu())
                                tao.append(outputs[:,1].detach().cpu())
                                tvl.append(labels[:,0].detach().cpu())
                                tal.append(labels[:,1].detach().cpu())
                            else:
                                print('equal')
                                listValO.append(outputs[:,0].detach().cpu())
                                listAroO.append(outputs[:,1].detach().cpu())
                                listValL.append(labels[:,0].detach().cpu())
                                listAroL.append(labels[:,1].detach().cpu())
                            if toSave:
                                if the_w is None:
                                    # No attention weights yet (first step):
                                    # use a zeroed placeholder of label shape.
                                    the_w = labels.clone()
                                    the_w *= 0
                                #print(fname_l)
                                #exit(0)
                                for fn,pred,gt,tw in zip(fname_l[0],outputs.detach().cpu().numpy(),labels.detach().cpu().numpy(),the_w.detach().cpu().numpy()):
                                    #print(fn,pred.shape,gt.shape,tw.shape)
                                    #1st get the file name
                                    dirName, fName = os.path.split(fn)
                                    fName = fName.split('.')[0]
                                    #print('fname ',fName)
                                    print(fName,tw)
                                    listDir = dirName.split('/')
                                    indexName = listDir.index(d_name)
                                    folderName = os.path.join(dirTarget,d_name,listDir[indexName+1])
                                    folderNameImage = os.path.join(folderName,'img')
                                    folderNameRes = os.path.join(folderName,'resPred')
                                    folderNameW = os.path.join(folderName,'theW')
                                    checkDirMake(folderNameImage)
                                    checkDirMake(folderNameRes)
                                    checkDirMake(folderNameW)
                                    #original image path
                                    listDir[-1] = 'img-128'
                                    imgPath = '/'.join(listDir)
                                    #check the image from actual gt, jpg etc. and save dummy file
                                    l_poss = ["jpg","jpeg",'png']
                                    imgName = None
                                    intr = 0
                                    imgName = imgPath+'/'+fName+"."+l_poss[intr]
                                    while (not os.path.isfile(imgName)):
                                        #print('checking ',imgName)
                                        intr += 1
                                        imgName = imgPath+'/'+fName+"."+l_poss[intr]
                                    f = open(folderNameImage+'/'+fName+".txt",'w')
                                    f.write(imgName)
                                    f.close()
                                    #print('saved ',imgName,' to', folderNameImage+'/'+fName+".txt")
                                    #now save the pred,gt in npz
                                    np.savez(folderNameRes+'/'+fName+".npz",pred=pred,lbl=gt)
                                    # BUG FIX: save this frame's attention weight
                                    # `tw`, not the whole batch tensor `the_w`
                                    # (the original wrote the same full batch
                                    # into every per-frame .npy file).
                                    np.save(folderNameW+'/'+fName+".npy",tw)
                                #exit(0)
                if len(listValO) > 0:
                    est_V = np.asarray(torch.stack(listValO)).flatten()
                    est_A = np.asarray(torch.stack(listAroO)).flatten()
                    gt_V = np.asarray(torch.stack(listValL)).flatten()
                    gt_A = np.asarray(torch.stack(listAroL)).flatten()
                if anyDiffer:
                    est_Vt = np.asarray(torch.stack(tvo)).flatten()
                    est_At = np.asarray(torch.stack(tao)).flatten()
                    gt_Vt = np.asarray(torch.stack(tvl)).flatten()
                    gt_At = np.asarray(torch.stack(tal)).flatten()
                    #now concatenate
                    if len(listValO) > 0:
                        est_V = np.concatenate((est_V,est_Vt))
                        est_A = np.concatenate((est_A,est_At))
                        gt_V = np.concatenate((gt_V,gt_Vt))
                        gt_A = np.concatenate((gt_A,gt_At))
                    else:
                        est_V,est_A,gt_V,gt_A = est_Vt,est_At,gt_Vt,gt_At
                print(est_V.shape, gt_V.shape)
                mseV = calcMSE(est_V, gt_V)
                mseA = calcMSE(est_A, gt_A)
                corV = calcCOR(est_V, gt_V)
                corA = calcCOR(est_A, gt_A)
                iccV = calcICC(est_V, gt_V)
                iccA = calcICC(est_A, gt_A)
                # *2 variants are self-agreement sanity checks (gt vs gt).
                iccV2 = calcICC(gt_V, gt_V)
                iccA2 = calcICC(gt_A, gt_A)
                cccV = calcCCC(est_V, gt_V)
                cccA = calcCCC(est_A, gt_A)
                cccV2 = calcCCC(gt_V, gt_V)
                cccA2 = calcCCC(gt_A, gt_A)
                print('MSEV : ',mseV, ', CORV : ',corV,', CCCV : ',cccV,', CCCV2 : ',cccV2,', ICCV : ',iccV,', ICCV2 : ',iccV2)
                print('MSEA : ',mseA, ', CORA : ',corA,', CCCA : ',cccA,', CCCA2 : ',cccA2,', ICCA : ',iccA,', ICCA2 : ',iccA2)
                # Rows: MSE, COR, CCC, ICC; columns: valence, arousal.
                res = np.asarray([[mseV,mseA],[corV,corA],[cccV,cccA],[iccV,iccA]])
                listRes.append(res)
                if toRecordRes:
                    np.save(resDir+save_name+".npy",res)
                    print('saved : ',resDir+save_name+".npy")
                    with open(resDir+save_name+'.csv', 'w', newline='') as csvfile:
                        spamwriter = csv.writer(csvfile, delimiter=',',
                                                quotechar='|', quoting=csv.QUOTE_MINIMAL)
                        spamwriter.writerow([res[0,0],res[0,1]])
                        spamwriter.writerow([res[1,0],res[1,1]])
                        spamwriter.writerow([res[2,0],res[2,1]])
                        spamwriter.writerow([res[3,0],res[3,1]])
        if toRecordRes:
            #now compiling the reesults from 5 split
            listRes = np.stack(listRes)
            np.save(resDir+save_name_all+".npy",listRes)
            print('saved : ',resDir+save_name_all)
            # Rebuild the aggregated save name from scratch (mirrors the
            # per-split naming above).
            if not isSewa:
                main_name = 'AF-C-'
                d_name = 'AFEW-VA-Fixed'#'AFEW-VA-Fixed'
                dbType = 0
            else:
                main_name = 'SE-C-'
                d_name = 'SEWA'
                dbType = 1
            if useH:
                main_name += 'R-'
            if useCH:
                main_name += 'CH-'
            load_to_add = main_name
            if useAtt:
                main_name += 'A-'
            load_to_add_split = main_name
            mseLoss = nn.MSELoss()
            main_name += (str(d_conv_dim)+'-')
            load_to_add += (str(d_conv_dim)+'-')
            load_to_add_split += (str(d_conv_dim)+'-')
            if trainQuadrant:
                if alterQuadrant:
                    main_name += "-QDAL"
                    c_dim = 1
                else:
                    main_name += "-QD"
                    c_dim = 4
            save_name_all = main_name+'all-'+str(seq_length)
            listRes = np.load(resDir+save_name_all+".npy")
            print('loaded : ',resDir+save_name_all)
            print(listRes)
            # Flatten per-split [metric, (V,A)] results into one row per
            # metric: V0, A0, V1, A1, ... for the CSV summary.
            l_m = []
            l_cor = []
            l_cc = []
            l_ic = []
            for tmp in range(listRes.shape[0]):
                l_m.append(listRes[tmp][0,0]); l_m.append(listRes[tmp][0,1])
                l_cor.append(listRes[tmp][1,0]); l_cor.append(listRes[tmp][1,1])
                l_cc.append(listRes[tmp][2,0]); l_cc.append(listRes[tmp][2,1])
                l_ic.append(listRes[tmp][3,0]); l_ic.append(listRes[tmp][3,1])
            if toRecordRes:
                with open(resDir+save_name_all+'.csv', 'w', newline='') as csvfile:
                    spamwriter = csv.writer(csvfile, delimiter=',',
                                            quotechar='|', quoting=csv.QUOTE_MINIMAL)
                    spamwriter.writerow(l_m)
                    spamwriter.writerow(l_cor)
                    spamwriter.writerow(l_cc)
                    spamwriter.writerow(l_ic)
            print(np.stack(l_m))
            #now opening the file to make the csv
if __name__ == '__main__':
    # Train the sequence combiner C given features extracted by G and D.
    train_only_comb_seq()
    # BUG FIX: the original line was the bare name `test_only_comb_seq`
    # (no call parentheses) -- a no-op expression that silently did
    # nothing. Kept disabled to preserve behavior; uncomment to evaluate
    # the trained sequence combiner.
    # test_only_comb_seq()  # To test the seq C
| 40,371 | 38.972277 | 237 | py |
Seq-Att-Affect | Seq-Att-Affect-master/main_gan_single_reduction.py | import os
import argparse
from solver import Solver
from data_loader import get_loader
from torch.backends import cudnn
from model import Generator, Discriminator, GeneratorM, GeneratorMZ, GeneratorMZR, DiscriminatorM,
DiscriminatorMST,DiscriminatorMZ,DiscriminatorMZR,DiscriminatorMZRL,CombinerSeqAtt
from torch.autograd import Variable
from torchvision.utils import save_image
from FacialDataset import AFEWVA, AFEWVAReduced,SEWAFEWReduced
from utils import *
import time
import torch.nn.functional as F
import numpy as np
import torch
import datetime
from torchvision import transforms
from torch import nn
from calcMetrix import *
from config import *
import argparse
import shutil
# Module-level CLI configuration. All flags are integers used as booleans
# (0 = off, 1 = on) unless noted. `args` is parsed at import time and read
# by the training functions below via module scope.
parser = argparse.ArgumentParser()
# Cross-validation test split index and dataset selection.
parser.add_argument('-split', nargs='?', const=1, type=int, default=0)#0,1,2
parser.add_argument('-sewa', nargs='?', const=1, type=int, default=0)#0,1,2
parser.add_argument('-semaine', nargs='?', const=1, type=int, default=1)#0,1,2
# Base channel widths for the generator / discriminator conv stacks.
parser.add_argument('-gConv', nargs='?', const=1, type=int, default=16)#64
parser.add_argument('-dConv', nargs='?', const=1, type=int, default=16)#64
parser.add_argument('-nSel', nargs='?', const=1, type=int, default=0) #0 is ori, 1 is red
parser.add_argument('-batch_size', nargs='?', const=1, type=int, default=300) #0 is ori, 1 is red
parser.add_argument('-multi_gpu', nargs='?', const=1, type=int, default=0) #0 is ori, 1 is red
# Checkpoint iteration to resume from, and run mode.
parser.add_argument('-resume_iters', nargs='?', const=1, type=int, default=79)#0,1,2. helpfull
parser.add_argument('-mode', nargs='?', const=1, type=int, default=0)#0 : train, 1 : extract
#may change
parser.add_argument('-tryDenoise', nargs='?', const=1, type=int, default=1)#0,1,2. Helpfull
parser.add_argument('-useWeightNormalization', nargs='?', const=0, type=int, default=1)#0,1,2. helpfull
parser.add_argument('-addLoss', nargs='?', const=1, type=int, default=1)#0,1,2. helpfull
#dont change
parser.add_argument('-singleTask', nargs='?', const=1, type=int, default=0)#0,1,2. Multitask is slightly better
parser.add_argument('-trainQuadrant', nargs='?', const=1, type=int, default=0)#0,1,2
parser.add_argument('-alterQuadrant', nargs='?', const=1, type=int, default=0)#0,1,2
parser.add_argument('-useLatent', nargs='?', const=1, type=int, default=0)#0,1,2 #To use linear latent : bad
parser.add_argument('-useSkip', nargs='?', const=1, type=int, default=0)#0,1,2 #To use skip : no difference
args = parser.parse_args()
def str2bool(v):
    """Return True iff *v* equals 'true' (case-insensitive).

    BUG FIX: the original `v.lower() in ('true')` used `('true')`, which is
    just the string 'true' (not a one-element tuple), so the check was a
    substring test: inputs such as 'rue', 't' and even '' evaluated True.
    """
    return v.lower() == 'true'
##############################################################
def toQuadrant(inputData = None, min = -10, max = 10, toOneHot = False):
    """Map a (valence, arousal) pair to its affect quadrant.

    The range [min, max] is cut at its midpoint; each of valence
    (inputData[0]) and arousal (inputData[1]) is classed low or high.
    Quadrants: 0 = high V / high A, 1 = low V / high A,
    2 = low V / low A, 3 = high V / low A.
    Returns the quadrant index, or a length-4 one-hot numpy vector
    when ``toOneHot`` is set.
    """
    midpoint = truediv(min + max, 2)
    #print(min,max)
    #print('the threshold : ',threshold)
    valence_low = inputData[0] < midpoint
    arousal_low = inputData[1] < midpoint
    if valence_low:
        quadrant = 2 if arousal_low else 1
    else:
        quadrant = 3 if arousal_low else 0
    if not toOneHot:
        return quadrant
    one_hot = np.zeros(4)
    one_hot[quadrant] += 1
    return one_hot
def train_w_gdc_adl(): #training g and d on standard l2 loss
split = args.split
isSewa = args.sewa
isSemaine = args.semaine
modelExist = True
toLoadModel = True
resume_iters=args.resume_iters#89
GName = None;#"AF0-0-16-16-Den-UA-G-429.ckpt"
DName = None;#"AF0-0-16-16-Den-UA-D-429.ckpt"
use_skip = args.useSkip
useLatent = args.useLatent
tryDenoise = args.tryDenoise
addLoss = args.addLoss
useWeight = args.useWeightNormalization
singleTask = args.singleTask
trainQuadrant = args.trainQuadrant
alterQuadrant = args.alterQuadrant
nSel = args.nSel
#curDir = "/home/deckyal/eclipse-workspace/FaceTracking/"
c_dim=2
image_size=128
g_conv_dim=args.gConv
d_conv_dim=args.dConv
lambda_cls=1
lambda_rec=10
lambda_gp=10
inputC = 3#input channel for discriminator
visEvery = 5
saveEvery = 10
# Training configuration.
dataset='CelebA' #, choices=['CelebA', 'RaFD', 'Both'])
batch_size=args.batch_size#50#40#70#20 #, help='mini-batch size')
num_iters=200000 #, help='number of total iterations for training D')
num_iters_decay=100000 #, help='number of iterations for decaying lr')
g_lr=0.0001 #, help='learning rate for G')
d_lr=0.0001 #, help='learning rate for D')
n_critic=5 #, help='number of D updates per each G update')
beta1=0.5 #, help='beta1 for Adam optimizer')
beta2=0.999 #, help='beta2 for Adam optimizer')
#selected_attrs=['Black_Hair', 'Blond_Hair', 'Brown_Hair', 'Male', 'Young']
#', '--list', nargs='+', help='selected attributes for the CelebA dataset',default=['Black_Hair', 'Blond_Hair', 'Brown_Hair', 'Male', 'Young'])
isVideo = False
seq_length = 2
# Test configuration.
test_iters=200000 #, help='test model from this step')
# Miscellaneous.
num_workers=1
log_dir='stargan/logs'
model_save_dir='stargan/models'
sample_dir='stargan/samples-g_adl'
result_dir='stargan/results'
# Step size.
log_step=20
sample_step=5#1000
model_save_step=10
lr_update_step=100#1000
#model_save_step=10000
#lr_update_step=1000
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if not os.path.exists(model_save_dir):
os.makedirs(model_save_dir)
multi_gpu = args.multi_gpu
testSplit = split
print("Test split " , testSplit)
nSplit = 5
listSplit = []
for i in range(nSplit):
if i!=testSplit:
listSplit.append(i)
print(listSplit)
if isSemaine:
isSewa = 0
if not isSewa :
if not isSemaine :
d_name = 'AFEW-VA-Fixed'
additionName = "AF"+str(split)+"-"
else :
d_name = 'Sem-Short'
additionName = "SEM"+str(split)+"-"
dbType = 0
else :
d_name = 'SEWA'
dbType = 1
additionName = "SW"+str(split)+"-"
additionName+=(str(nSel)+'-')
additionName+=(str(g_conv_dim)+'-')
additionName+=(str(d_conv_dim)+'-')
if trainQuadrant :
if alterQuadrant :
additionName+="QDAL-"
c_dim = 1
else :
additionName+="QD-"
c_dim = 4
if tryDenoise :
additionName+="Den-"
transform =transforms.Compose([
#transforms.Resize((image_size,image_size)),
#transforms.CenterCrop(image_size),
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
#AFEW-VA-Small
ID = SEWAFEWReduced([d_name], None, True, image_size, transform, False, True, 1,split=True, nSplit = nSplit ,listSplit=listSplit
,isVideo=isVideo, seqLength = seq_length, returnQuadrant=trainQuadrant, returnNoisy = tryDenoise,dbType = dbType,
returnWeight = useWeight,isSemaine = isSemaine)
#ID = AFEWVA([d_name], None, True, image_size, transform, False, True, 1,split=True, nSplit = nSplit ,listSplit=listSplit
# ,isVideo=isVideo, seqLength = seq_length, returnQuadrant=trainQuadrant, returnNoisy = tryDenoise,dbType = dbType,returnWeight = useWeight)
dataloader = torch.utils.data.DataLoader(dataset = ID, batch_size = batch_size, shuffle = True,worker_init_fn=worker_init_fn)
VD = SEWAFEWReduced([d_name], None, True, image_size, transform, False, False, 1,split=True, nSplit = nSplit,listSplit=[testSplit]
,isVideo=isVideo, seqLength = seq_length, returnQuadrant=trainQuadrant, returnNoisy = tryDenoise,dbType = dbType,
isSemaine = isSemaine)
#VD = AFEWVA([d_name], None, True, image_size, transform, False, False, 1,split=True, nSplit = nSplit,listSplit=[testSplit]
# ,isVideo=isVideo, seqLength = seq_length, returnNoisy = tryDenoise,dbType = dbType)
dataloaderV = torch.utils.data.DataLoader(dataset = VD, batch_size = batch_size, shuffle = False)
#Build model
"""Create a generator and a discriminator."""
if nSel :
G = GeneratorMZ(g_conv_dim, 0, 1,use_skip,useLatent)
D = DiscriminatorMZR(image_size, d_conv_dim, c_dim, 4,inputC=inputC)
C = CombinerSeqAtt(image_size, d_conv_dim, c_dim, 4,64,512,1,batch_size,useCH=True)
else :
G = GeneratorM(g_conv_dim, 0, 1,use_skip,useLatent)
D = DiscriminatorM(image_size, d_conv_dim, c_dim, 6)
C = CombinerSeqAtt(image_size, d_conv_dim, c_dim, 4,64,512,1,batch_size,useCH=True)
print_network(G, 'G')
print_network(D, 'D')
if toLoadModel :
print('Loading models from iterations : ',resume_iters)
if modelExist :
additionName+='UA-'
if GName is None :
G_path = os.path.join(curDir+model_save_dir, '{}G-{}.ckpt'.format(additionName,resume_iters))
D_path = os.path.join(curDir+model_save_dir, '{}D-{}.ckpt'.format(additionName,resume_iters))
C_path = os.path.join(curDir+model_save_dir, '{}C-{}.ckpt'.format(additionName,resume_iters))
else :
G_path = os.path.join(curDir+model_save_dir, GName)
D_path = os.path.join(curDir+model_save_dir, DName)
C_path = os.path.join(curDir+model_save_dir, DName)
print('loading ',G_path)
print('loading ',D_path)
G.load_state_dict(torch.load(G_path, map_location=lambda storage, loc: storage))
D.load_state_dict(torch.load(D_path, map_location=lambda storage, loc: storage))
C.load_state_dict(torch.load(D_path, map_location=lambda storage, loc: storage))
if not modelExist:
additionName+='UA-'
else :
print('Initiating models')
G.apply(weights_init_uniform_rule)
D.apply(weights_init_uniform_rule)
save_name = additionName+str(testSplit)
err_file = curDir+save_name+".txt"
print('err file : ',err_file)
g_optimizer = torch.optim.Adam(G.parameters(), g_lr, [beta1, beta2])
d_optimizer = torch.optim.Adam(D.parameters(), d_lr, [beta1, beta2])
c_optimizer = torch.optim.Adam(C.parameters(), d_lr, [beta1, beta2])
G.to(device)
D.to(device)
C.to(device)
if multi_gpu:
G = nn.DataParallel(G)
D = nn.DataParallel(D)
# Set data loader.
data_loader = dataloader
if not trainQuadrant or (alterQuadrant):
criterion = nn.MSELoss()
else :
criterion = nn.CrossEntropyLoss() #F.cross_entropy(logit, target)
# Fetch fixed inputs for debugging.
data = next(iter(dataloader))
x_fixed, rlabels,rldmrk,_ = data[0],data[1],data[2],data[3]# x_fixed, c_org
if trainQuadrant :
if tryDenoise :
x_fixed = data[6].cuda()
x_target = data[0].cuda()
else :
if tryDenoise :
x_fixed = data[5].cuda()
x_target = data[0].cuda()
x_fixed = x_fixed.to(device)
# Learning rate cache for decaying.
d_lr = d_lr
start_iters = 0
# Start training.
print('Start training...')
start_time = time.time()
if trainQuadrant :
q1 = data[4]
f = open(err_file,'w+')
f.write("err : ")
f.close()
lowest_loss = 99999
lMSA,lMSV,lCCV,lCCA,lICA,lICV,lCRA, lCRV, total = 9999,9999,-9999, -9999, -9999, -9999, -9999, -9999, -9999
w,wv,wa = None,None,None
for i in range(start_iters, num_iters):
random.seed()
manualSeed = random.randint(1, 10000) # use if you want new results
random.seed(manualSeed)
torch.manual_seed(manualSeed)
print('Epoch {}/{}'.format(i, num_iters - 1))
print('-'*10)
running_loss = 0
G.train()
D.train()
for x,(data) in enumerate(dataloader,0) :
rinputs, rlabels,rldmrk,_ =data[0],data[1],data[2],data[3]
if trainQuadrant :
if alterQuadrant :
quadrant = data[5].float().cuda()
else :
quadrant = data[5].cuda()
if tryDenoise :
noisy = data[6].cuda()
else :
if tryDenoise :
noisy = data[5].cuda()
if useWeight :
w = data[6].cuda()
#print(w)
wv = w[:,1]
wa = w[:,0]
else :
if useWeight :
w = data[5].cuda()
#print(w)
wv = w[:,1]
wa = w[:,0]
inputs = rinputs.cuda()#to(device)
labels = rlabels.cuda()#to(device)
# Compute loss with real images.
out_src, out_cls = D(inputs)
d_loss_real = - torch.mean(out_src)
if not trainQuadrant:
if useWeight :
d_loss_cls = calcMSET(out_cls,labels,w) #criterion(out_cls, labels)
else :
d_loss_cls = criterion(out_cls, labels) #classification_loss(out_cls, label_org, dataset)
if addLoss :
ov,oa,lv,la = out_cls[:,0],out_cls[:,1], labels[:,0], labels[:,1]
corV = -calcCORT(ov, lv, wv)
corA = -calcCORT(oa, la, wa)
cccV = -calcCCCT(ov, lv, wv)
cccA = -calcCCCT(oa, la, wa)
iccV = -calcICCT(ov, lv, wv)
iccA = -calcICCT(oa, la, wa)
d_loss_cls = d_loss_cls + corV+corA +cccV+cccA+iccV+iccA
else :
#print('q ',quadrant)
#print(out_cls.shape, quadrant.shape )
if alterQuadrant :
d_loss_cls = criterion(torch.squeeze(out_cls), quadrant)
else :
d_loss_cls = criterion(out_cls, quadrant)
if x%10 == 0 :
if not trainQuadrant:
print(x,'-',len(dataloader)," Res - label-G : ", out_cls[:3],labels[:3])
else :
if alterQuadrant :
print(x,'-',len(dataloader)," Res - label-G : ", torch.round(out_cls[:3]),quadrant[:3])
else :
print(x,'-',len(dataloader)," Res - label-G : ", torch.max(out_cls[:3],1)[1],quadrant[:3])
# Compute loss with fake images.
if tryDenoise :
theInput = noisy
else :
theInput = inputs
x_fake = G(theInput)
out_src, out_cls = D(x_fake.detach())
d_loss_fake = torch.mean(out_src)
# Compute loss for gradient penalty.
alpha = torch.rand(theInput.size(0), 1, 1, 1).to(device)
x_hat = (alpha * theInput.data + (1 - alpha) * x_fake.data).requires_grad_(True)
out_src, _ = D(x_hat)
d_loss_gp = gradient_penalty(out_src, x_hat)
# Backward and optimize.
d_loss = d_loss_real + d_loss_fake + lambda_cls * d_loss_cls + lambda_gp * d_loss_gp
#reset_grad()
g_optimizer.zero_grad()
d_optimizer.zero_grad()
d_loss.backward()
d_optimizer.step()
# Logging.
loss = {}
loss['D/loss_real'] = d_loss_real.item()
loss['D/loss_fake'] = d_loss_fake.item()
loss['D/loss_cls'] = d_loss_cls.item()
loss['D/loss_gp'] = d_loss_gp.item()
###! Actual training of the generator
if (i+1) % n_critic == 0:
# Original-to-target domain.
if tryDenoise :
z,x_fake = G(noisy,returnInter = True)
else :
z,x_fake = G(inputs)
out_src, out_cls = D(x_fake)
if x%10 == 0 :
print("Res - label-D : ", out_cls[:3],labels[:3])
g_loss_fake = - torch.mean(out_src)
if not trainQuadrant:
#g_loss_cls = criterion(out_cls, labels) #classification_loss(out_cls, label_org, dataset)
if useWeight :
g_loss_cls = calcMSET(out_cls,labels,w) #criterion(out_cls, labels)
else :
g_loss_cls = criterion(out_cls, labels) #classification_loss(out_cls, label_org, dataset)
if addLoss :
ov,oa,lv,la = out_cls[:,0],out_cls[:,1], labels[:,0], labels[:,1]
corV = -calcCORT(ov, lv, wv)
corA = -calcCORT(oa, la, wa)
cccV = -calcCCCT(lv, lv, wv)
cccA = -calcCCCT(oa, la, wa)
iccV = -calcICCT(ov, lv, wv)
iccA = -calcICCT(oa, la, wa)
g_loss_cls = g_loss_cls + corV+corA +cccV+cccA+iccV+iccA
else :
if alterQuadrant :
g_loss_cls = criterion(torch.squeeze(out_cls), quadrant)
else :
g_loss_cls = criterion(out_cls, quadrant)
if not isSewa:
q = toQuadrant(out_cls, -10, 10, False)
else :
q = toQuadrant(out_cls, 0, 1, False)
out_c = C(torch.cat((z,q),1))
if useWeight :
c_loss = calcMSET(out_cls,labels,w) #criterion(out_cls, labels)
else :
c_loss = criterion(out_cls, labels) #classification_loss(out_cls, label_org, dataset)
if addLoss :
ov,oa,lv,la = out_c[:,0],out_c[:,1], labels[:,0], labels[:,1]
corV = -calcCORT(ov, lv, wv)
corA = -calcCORT(oa, la, wa)
cccV = -calcCCCT(lv, lv, wv)
cccA = -calcCCCT(oa, la, wa)
iccV = -calcICCT(ov, lv, wv)
iccA = -calcICCT(oa, la, wa)
c_loss = c_loss + corV+corA +cccV+cccA+iccV+iccA
# Target-to-original domain.
x_reconst = G(x_fake)
g_loss_rec = torch.mean(torch.abs(inputs - x_reconst))
# Backward and optimize.
g_loss = g_loss_fake + lambda_rec * g_loss_rec + lambda_cls * g_loss_cls
#reset_grad()
g_optimizer.zero_grad()
d_optimizer.zero_grad()
c_optimizer.zero_grad()
c_loss.backward()
g_loss.backward()
g_optimizer.step()
c_optimizer.step()
# Logging.
loss['G/loss_fake'] = g_loss_fake.item()
loss['G/loss_rec'] = g_loss_rec.item()
loss['G/loss_cls'] = g_loss_cls.item()
loss['C'] = c_loss.item()
###! Getting the training metrics and samples
#running_loss += loss.item() * inputs.size(0)
#print("{}/{} loss : {}/{}".format(x,int(len(dataloader.dataset)/batch_size),lossC.item(),lossR.item()))
if (i+1) % 10 == 0:
et = time.time() - start_time
et = str(datetime.timedelta(seconds=et))[:-7]
log = "Elapsed [{}], Iteration [{}/{}], Inner {}/{} \n".format(et, i+1, num_iters,x,int(len(dataloader.dataset)/batch_size))
for tag, value in loss.items():
log += ", {}: {:.4f}".format(tag, value)
print(log)
f = open(err_file,'a')
f.write("Elapsed [{}], Iteration [{}/{}], Inner {}/{} \n".format(et, i+1, num_iters,x,int(len(dataloader.dataset)/batch_size)))
f.write(log)
f.close()
# Translate fixed images for debugging.
if (i+1) % visEvery == 0:
with torch.no_grad():
x_fake_list = [x_fixed]
x_concat = G(x_fixed)
sample_path = os.path.join(curDir+sample_dir, '{}{}-images-denoised.jpg'.format(i+1,additionName))
save_image(denorm(x_concat.data.cpu()), sample_path, nrow=int(round(batch_size/4)), padding=0)
print('Saved real and fake denoised images into {}...'.format(sample_path))
if tryDenoise :
x_concat = x_fixed
sample_path = os.path.join(curDir+sample_dir, '{}{}-images-original.jpg'.format(i+1,additionName))
save_image(denorm(x_concat.data.cpu()), sample_path, nrow=int(round(batch_size/4)), padding=0)
print('Saved real and fake real images into {}...'.format(sample_path))
x_concat = x_target
sample_path = os.path.join(curDir+sample_dir, '{}{}-images-groundtruth.jpg'.format(i+1,additionName))
save_image(denorm(x_concat.data.cpu()), sample_path, nrow=int(round(batch_size/4)), padding=0)
print('Saved real and fake real images into {}...'.format(sample_path))
# Save model checkpoints.
if (i+1) % saveEvery == 0:
G_path = os.path.join(curDir+model_save_dir, '{}G-{}.ckpt'.format(additionName,i))
D_path = os.path.join(curDir+model_save_dir, '{}D-{}.ckpt'.format(additionName,i))
C_path = os.path.join(curDir+model_save_dir, '{}C-{}.ckpt'.format(additionName,i))
if multi_gpu :
torch.save(G.module.state_dict(), G_path)
torch.save(D.module.state_dict(), D_path)
torch.save(C.module.state_dict(), C_path)
else :
torch.save(G.state_dict(), G_path)
torch.save(D.state_dict(), D_path)
torch.save(C.state_dict(), C_path)
print('Saved model checkpoints into {}...'.format(model_save_dir))
print(G_path)
# Decay learning rates.
if (i+1) % lr_update_step == 0 and (i+1) > 50:
g_lr -= (g_lr / float(num_iters_decay))
d_lr -= (d_lr / float(num_iters_decay))
update_lr_ind(d_optimizer,d_lr)
update_lr_ind(g_optimizer,g_lr)
print ('Decayed learning rates, g_lr: {}, d_lr: {}.'.format(g_lr, d_lr))
epoch_loss = running_loss / len(dataloader.dataset)
print('Loss : {:.4f}'.format(epoch_loss))
if i %2 == 0 :
if multi_gpu :
torch.save(D.module.state_dict(),curDir+'t-models/'+'-D'+save_name)
torch.save(G.module.state_dict(),curDir+'t-models/'+'-G'+save_name)
torch.save(C.module.state_dict(),curDir+'t-models/'+'-C'+save_name)
else :
torch.save(D.state_dict(),curDir+'t-models/'+'-D'+save_name)
torch.save(G.state_dict(),curDir+'t-models/'+'-G'+save_name)
torch.save(G.state_dict(),curDir+'t-models/'+'-C'+save_name)
#Deep copy the model_ft
if i%5 == 0 :#epoch_loss < lowest_loss :
if trainQuadrant :
a = 0
b = 0
else :
a = 0
b = 1
lowest_loss = lowest_loss
print("outp8ut : ",out_cls[0])
print("labels : ",labels[0])
if True :
listValO = []
listAroO = []
listValL = []
listAroL = []
tvo = [];tao=[];tvl = []; tal = [];
anyDiffer = False
for x,(data) in enumerate(dataloaderV,0) :
if trainQuadrant:
rinputs, rlabels,rldmrk = data[0],data[5],data[2]
else :
rinputs, rlabels,rldmrk = data[0],data[1],data[2]
G.eval()
D.eval()
C.eval()
inputs = rinputs.cuda()#to(device)
labels = rlabels.cuda()#to(device)
with torch.set_grad_enabled(False) :
z,inputsM = G(inputs,returnInter = True)
_, outD = D(inputsM)
if not isSewa:
q = toQuadrant(outD, -10, 10, False)
else :
q = toQuadrant(outD, 0, 1, False)
outputs = C(torch.cat((z,q),1))
if trainQuadrant:
if alterQuadrant :
outputs = torch.round(outputs)
else :
_,outputs = torch.max(outputs,1)
if trainQuadrant :
print(x,',',int(truediv(len(VD),batch_size)),outputs[:2], labels[:2],outputs.shape)
else :
print(x,',',int(truediv(len(VD),batch_size)),outputs[:2], labels[:2],outputs[:,0].shape[0],outputs.shape)
#print(outputs.shape)
if not trainQuadrant :
shape = outputs[:,0].shape[0]
else :
shape = outputs.shape[0]
if shape != batch_size : #in case the batch size is differ, usually at end of iter
anyDiffer = True
print('differ')
if trainQuadrant:
tvo.append(outputs.detach().cpu())
tao.append(outputs.detach().cpu())
tvl.append(labels.detach().cpu())
tal.append(labels.detach().cpu())
else :
tvo.append(outputs[:,a].detach().cpu())
tao.append(outputs[:,b].detach().cpu())
tvl.append(labels[:,a].detach().cpu())
tal.append(labels[:,b].detach().cpu())
else :
print('equal')
if trainQuadrant :
listValO.append(outputs.detach().cpu())
listAroO.append(outputs.detach().cpu())
listValL.append(labels.detach().cpu())
listAroL.append(labels.detach().cpu())
else :
listValO.append(outputs[:,a].detach().cpu())
listAroO.append(outputs[:,b].detach().cpu())
listValL.append(labels[:,a].detach().cpu())
listAroL.append(labels[:,b].detach().cpu())
est_V = np.asarray(torch.stack(listValO)).flatten()
est_A = np.asarray(torch.stack(listAroO)).flatten()
gt_V = np.asarray(torch.stack(listValL)).flatten()
gt_A = np.asarray(torch.stack(listAroL)).flatten()
if anyDiffer :
est_Vt = np.asarray(torch.stack(tvo)).flatten()
est_At = np.asarray(torch.stack(tao)).flatten()
gt_Vt = np.asarray(torch.stack(tvl)).flatten()
gt_At = np.asarray(torch.stack(tal)).flatten()
#now concatenate
est_V = np.concatenate((est_V,est_Vt))
est_A = np.concatenate((est_A,est_At))
gt_V = np.concatenate((gt_V,gt_Vt))
gt_A = np.concatenate((gt_A,gt_At))
print(est_V.shape, gt_V.shape)
mseV = calcMSE(est_V, gt_V)
mseA = calcMSE(est_A, gt_A)
corV = calcCOR(est_V, gt_V)
corA = calcCOR(est_A, gt_A)
iccV = calcICC(est_V, gt_V)
iccA = calcICC(est_A, gt_A)
cccV = calcCCC(est_V, gt_V)
cccA = calcCCC(est_A, gt_A)
iccV2 = calcCCC(gt_V, gt_V)
iccA2 = calcCCC(gt_A, gt_A)
if lMSA > mseA :
lMSA = mseA
if lMSV > mseV :
lMSV = mseV
if corA > lCRA :
lCRA = corA
if corV > lCRV :
lCRV = corV
if cccA > lCCA :
lCCA = cccA
if cccV > lCCV :
lCCV = cccV
if iccA > lICA :
lICA = iccA
if iccV > lICV :
lICV = iccV
if (corA+corV+cccA+cccV+iccA+iccV) > total :
total = (corA+corV+cccA+cccV+iccA+iccV)
G_path = os.path.join(curDir+model_save_dir, '{}G-best-{}.ckpt'.format(additionName,i))
D_path = os.path.join(curDir+model_save_dir, '{}D-best-{}.ckpt'.format(additionName,i))
#G_path = os.path.join(curDir+model_save_dir, '{}{}-G-adl-best.ckpt'.format(i+1,additionName))
#D_path = os.path.join(curDir+model_save_dir, '{}{}-D-adl-best.ckpt'.format(i+1,additionName))
if multi_gpu :
torch.save(G.module.state_dict(), G_path)
torch.save(D.module.state_dict(), D_path)
else :
torch.save(G.state_dict(), G_path)
torch.save(D.state_dict(), D_path)
print('Best, MSEA : '+str(lMSA)+', CORA : '+str(lCRA)+', CCCA : '+str(lCCA)+', ICCA : '+str(lICA)+ ', MSEV : ' +str(lMSV)+ ', CORV : ' +str(lCRV)+', CCCV : '+str(lCCV) +', ICCV : '+str(lICV)+', Total : '+str(total))
print('MSEV : ',mseV, ', CORV : ',corV,', CCCV : ',cccV,', ICCV : ',iccV)
print('MSEA : ',mseA, ', CORA : ',corA,', CCCA : ',cccA,', ICCA : ',iccA)
f = open(err_file,'a')
res = 'MSEV : '+str(mseV)+ ', CORV : ' +str(corV)+', CCCV : '+str(cccV) +', ICCV : '+str(iccV)+' \n '
f.write(res)
res = 'MSEA : '+str(mseA)+ ', CORA : '+str(corA) +', CCCA : '+str(cccA) +', ICCA : '+str(iccA)+' \n '
f.write(res)
res = 'Best, MSEA : '+str(lMSA)+', CORA : '+str(lCRA)+', CCCA : '+str(lCCA)+', ICCA : '+str(lICA)+ ', MSEV : ' +str(lMSV)+ ', CORV : ' +str(lCRV)+', CCCV : '+str(lCCV) +', ICCV : '+str(lICV)+', Total : '+str(total)+' \n '
f.write(res)
f.close()
print('Best val Acc: {:4f}'.format(lowest_loss))
pass
def extract(): #training g and d on standard l2 loss
    """Run a pre-trained G/D pair over one held-out dataset split and dump
    per-image features.

    For every image in the selected split this:
      * forwards it through G, keeping the intermediate feature map ``z``,
      * predicts valence/arousal (or quadrant) with D,
      * writes ``z`` (optionally with the quadrant appended as an extra
        constant channel) plus the [valence, arousal, quadrant] vector to an
        ``.npz`` file in a sibling 'FT-<run name>z' directory next to the
        source image,
      * accumulates predictions and prints MSE/COR/ICC/CCC metrics at the end.

    All configuration comes from the module-level ``args``.
    """
    # Pin the process to one physical GPU.
    os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"]="1"
    # Copy the relevant command-line flags into locals.
    split = args.split
    isSewa = args.sewa
    isSemaine = args.semaine
    toLoadModel = True
    resume_iters=args.resume_iters
    use_skip = args.useSkip
    useLatent = args.useLatent
    tryDenoise = args.tryDenoise
    addLoss = args.addLoss
    useWeight = args.useWeightNormalization
    singleTask = args.singleTask
    trainQuadrant = args.trainQuadrant
    alterQuadrant = args.alterQuadrant
    nSel = args.nSel
    #curDir = "/home/deckyal/eclipse-workspace/FaceTracking/"
    # Model hyper-parameters; these must match the configuration used when the
    # checkpoint being loaded was trained.
    c_dim=2
    image_size=128
    g_conv_dim=16
    d_conv_dim=16
    lambda_cls=1
    lambda_rec=10
    lambda_gp=10
    inputC = 3#input channel for discriminator
    batch_size=args.batch_size#200 #50#40#70#20 #, help='mini-batch size')
    isVideo = False
    seq_length = 2
    # Test configuration.
    test_iters=200000 #, help='test model from this step')
    # Miscellaneous.
    num_workers=1
    log_dir='stargan/logs'
    model_save_dir='stargan/models'
    sample_dir='stargan/samples-g_adl'
    result_dir='stargan/results'
    # Step size.
    log_step=20
    sample_step=5#1000
    model_save_step=10
    lr_update_step=100#1000
    #model_save_step=10000
    #lr_update_step=1000
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    if not os.path.exists(model_save_dir):
        os.makedirs(model_save_dir)
    multi_gpu = False
    testSplit = split
    print("Test split " , testSplit)
    # Build the list of the remaining (non-test) splits; only testSplit is
    # actually fed to the dataset below.
    nSplit = 5
    listSplit = []
    for i in range(nSplit):
        if i!=testSplit :
            listSplit.append(i)
    print(listSplit)
    # Derive the dataset name and the run-name prefix used for checkpoint and
    # output-directory names.
    if not isSewa :
        if not isSemaine :
            d_name = 'AFEW-VA-Fixed'
            additionName = "AF"+str(split)+"-"
        else :
            d_name = 'Sem-Short'
            additionName = "SEM"+str(split)+"-"
        dbType = 0
    else :
        d_name = 'SEWA'
        dbType = 1
        additionName = "SW"+str(split)+"-"
    additionName+=(str(nSel)+'-')
    additionName+=(str(g_conv_dim)+'-')
    additionName+=(str(d_conv_dim)+'-')
    if trainQuadrant :
        if alterQuadrant :
            additionName+="QDAL-"
            c_dim = 1
        else :
            additionName+="QD-"
            c_dim = 4
    if tryDenoise :
        additionName+="Den-"
    save_name = additionName+str(testSplit)
    transform =transforms.Compose([
        transforms.Resize((image_size,image_size)),
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
        ])
    toDelete = False
    # Dataset restricted to the held-out split; no shuffling so the output
    # files line up with dataset order.
    VD = SEWAFEWReduced([d_name], None, True, image_size, transform, False, False, 1,split=False, nSplit = nSplit,listSplit=[testSplit]
                        ,isVideo=isVideo, seqLength = seq_length, returnQuadrant=trainQuadrant, returnNoisy = tryDenoise,dbType = dbType, isSemaine=isSemaine)
    dataloaderV = torch.utils.data.DataLoader(dataset = VD, batch_size = batch_size, shuffle = False)
    # nSel selects the 'MZ' architecture variants, otherwise the 'M' ones.
    if nSel :
        G = GeneratorMZ(g_conv_dim, 0, 1,use_skip,useLatent)
        D = DiscriminatorMZR(image_size, d_conv_dim, c_dim, 4,inputC=inputC)
    else :
        G = GeneratorM(g_conv_dim, 0, 1,use_skip,useLatent)
        D = DiscriminatorM(image_size, d_conv_dim, c_dim, 6)
    print_network(G, 'G')
    print_network(D, 'D')
    if toLoadModel :
        print('Loading models from iterations : ',resume_iters)
        G_path = os.path.join(curDir+model_save_dir, '{}G-{}.ckpt'.format(additionName,resume_iters))
        D_path = os.path.join(curDir+model_save_dir, '{}D-{}.ckpt'.format(additionName,resume_iters))
        G.load_state_dict(torch.load(G_path, map_location=lambda storage, loc: storage),strict=False)
        D.load_state_dict(torch.load(D_path, map_location=lambda storage, loc: storage),strict=False)
    G.to(device)
    D.to(device)
    # Accumulators for predictions/labels; a/b index the valence/arousal columns.
    listValO = []
    listAroO = []
    listValL = []
    listAroL = []
    a = 0
    b = 1
    iterator = 0
    # Separate buffers for any batch whose size differs (usually the last one).
    tvo = [];tao=[];tvl = []; tal = [];
    anyDiffer = False
    print('length : ',len(dataloaderV))
    for x,(data) in enumerate(dataloaderV,0) :
        if trainQuadrant:
            rinputs, rlabels,rldmrk = data[0],data[5],data[2]
        else :
            rinputs, rlabels,rldmrk = data[0],data[1],data[2]
        #for real_batch,va,gt,M,ln,q,noisy_batch,weight in (dataloader) :
        fNames = data[4]
        G.train()
        D.train()
        inputs = rinputs.cuda()#to(device)
        labels = rlabels.cuda()#to(device)
        with torch.set_grad_enabled(False) :
            # z is G's intermediate feature map; inputsM is G's image output.
            inputsM,z = G(inputs,returnInter = True)
            _, outputs = D(inputsM)
            if trainQuadrant:
                if alterQuadrant :
                    outputs = torch.round(outputs)
                else :
                    _,outputs = torch.max(outputs,1)
            print('inside ')
        if trainQuadrant :
            print(x,',',int(truediv(len(VD),batch_size)),outputs[:2], labels[:2],outputs.shape)
        else :
            print(x,',',int(truediv(len(VD),batch_size)),outputs[:2], labels[:2],outputs[:,0].shape[0],outputs.shape)
        #print(outputs.shape)
        print(z.shape)
        zSave = z.cpu().numpy()
        qSave = outputs.cpu().numpy()
        # combine=True appends the quadrant as one extra constant channel of z.
        combine = True
        #now saving the results individually
        for fname,features,va in zip(fNames, zSave,qSave):
            iterator+=1
            #first inspect the dir
            dirName, fName = os.path.split(fname)
            fName = fName.split('.')[0]+'.npz'
            # Replace the last path component (the image folder) with the
            # feature-directory name.
            listDir = dirName.split('/')
            listDir[-1] = 'FT-'+additionName+'z'
            dirTgt = '/'.join(listDir)
            if not toDelete :
                checkDirMake(dirTgt)
            #va = np.array([5,-5])
            #print(va)
            # VA range is [-10,10] for AFEW/SEMAINE and [0,1] for SEWA.
            if not isSewa:
                q = toQuadrant(va, -10, 10, False)
            else :
                q = toQuadrant(va, 0, 1, False)
            #print(q)
            if combine :
                tmp=np.zeros((1,features.shape[1],features.shape[2]),np.float32)+q
                features=np.concatenate((features,tmp),0)
                print(tmp[0,0,:2])
            print(fname, features.shape)
            # toDelete=True flips this run into cleanup mode: remove previously
            # extracted feature directories instead of writing new ones.
            if os.path.isdir(dirTgt) and toDelete: # and isSewa or False:
                print('removing : ',dirTgt)
                #os.remove(os.path.join(dirTgt,fNameOri))
                #exit(0)
                shutil.rmtree(dirTgt)
            #print(dirTgt, fName)
            vaq = np.array([va[0],va[1],q])
            #print('vaq : ',vaq)
            if not toDelete :#not os.path.isfile(os.path.join(dirTgt,fName)) :
                #np.save(os.path.join(dirTgt,fName),features)
                np.savez(os.path.join(dirTgt,fName),z=features,vaq=vaq)
            #exit(0)
        #np.save('testing.npy',zSave)
        #exit(0)
        if not trainQuadrant :
            shape = outputs[:,0].shape[0]
        else :
            shape = outputs.shape[0]
        if shape != batch_size : #in case the batch size is differ, usually at end of iter
            anyDiffer = True
            print('differ')
            if trainQuadrant:
                tvo.append(outputs.detach().cpu())
                tao.append(outputs.detach().cpu())
                tvl.append(labels.detach().cpu())
                tal.append(labels.detach().cpu())
            else :
                tvo.append(outputs[:,a].detach().cpu())
                tao.append(outputs[:,b].detach().cpu())
                tvl.append(labels[:,a].detach().cpu())
                tal.append(labels[:,b].detach().cpu())
        else :
            print('equal')
            if trainQuadrant :
                listValO.append(outputs.detach().cpu())
                listAroO.append(outputs.detach().cpu())
                listValL.append(labels.detach().cpu())
                listAroL.append(labels.detach().cpu())
            else :
                listValO.append(outputs[:,a].detach().cpu())
                listAroO.append(outputs[:,b].detach().cpu())
                listValL.append(labels[:,a].detach().cpu())
                listAroL.append(labels[:,b].detach().cpu())
    # Flatten all equal-sized batches, then append the odd-sized ones.
    if len(listValO) > 0 :
        est_V = np.asarray(torch.stack(listValO)).flatten()
        est_A = np.asarray(torch.stack(listAroO)).flatten()
        gt_V = np.asarray(torch.stack(listValL)).flatten()
        gt_A = np.asarray(torch.stack(listAroL)).flatten()
    if anyDiffer :
        est_Vt = np.asarray(torch.stack(tvo)).flatten()
        est_At = np.asarray(torch.stack(tao)).flatten()
        gt_Vt = np.asarray(torch.stack(tvl)).flatten()
        gt_At = np.asarray(torch.stack(tal)).flatten()
        #now concatenate
        if len(listValO) > 0 :
            est_V = np.concatenate((est_V,est_Vt))
            est_A = np.concatenate((est_A,est_At))
            gt_V = np.concatenate((gt_V,gt_Vt))
            gt_A = np.concatenate((gt_A,gt_At))
        else :
            est_V,est_A,gt_V,gt_A = est_Vt,est_At,gt_Vt,gt_At
    print(est_V.shape, gt_V.shape)
    # Report standard affect-estimation metrics for valence (V) and arousal (A).
    mseV = calcMSE(est_V, gt_V)
    mseA = calcMSE(est_A, gt_A)
    corV = calcCOR(est_V, gt_V)
    corA = calcCOR(est_A, gt_A)
    iccV = calcICC(est_V, gt_V)
    iccA = calcICC(est_A, gt_A)
    cccV = calcCCC(est_V, gt_V)
    cccA = calcCCC(est_A, gt_A)
    # Self-comparisons; always the metric's perfect score (kept for reference).
    iccV2 = calcCCC(gt_V, gt_V)
    iccA2 = calcCCC(gt_A, gt_A)
    print('MSEV : ',mseV, ', CORV : ',corV,', CCCV : ',cccV,', ICCV : ',iccV)
    print('MSEA : ',mseA, ', CORA : ',corA,', CCCA : ',cccA,', ICCA : ',iccA)
if __name__ == '__main__':
    # Dispatch on the command-line `mode` flag:
    #   0 -> train the G/D/classifier pipeline
    #   1 -> extract features with a pre-trained model
    run_mode = args.mode
    if run_mode == 0:
        train_w_gdc_adl()
    elif run_mode == 1:
        extract()
| 44,327 | 36.156748 | 237 | py |
from math import sqrt
import re
from PIL import Image,ImageFilter
import torch
from torch.utils import data
import torchvision.transforms as transforms
import torchvision.utils as vutils
import csv
import torchvision.transforms.functional as F
import numbers
from torchvision.transforms import RandomRotation,RandomResizedCrop,RandomHorizontalFlip
from utils import *
from config import *
from ImageAugment import *
import utils
from os.path import isfile# load additional module
import pickle
import os
#import nudged
import shutil
import file_walker
import copy
from random import randint
# Noise-parameter tables: one row per corruption type, three severity levels
# per row. Row indices correspond to the `sel_n` noise codes used by the
# GeneralNoise_WL augmentation — TODO(review): confirm the per-row semantics
# against ImageAugment; the trailing comments below are the author's notes.
#noiseParamList = np.asarray([[0,0,0],[1,2,3],[1,3,5],[.001,.005,.01],[.8,.5,.2],[0,0,0]])#0 [], 1[1/2,2/4,3/8], 2 [1,3,5], 3 [.01,.1,1], [.001,.005,.01]
noiseParamList =np.asarray([[0,0,0],[2,3,4],[2,4,6],[.005,.01,.05],[.5,.2,.1],[0,0,0]])#0 [], 1[1/2,2/4,3/8], 2 [1,3,5], 3 [.01,.1,1], [.001,.005,.01]
#noiseParamListTrain = np.asarray([[0,0,0],[2,3,4],[2,4,6],[.005,.01,.05],[.5,.2,.1],[0,0,0]])#0 [], 1[1/2,2/4,3/8], 2 [1,3,5], 3 [.01,.1,1], [.001,.005,.01]
noiseParamListTrain = np.asarray([[0,0,0],[2,3,4],[2,4,6],[.005,.01,.05],[.5,.2,.1],[0,0,0]])#0 [], 1[1/2,2/4,3/8], 2 [1,3,5], 3 [.01,.1,1], [.001,.005,.01]
# Machine-specific absolute dataset roots.
rootDir = "/home/deckyal/eclipse-workspace/DeepModel/src/MMTVA/data"
rootDirLdmrk = "/home/deckyal/eclipse-workspace/DeepModel/src/MMTVA/data/"
def addGaussianNoise(img, noiseLevel=1):
    """Return `img` corrupted with zero-mean Gaussian noise.

    `noiseLevel` scales a standard-normal draw, i.e. it acts as the noise
    standard deviation. The input tensor is not modified.
    """
    perturbation = torch.randn(img.size()) * noiseLevel
    return img + perturbation
def toQuadrant(inputData=None, min=-10, max=10, toOneHot=False):
    """Map a [valence, arousal] pair to its affect quadrant.

    The threshold is the midpoint of [min, max]; values >= midpoint count as
    "high". Quadrant codes: 0 = +V/+A, 1 = -V/+A, 2 = -V/-A, 3 = +V/-A.
    Returns the integer code, or a length-4 one-hot numpy vector when
    `toOneHot` is True.
    """
    midpoint = truediv(min + max, 2)
    valence_low = inputData[0] < midpoint
    arousal_low = inputData[1] < midpoint
    if valence_low:
        quadrant = 2 if arousal_low else 1
    else:
        quadrant = 3 if arousal_low else 0
    if toOneHot:
        one_hot = np.zeros(4)
        one_hot[quadrant] += 1
        return one_hot
    return quadrant
class SEWAFEW(data.Dataset):
mean_bgr = np.array([91.4953, 103.8827, 131.0912]) # from resnet50_ft.prototxt
def __init__(self, data_list = ["AFEW"],dir_gt = None,onlyFace = True, image_size =224,
             transform = None,useIT = False,augment = False, step = 1,split = False,
             nSplit = 5, listSplit = [0,1,2,3,4],wHeatmap= False,isVideo = False, seqLength = None,
             returnM = False, toAlign = False, dbType = 0):#dbtype 0 is AFEW, 1 is SEWA
    """Index an AFEW-VA / SEMAINE / SEWA-style dataset from disk.

    Walks each directory in `data_list` (under the module rootDir), collecting
    per-frame image paths, landmark files (the 'annot'/'annotOri' subfolder)
    and valence/arousal labels ('annot2' files for AFEW, per-subject CSV
    pairs for SEWA). Fills self.l_imgs / self.l_gt / self.l_gtE plus the
    parallel name lists, either flat (per-image) or chunked into fixed-length
    sequences when `isVideo` is set, optionally restricted to a subset of
    cross-validation splits.

    NOTE(review): `data_list` and `listSplit` are mutable default arguments —
    shared across instances; safe only as long as callers never mutate them.
    """
    self.dbType = dbType
    self.seq_length = seqLength
    self.isVideo = isVideo
    self.align = toAlign
    self.useNudget = False
    self.returnM = returnM
    self.transform = transform
    self.onlyFace = onlyFace
    self.augment = augment
    self.wHeatmap = wHeatmap
    self.imageSize = image_size
    self.imageHeight = image_size
    self.imageWidth = image_size
    self.useIT = useIT
    self.curDir = rootDir+"/"#/home/deckyal/eclipse-workspace/DeepModel/src/MMTVA/data/"
    # SEWA uses 49 landmarks; AFEW/SEMAINE use 68. nose/leye/reye are the
    # indices of the three alignment anchor points in each convention.
    if self.dbType ==1 :
        annotL_name = "annotOri"
        self.ldmrkNumber = 49
        self.nose = 16
        self.leye = 24
        self.reye = 29
        #mean_shape49-pad3-224
        self.mean_shape = np.load(curDir+'mean_shape49-pad3-'+str(image_size)+'.npy')
    else :
        annotL_name = 'annot'
        self.ldmrkNumber = 68
        self.nose = 33
        self.leye = 41
        self.reye = 46
        self.mean_shape = np.load(curDir+'mean_shape-pad-'+str(image_size)+'.npy')
    # Target (mean-shape) anchor points for alignment; `swap` flips the
    # (x, y) ordering of the anchors.
    self.swap = False
    if self.swap :
        self.ptsDst = np.asarray([
            [self.mean_shape[self.nose+self.ldmrkNumber],self.mean_shape[self.nose]],[self.mean_shape[self.leye+self.ldmrkNumber],self.mean_shape[self.leye]],[self.mean_shape[self.reye+self.ldmrkNumber],self.mean_shape[self.reye]]
            ],dtype= np.float32)
        self.ptsTn = [self.mean_shape[self.nose+self.ldmrkNumber],self.mean_shape[self.nose]],[self.mean_shape[self.leye+self.ldmrkNumber],self.mean_shape[self.leye]],[self.mean_shape[self.reye+self.ldmrkNumber],self.mean_shape[self.reye]]
    else :
        self.ptsDst = np.asarray([
            [self.mean_shape[self.nose],self.mean_shape[self.nose+self.ldmrkNumber]],[self.mean_shape[self.leye],self.mean_shape[self.leye+self.ldmrkNumber]],[self.mean_shape[self.reye],self.mean_shape[self.reye+self.ldmrkNumber]]
            ],dtype= np.float32)
        self.ptsTn = [self.mean_shape[self.nose],self.mean_shape[self.nose+self.ldmrkNumber]],[self.mean_shape[self.leye],self.mean_shape[self.leye+self.ldmrkNumber]],[self.mean_shape[self.reye],self.mean_shape[self.reye+self.ldmrkNumber]]
    # Full mean shape as an (N, 2) point array for Procrustes alignment.
    self.ptsTnFull = np.column_stack((self.mean_shape[:self.ldmrkNumber],self.mean_shape[self.ldmrkNumber:]))
    list_gt = []
    list_labels_t = []
    list_labels_tE = []
    counter_image = 0
    annotE_name = 'annot2'
    if dir_gt is not None :
        annot_name = dir_gt
    list_missing = []
    # ---- Phase 1: walk the dataset folders, collecting file paths. ----
    for data in data_list :
        print(("Opening "+data))
        for f in file_walker.walk(self.curDir +data+"/"):
            if f.isDirectory: # Check if object is directory
                #print((f.name, f.full_path)) # Name is without extension
                #c_image,c_ldmark = 0,0
                if self.dbType == 1 : #we directly get the VA file in case of sewa
                    #first get the valence
                    valFile = f.full_path+"/valence/"+f.name+"_Valence_A_Aligned.csv"
                    aroFile = f.full_path+"/arousal/"+f.name+"_Arousal_A_Aligned.csv"
                    list_labels_tE.append([valFile,aroFile])
                    #print(valFile,aroFile)
                for sub_f in f.walk():
                    if sub_f.isDirectory: # Check if object is directory
                        list_dta = []
                        #print(sub_f.name)
                        if(sub_f.name == annotL_name) : #If that's annot, add to labels_t
                            for sub_sub_f in sub_f.walk(): #this is the data
                                if(".npy" not in sub_sub_f.full_path):
                                    list_dta.append(sub_sub_f.full_path)
                            list_labels_t.append(sorted(list_dta))
                            c_image = len(list_dta)
                        elif(sub_f.name == 'img'): #Else it is the image
                            for sub_sub_f in sub_f.walk(): #this is the data
                                if(".npy" not in sub_sub_f.full_path):
                                    list_dta.append(sub_sub_f.full_path)
                            list_gt.append(sorted(list_dta))
                            counter_image+=len(list_dta)
                            c_ldmrk = len(list_dta)
                        elif (sub_f.name == annotE_name) :
                            if self.dbType == 0 :
                                #If that's annot, add to labels_t
                                for sub_sub_f in sub_f.walk(): #this is the data
                                    if(".npy" not in sub_sub_f.full_path):
                                        list_dta.append(sub_sub_f.full_path)
                                list_labels_tE.append(sorted(list_dta))
                # Dead diagnostic branch (`and False`): used to flag folders
                # whose image/landmark counts disagree.
                if(c_image!=c_ldmrk) and False:
                    print(f.full_path," is incomplete ",'*'*10,c_image,'-',c_ldmrk)
                    ori = "/home/deckyal/eclipse-workspace/DeepModel/src/MMTVA/data/allVideo/"
                    target = '/home/deckyal/eclipse-workspace/DeepModel/src/MMTVA/data/allVideo/retrack/'
                    #shutil.copy(ori+f.name+".avi",target+f.name+".avi")
                    list_missing.append(f.name)
    self.length = counter_image
    print("Now opening keylabels")
    # ---- Phase 2: parse the landmark (.pts) and VA label files. ----
    list_labelsN = []
    list_labelsEN = []
    list_labels = []
    list_labelsE = []
    for ix in range(len(list_labels_t)) : #lbl,lble in (list_labels_t,list_labels_tE) :
        lbl_68 = [] #Per folder
        lbl_2 = [] #Per folder
        lbl_n68 = [] #Per folder
        lbl_n2 = [] #Per folder
        for jx in range(len (list_labels_t[ix])): #lbl_sub in lbl :
            #print(os.path.basename(list_gt[ix][jx]))
            #print(os.path.basename(list_labels_t[ix][jx]))
            #print(os.path.basename(list_labels_tE[ix][jx]))
            lbl_sub = list_labels_t[ix][jx]
            if ('pts' in lbl_sub) :
                x = []
                #print(lbl_sub)
                lbl_68.append(read_kp_file(lbl_sub,True))
                lbl_n68.append(lbl_sub)
            if self.dbType == 0 :
                lbl_subE = list_labels_tE[ix][jx]
                if ('aro' in lbl_subE) :
                    x = []
                    #print(lbl_sub)
                    # AFEW VA files are whitespace-separated; values are
                    # reversed so valence comes before arousal.
                    with open(lbl_subE) as file:
                        data2 = [re.split(r'\t+',l.strip()) for l in file]
                    for i in range(len(data2)) :
                        #x.append([ float(j) for j in data2[i][0].split()] )
                        temp = [ float(j) for j in data2[i][0].split()]
                        temp.reverse() #to give the valence first. then arousal
                        x.append(temp)
                    #x.reverse()
                    lbl_2.append(np.array(x).flatten('F'))
                    lbl_n2.append(lbl_sub)
        if self.dbType == 1 : #sewa
            #print(list_labels_t[ix][0])
            # SEWA: one valence CSV + one arousal CSV per subject, stacked
            # into an (n_frames, 2) array.
            valFile = np.asarray(readCSV(list_labels_tE[ix][0]))
            aroFile = np.asarray(readCSV(list_labels_tE[ix][1]))
            lbl_n2.append(list_labels_tE[ix][0])
            lbl_2 = np.column_stack((valFile,aroFile))
        list_labelsN.append(lbl_n68)
        list_labelsEN.append(lbl_n2)
        list_labels.append(lbl_68)
        list_labelsE.append(lbl_2)
    # ---- Phase 3: regroup per-folder data, flat or as fixed-length
    # sequences (`step` skips frames). ----
    t_l_imgs = []
    t_l_gt = []
    t_l_gtE = []
    t_list_gt_names = []
    t_list_gtE_names = []
    #print(list_labelsEN)
    if not self.isVideo :
        #Flatten it to one list
        for i in range(0,len(list_gt)): #For each dataset
            list_images = []
            list_gt_names = []
            list_gtE_names = []
            indexer = 0
            list_ground_truth = np.zeros([len(list_gt[i]),self.ldmrkNumber*2])
            list_ground_truthE = np.zeros([len(list_gt[i]),2])
            for j in range(0,len(list_gt[i]),step): #for number of data #n_skip is usefull for video data
                list_images.append(list_gt[i][j])
                list_gt_names.append(list_labelsN[i][j])
                if self.dbType == 0 :
                    list_gtE_names.append(list_labelsEN[i][j])
                else :
                    list_gtE_names.append(list_labelsEN[i][0])
                #print(list_labelsEN[i])
                '''if len(list_labels[i][j] < 1):
                    print(list_labels[i][j])'''
                #print(len(list_labels[i][j]))
                list_ground_truth[indexer] = np.array(list_labels[i][j]).flatten('F')
                list_ground_truthE[indexer] = np.array(list_labelsE[i][j]).flatten('F')
                indexer += 1
            t_l_imgs.append(list_images)
            t_l_gt.append(list_ground_truth)
            t_l_gtE.append(list_ground_truthE)
            t_list_gt_names.append(list_gt_names)
            t_list_gtE_names.append(list_gtE_names)
    else :
        # NOTE(review): this `seq_length is None` branch divides by
        # self.seq_length (None) and uses `list_images` before assignment —
        # it looks dead/legacy; confirm before relying on it.
        if self.seq_length is None :
            list_ground_truth = np.zeros([int(counter_image/(self.seq_length*step)),self.seq_length,136])
            indexer = 0;
            for i in range(0,len(list_gt)): #For each dataset
                counter = 0
                for j in range(0,int(len(list_gt[i])/(self.seq_length*step))): #for number of data/batchsize
                    temp = []
                    temp2 = np.zeros([self.seq_length,136])
                    i_temp = 0
                    for z in range(counter,counter+(self.seq_length*step),step):#1 to seq_size
                        temp.append(list_gt[i][z])
                        temp2[i_temp] = list_labels[i][z]
                        i_temp+=1
                    list_images.append(temp)
                    list_ground_truth[indexer] = temp2
                    indexer += 1
                    counter+=self.seq_length*step
                #print counter
            self.l_imgs = list_images
            self.l_gt = list_ground_truth
        else :
            counter_seq = 0;
            for i in range(0,len(list_gt)): #For each dataset
                indexer = 0;
                list_gt_names = []
                list_gtE_names = []
                list_ground_truth = np.zeros([int(len(list_gt[i])/(self.seq_length*step)),self.seq_length,136]) #np.zeros([counter_image,136])
                list_ground_truthE = np.zeros([int(len(list_gt[i])/(self.seq_length*step)),self.seq_length,2])#np.zeros([counter_image,2])
                counter = 0
                list_images = []
                for j in range(0,int(len(list_gt[i])/(self.seq_length*step))): #for number of data/batchsize
                    tmpn68 = []
                    tmpn2 = []
                    temp = []
                    temp2 = np.zeros([self.seq_length,136])
                    temp3 = np.zeros([self.seq_length,2])
                    i_temp = 0
                    for z in range(counter,counter+(self.seq_length*step),step):#1 to seq_size
                        temp.append(list_gt[i][z])
                        temp2[i_temp] = list_labels[i][z].flatten('F')
                        temp3[i_temp] = list_labelsE[i][z].flatten('F')
                        tmpn68.append(list_labelsN[i][z])
                        tmpn2.append(list_labelsEN[i][z])
                        i_temp+=1
                        counter_seq+=1
                    list_images.append(temp)
                    list_ground_truth[indexer] = temp2
                    list_ground_truthE[indexer] = temp3
                    list_gt_names.append(tmpn68)
                    list_gtE_names.append(tmpn2)
                    indexer += 1
                    counter+=self.seq_length*step
                #print counter
                t_l_imgs.append(list_images)
                t_l_gt.append(list_ground_truth)
                t_l_gtE.append(list_ground_truthE)
                t_list_gt_names.append(list_gt_names)
                t_list_gtE_names.append(list_gtE_names)
    # ---- Phase 4: select cross-validation splits (or take everything). ----
    self.l_imgs = []
    self.l_gt = []
    self.l_gtE = []
    self.list_gt_names = []
    self.list_gtE_names = []
    #print('cimage : ',counter_image)
    if split :
        indexer = 0
        self.l_gt = []
        self.l_gtE = []
        # Splits partition whole folders (subjects/videos), not frames.
        totalData = len(t_l_imgs)
        perSplit = int(truediv(totalData, nSplit))
        for x in listSplit :
            print('split : ',x)
            begin = x*perSplit
            if x == nSplit-1 :
                end = begin + (totalData - begin)
            else :
                end = begin+perSplit
            print(begin,end,totalData)
            if not self.isVideo :
                for i in range(begin,end) :
                    for j in range(len(t_l_imgs[i])):
                        #print('append ',t_l_imgs[i][j])
                        self.l_imgs.append(t_l_imgs[i][j])
                        self.l_gt.append(t_l_gt[i][j])
                        self.l_gtE.append(t_l_gtE[i][j])
                        self.list_gt_names.append(t_list_gt_names[i][j])
                        self.list_gtE_names.append(t_list_gtE_names[i][j])
                        indexer+=1
            else :
                for i in range(begin,end) :
                    for j in range(len(t_l_imgs[i])): #seq counter
                        t_img = []
                        t_gt = []
                        t_gtE = []
                        t_gt_N = []
                        t_gt_EN = []
                        tmp = 0
                        for k in range(len(t_l_imgs[i][j])): #seq size
                            t_img.append(t_l_imgs[i][j][k])
                            t_gt.append(t_l_gt[i][j][k])
                            t_gtE.append(t_l_gtE[i][j][k])
                            t_gt_N.append(t_list_gt_names[i][j][k])
                            t_gt_EN.append(t_list_gtE_names[i][j][k])
                            tmp+=1
                        #print('append ',t_img)
                        self.l_imgs.append(t_img)
                        self.l_gt.append(t_gt)
                        self.l_gtE.append(t_gtE)
                        self.list_gt_names.append(t_gt_N)
                        self.list_gtE_names.append(t_gt_EN)
                        indexer+=1
        print(len(self.l_imgs))
        self.l_gt = np.asarray(self.l_gt)
        self.l_gtE = np.asarray(self.l_gtE)
    else :
        if not self.isVideo :
            self.l_gt = np.zeros([counter_image,136])
            self.l_gtE = np.zeros([counter_image,2])
            indexer = 0
            for i in range(len(t_l_imgs)):
                for j in range(len(t_l_imgs[i])):
                    self.l_imgs.append(t_l_imgs[i][j])
                    print(i,j,'-',len(t_l_imgs[i]))
                    self.l_gt[indexer] = t_l_gt[i][j]
                    self.l_gtE[indexer] = t_l_gtE[i][j]
                    self.list_gt_names.append(t_list_gt_names[i][j])
                    self.list_gtE_names.append(t_list_gtE_names[i][j])
                    indexer+=1
        else :
            self.l_gt= np.zeros([counter_seq,self.seq_length,136])
            self.l_gtE = np.zeros([counter_seq,self.seq_length,2])
            indexer = 0
            for i in range(len(t_l_imgs)): #dataset
                for j in range(len(t_l_imgs[i])): #seq counter
                    t_img = []
                    t_gt = np.zeros([self.seq_length,136])
                    t_gte = np.zeros([self.seq_length,2])
                    t_gt_n = []
                    t_gt_en = []
                    i_t = 0
                    for k in range(len(t_l_imgs[i][j])): #seq size
                        t_img.append(t_l_imgs[i][j][k])
                        t_gt[i_t] = t_l_gt[i][j][k]
                        t_gte[i_t] = t_l_gtE[i][j][k]
                        t_gt_n.append(t_list_gt_names[i][j][k])
                        t_gt_en.append(t_list_gtE_names[i][j][k])
                        i_t+=1
                    self.l_imgs.append(t_img)
                    self.l_gt[indexer] = t_gt
                    self.l_gtE[indexer] = t_gte
                    self.list_gt_names.append(t_gt_n)
                    self.list_gtE_names.append(t_gt_en)
                    indexer+=1
    print('limgs : ',len(self.l_imgs))
def __getitem__(self,index):
    """Load one sample (or one sequence of samples when isVideo).

    Pipeline per image: open -> optional face crop from landmarks ->
    resize to (imageWidth, imageHeight) with landmark rescaling -> optional
    Procrustes alignment to the mean shape -> optional heatmap channel ->
    optional augmentation -> tensor transform.

    Returns (image, VA, landmarks[, heatmap], Mt) where Mt is the normalized
    affine matrix when returnM is set (otherwise a dummy zeros(1)); video
    mode returns the same with a leading sequence dimension.
    """
    #Read all data, transform etc.
    #In video, the output will be : [batch_size, sequence_size, channel, width, height]
    #Im image : [batch_size, channel, width, height]
    l_imgs = []; l_VA = []; l_ldmrk = []; l_nc = []#,torch.FloatTensor(label),newChannel#,x,self.list_gt_names[index]
    # Wrap the single-image case in 1-element lists so one loop handles both.
    if not self.isVideo :
        x_l = [self.l_imgs[index]];labelE_l =[self.l_gtE[index].copy()];label_l = [self.l_gt[index].copy()];label_n =[self.list_gt_names[index]]
    else :
        x_l = self.l_imgs[index];labelE_l =self.l_gtE[index].copy();label_l = self.l_gt[index].copy();label_n =self.list_gt_names[index]
    for x,labelE,label,ln in zip(x_l,labelE_l,label_l,label_n) :
        #print(x,labelE,label,ln)
        tImage = Image.open(x).convert("RGB")
        tImageB = None
        if self.onlyFace :
            #crop the face region
            #t,l_x,l_y,x1,y1,x_min,y_min,x2,y2 = get_enlarged_bb(the_kp = label,div_x = 2,div_y = 2,images = cv2.imread(x),displacementxy = random.uniform(-.5,.5))
            if self.ldmrkNumber > 49 :
                t,l_x,l_y,x1,y1,x_min,y_min,x2,y2 = get_enlarged_bb(the_kp = label.copy(),div_x = 8,div_y = 8,images = cv2.imread(x))#,displacementxy = random.uniform(-.5,.5))
            else :
                t,l_x,l_y,x1,y1,x_min,y_min,x2,y2 = utils.get_enlarged_bb(the_kp = label.copy(),
                    div_x = 3,div_y = 3,images = cv2.imread(x), n_points = 49)#,displacementxy = random.uniform(-.5,.5))
            area = (x1,y1, x2,y2)
            tImage = tImage.crop(area)
            # Shift and rescale the landmarks into the cropped/resized frame.
            label[:self.ldmrkNumber] -= x_min
            label[self.ldmrkNumber:] -= y_min
            tImage = tImage.resize((self.imageWidth,self.imageHeight))
            label[:self.ldmrkNumber] *= truediv(self.imageWidth,(x2 - x1))
            label[self.ldmrkNumber:] *= truediv(self.imageHeight,(y2 - y1))
        #now aliging
        if self.align :
            # Warp image (and landmarks) onto the mean shape via Procrustes.
            tImageT = utils.PILtoOpenCV(tImage.copy())
            if self.swap :
                ptsSource = torch.tensor([
                    [label[self.nose+self.ldmrkNumber],label[self.nose]],[label[self.leye+self.ldmrkNumber],label[self.leye]],[label[self.reye+self.ldmrkNumber],label[self.reye]]
                    ])
                ptsSn = [
                    [label[self.nose+self.ldmrkNumber],label[self.nose]],[label[self.leye+self.ldmrkNumber],label[self.leye]],[label[self.reye+self.ldmrkNumber],label[self.reye]]
                    ]
            else :
                ptsSource = torch.tensor([
                    [label[self.nose],label[self.nose+self.ldmrkNumber]],[label[self.leye],label[self.leye+self.ldmrkNumber]],[label[self.reye],label[self.reye+self.ldmrkNumber]]
                    ])
                ptsSn =[
                    [label[self.nose],label[self.nose+self.ldmrkNumber]],[label[self.leye],label[self.leye+self.ldmrkNumber]],[label[self.reye],label[self.reye+self.ldmrkNumber]]
                    ]
            ptsSnFull = np.column_stack((label[:self.ldmrkNumber],label[self.ldmrkNumber:]))
            ptsSnFull = np.asarray(ptsSnFull,np.float32)
            ptsSource = ptsSource.numpy()
            ptsSource = np.asarray(ptsSource,np.float32)
            if self.useNudget :
                trans = nudged.estimate(ptsSn,self.ptsTn)
                M = np.asarray(trans.get_matrix())[:2,:]
                #print("Nudged : ",mN,trans.get_scale(),trans.get_rotation())
            else :
                #M = cv2.getAffineTransform(ptsSource,self.ptsDst)
                #_,_,aff = self.procrustes(ptsSource,self.ptsDst)
                #print(ptsSource.shape,'-', self.ptsDst.shape)
                #print(ptsSnFull.shape,'-', self.ptsTnFull.shape)
                _,_,aff = self.procrustes(self.ptsTnFull,ptsSnFull)
                M = aff[:2,:]
            dst = cv2.warpAffine(tImageT,M,(self.imageWidth,self.imageHeight))
            #print(np.asarray(ptsSn).shape, np.asarray(self.ptsTn).shape,M.shape)
            # Apply the same affine to the landmarks (homogeneous coords).
            M_full = np.append(M,[[0,0,1]],axis = 0)
            l_full = np.stack((label[:self.ldmrkNumber],label[self.ldmrkNumber:],np.ones(self.ldmrkNumber)))
            ldmark = np.matmul(M_full, l_full)
            if False :
                print(ldmark)
                for i in range(self.ldmrkNumber) :
                    cv2.circle(dst,(int(scale(ldmark[0,i])),int(scale(ldmark[1,i]))),2,(0,255,0) )
                cv2.imshow('test align',dst)
                cv2.waitKey(0)
            label = np.concatenate((ldmark[0],ldmark[1]))
            tImage = utils.OpenCVtoPIL(dst)
        newChannel = None
        if self.wHeatmap :
            # Build (or in principle load — cached load is disabled by the
            # `and False`) a single-channel landmark heatmap image.
            theMiddleName = 'img'
            filePath = x.split(os.sep)
            ifolder = filePath.index(theMiddleName)
            print(ifolder)
            image_name = filePath[-1]
            annot_name_H = os.path.splitext(image_name)[0]+'.npy'
            sDirName = filePath[:ifolder]
            dHeatmaps = '/'.join(sDirName)+'/heatmaps'
            finalTargetH = dHeatmaps+'/'+annot_name_H
            print(finalTargetH)
            if isfile(finalTargetH) and False:
                newChannel = np.load(finalTargetH)
                newChannel = Image.fromarray(newChannel)
            else :
                checkDirMake(dHeatmaps)
                tImageTemp = cv2.cvtColor(np.array(tImage),cv2.COLOR_RGB2BGR)
                #tImageTemp = cv2.imread(x)#tImage.copy()
                print(len(label),label)
                b_channel,g_channel,r_channel = tImageTemp[:,:,0],tImageTemp[:,:,1],tImageTemp[:,:,2]
                newChannel = b_channel.copy(); newChannel[:] = 0
                t0,t1,t2,t3 = utils.get_bb(label[0:self.ldmrkNumber], label[self.ldmrkNumber:],length=self.ldmrkNumber)
                l_cd,rv = utils.get_list_heatmap(0,None,t2-t0,t3-t1,.05)
                height, width,_ = tImageTemp.shape
                wx = t2-t0
                wy = t3-t1
                scaler = 255/np.max(rv)
                # Stamp a Gaussian-like blob (max over overlaps) at each landmark.
                for iter in range(self.ldmrkNumber) :
                    ix,iy = int(label[iter]),int(label[iter+self.ldmrkNumber])
                    #Now drawing given the center
                    for iter2 in range(len(l_cd)) :
                        value = int(rv[iter2]*scaler)
                        if newChannel[utils.inBound(iy+l_cd[iter2][0],0,height-1), utils.inBound(ix + l_cd[iter2][1],0,width-1)] < value :
                            newChannel[utils.inBound(iy+l_cd[iter2][0],0,height-1), utils.inBound(ix + l_cd[iter2][1],0,width-1)] = int(rv[iter2]*scaler)#int(heatmapValue/2 + rv[iter2] * heatmapValue)
                '''tImage2 = cv2.merge((b_channel, newChannel,newChannel, newChannel))
                cv2.imshow("combined",tImage2)
                cv2.waitKey(0)'''
                np.save(finalTargetH,newChannel)
                newChannel = Image.fromarray(newChannel)
        if self.augment :
            # One randomly chosen geometric augmentation, then random crop
            # and random noise; all keep landmarks (and heatmap) in sync.
            sel = np.random.randint(0,4)
            #0 : neutral, 1 : horizontal flip, 2:random rotation, 3:occlusion
            if sel == 0 :
                pass
            elif sel == 1 :
                flip = RandomHorizontalFlip_WL(1,self.ldmrkNumber)
                tImage,label,newChannel = flip(tImage,label,newChannel,self.ldmrkNumber)
            elif sel == 2 and not self.align :
                rot = RandomRotation_WL(45)
                tImage,label,newChannel = rot(tImage,label,newChannel,self.ldmrkNumber)
            elif sel == 3 :
                occ = Occlusion_WL(1)
                tImage,label,newChannel = occ(tImage,label,newChannel)
            #random crop
            if not self.align :
                rc = RandomResizedCrop_WL(size = self.imageSize,scale = (0.5,1), ratio = (0.5, 1.5))
                tImage,label,newChannel= rc(tImage,label,newChannel)
            #additional blurring
            if (np.random.randint(1,3)%2==0) and True :
                sel_n = np.random.randint(1,6)
                #sel_n = 4
                rc = GeneralNoise_WL(1)
                tImage,label= rc(tImage,label,sel_n,np.random.randint(0,3))
        if self.returnM :
            # Recompute the Procrustes affine on the (possibly augmented)
            # landmarks and convert it to the normalized theta form.
            if self.swap :
                ptsSource = torch.tensor([
                    [label[self.nose+self.ldmrkNumber],label[self.nose]],[label[self.leye+self.ldmrkNumber],label[self.leye]],[label[self.reye+self.ldmrkNumber],label[self.reye]]
                    ])
                ptsSn = [
                    [label[self.nose+self.ldmrkNumber],label[self.nose]],[label[self.leye+self.ldmrkNumber],label[self.leye]],[label[self.reye+self.ldmrkNumber],label[self.reye]]
                    ]
            else :
                ptsSource = torch.tensor([
                    [label[self.nose],label[self.nose+self.ldmrkNumber]],[label[self.leye],label[self.leye+self.ldmrkNumber]],[label[self.reye],label[self.reye+self.ldmrkNumber]]
                    ])
                ptsSn =[
                    [label[self.nose],label[self.nose+self.ldmrkNumber]],[label[self.leye],label[self.leye+self.ldmrkNumber]],[label[self.reye],label[self.reye+self.ldmrkNumber]]
                    ]
            ptsSnFull = np.column_stack((label[:self.ldmrkNumber],label[self.ldmrkNumber:]))
            ptsSnFull = np.asarray(ptsSnFull,np.float32)
            ptsSource = ptsSource.numpy()
            ptsSource = np.asarray(ptsSource,np.float32)
            if self.useNudget :
                trans = nudged.estimate(ptsSn,self.ptsTn)
                M = np.asarray(trans.get_matrix())[:2,:]
            else :
                #M = cv2.getAffineTransform(ptsSource,self.ptsDst)
                _,_,aff = self.procrustes(self.ptsTnFull,ptsSnFull)
                M = aff[:2,:]
            if False :
                tImageT = utils.PILtoOpenCV(tImage.copy())
                dst = cv2.warpAffine(tImageT,M,(self.imageWidth,self.imageHeight))
                print(np.asarray(ptsSn).shape, np.asarray(self.ptsTn).shape,M.shape)
                M_full = np.append(M,[[0,0,1]],axis = 0)
                l_full = np.stack((label[:self.ldmrkNumber],label[self.ldmrkNumber:],np.ones(self.ldmrkNumber)))
                ldmark = np.matmul(M_full, l_full)
                print(ldmark)
                for i in range(self.ldmrkNumber) :
                    cv2.circle(dst,(int(scale(ldmark[0,i])),int(scale(ldmark[1,i]))),2,(0,0,255) )
                cv2.imshow('test recovered',dst)
                cv2.waitKey(0)
            Minter = self.param2theta(np.append(M,[[0,0,1]],axis = 0), self.imageWidth,self.imageHeight)
            Mt = torch.from_numpy(Minter).float()
        else :
            Mt = torch.zeros(1)
        if self.useIT :
            tImage = self.transformInternal(tImage)
        else :
            tImage = self.transform(tImage)
        if not self.wHeatmap :
            l_imgs.append(tImage); l_VA.append(torch.FloatTensor(labelE)); l_ldmrk.append(torch.FloatTensor(label))#,x,self.list_gt_names[index]
        else :
            newChannel = transforms.Resize(224)(newChannel)
            newChannel = transforms.ToTensor()(newChannel)
            newChannel = newChannel.sub(125)
            l_imgs.append(tImage); l_VA.append(torch.FloatTensor(labelE)); l_ldmrk.append(torch.FloatTensor(label)); l_nc.append(newChannel)
        #return tImage,torch.FloatTensor(labelE),torch.FloatTensor(label),newChannel#,x,self.list_gt_names[index]
    if not self.isVideo :
        if self.wHeatmap :
            return l_imgs[0], l_VA[0], l_ldmrk[0], l_nc[0], Mt
        else :
            return l_imgs[0], l_VA[0], l_ldmrk[0], Mt
    else :
        #lImgs = torch.Tensor(len(l_imgs),3,self.imageHeight,self.imageWidth)
        #lVA = torch.Tensor(len(l_VA),2)
        #lLD = torch.Tensor(len(l_ldmrk),136)
        lImgs = torch.stack(l_imgs)
        lVA = torch.stack(l_VA)
        lLD = torch.stack(l_ldmrk)
        #print(lImgs.shape, l_imgs[0].shape, l_VA[0].shape,len(lImgs))
        #torch.cat(l_imgs, out=lImgs)
        #torch.cat(l_VA, out=lVA)
        #torch.cat(l_ldmrk, out=lLD)
        if self.wHeatmap :
            #lnc = torch.Tensor(len(l_nc),1,self.imageHeight,self.imageWidth)
            #torch.cat(l_nc, out=lnc)
            lnc = torch.stack(l_nc)
            return lImgs, lVA, lLD, lnc, Mt
        else :
            return lImgs, lVA, lLD, Mt
    def transformInternal(self, img):
        """Convert a PIL RGB image to a mean-subtracted C x H x W float tensor.

        NOTE(review): the result of transforms.Resize(224)(img) is discarded,
        so the input is NOT actually resized here -- confirm whether an
        assignment (img = ...) was intended.
        """
        transforms.Resize(224)(img)
        img = np.array(img, dtype=np.uint8)
        #img = img[:, :, ::-1] # RGB -> BGR
        img = img.astype(np.float32)
        img -= self.mean_bgr
        img = img.transpose(2, 0, 1) # C x H x W
        img = torch.from_numpy(img).float()
        return img
def untransformInternal(self, img, lbl):
img = img.numpy()
img = img.transpose(1, 2, 0)
img += self.mean_bgr
img = img.astype(np.uint8)
img = img[:, :, ::-1]
return img, lbl
def param2theta(self,param, w, h):
param = np.linalg.inv(param)
theta = np.zeros([2,3])
theta[0,0] = param[0,0]
theta[0,1] = param[0,1]*h/w
theta[0,2] = param[0,2]*2/w + theta[0,0] + theta[0,1] - 1
theta[1,0] = param[1,0]*w/h
theta[1,1] = param[1,1]
theta[1,2] = param[1,2]*2/h + theta[1,0] + theta[1,1] - 1
return theta
def procrustes(self, X, Y, scaling=True, reflection='best'):
n,m = X.shape
ny,my = Y.shape
muX = X.mean(0)
muY = Y.mean(0)
X0 = X - muX
Y0 = Y - muY
ssX = (X0**2.).sum()
ssY = (Y0**2.).sum()
# centred Frobenius norm
normX = np.sqrt(ssX)
normY = np.sqrt(ssY)
# scale to equal (unit) norm
X0 /= normX
Y0 /= normY
if my < m:
Y0 = np.concatenate((Y0, np.zeros(n, m-my)),0)
# optimum rotation matrix of Y
A = np.dot(X0.T, Y0)
U,s,Vt = np.linalg.svd(A,full_matrices=False)
V = Vt.T
T = np.dot(V, U.T)
if reflection is not 'best':
# does the current solution use a reflection?
have_reflection = np.linalg.det(T) < 0
# if that's not what was specified, force another reflection
if reflection != have_reflection:
V[:,-1] *= -1
s[-1] *= -1
T = np.dot(V, U.T)
traceTA = s.sum()
if scaling:
# optimum scaling of Y
b = traceTA * normX / normY
# standarised distance between X and b*Y*T + c
d = 1 - traceTA**2
# transformed coords
Z = normX*traceTA*np.dot(Y0, T) + muX
else:
b = 1
d = 1 + ssY/ssX - 2 * traceTA * normY / normX
Z = normY*np.dot(Y0, T) + muX
# transformation matrix
if my < m:
T = T[:my,:]
c = muX - b*np.dot(muY, T)
#transformation values
#tform = {'rotation':T, 'scale':b, 'translation':c}
tform = np.append(b*T,[c],axis = 0).T
tform = np.append(tform,[[0,0,1]],axis = 0)
return d, Z, tform
    def __len__(self):
        """Number of samples (frame mode) or sequences (video mode)."""
        return len(self.l_imgs)
class SEWAFEWReducedLatent(data.Dataset): #return affect on Valence[0], Arousal[1] order
    """Dataset over pre-extracted latent feature files for valence/arousal.

    Each sample is a latent vector loaded from an .npz file (key 'z'),
    paired with its valence/arousal annotation.  Supports AFEW (dbType 0),
    SEWA (dbType 1) and SEMAINE (isSemaine) folder layouts, frame or
    sequence (video) mode, k-fold style splitting, and optional quadrant
    labels, per-bin sample weights, VAQ vectors and file names.
    """
    mean_bgr = np.array([91.4953, 103.8827, 131.0912]) # from resnet50_ft.prototxt
    # NOTE(review): data_list and listSplit are mutable default arguments,
    # shared across calls -- safe only while callers never mutate them.
    def __init__(self, data_list = ["AFEW"],dir_gt = None, image_size =224, step = 1,split = False,
                 nSplit = 5, listSplit = [0,1,2,3,4],isVideo = False, seqLength = None, dbType = 0,
                 returnQuadrant = False, returnWeight = False,useAll = False,
                 splitNumber = None,returnVAQ=False,returnFName = False,isSemaine=False):#dbtype 0 is AFEW, 1 is SEWA
        """Walk the dataset folders, collecting feature-file paths and labels.

        dbType selects the annotation layout (0: AFEW 'annot2' text files,
        1: SEWA aligned csv valence/arousal files); isSemaine switches the
        feature folder name.  When split is True only the folds listed in
        listSplit (out of nSplit) are kept.
        """
        self.dbType = dbType
        self.isSemaine = isSemaine
        self.seq_length = seqLength
        self.isVideo = isVideo
        self.returnNoisy = False
        self.returnVAQ = returnVAQ
        self.returnFName = returnFName
        self.curDir = rootDir +"/"#/home/deckyal/eclipse-workspace/DeepModel/src/MMTVA/data/"
        if dbType == 0 :
            featName = "FT-AF0-0-16-16-Den"
        else :
            featName = "FT-SW0-0-16-16-Den"
        if self.isSemaine :
            featName = "FT-SEM0-0-16-16-Den"
        if useAll :
            featName+="-UA"
        featName+="-z"
        self.returnWeight = returnWeight
        if self.returnWeight :
            # Build inverse-frequency weights from the precomputed VA histogram.
            name = 'VA-Train-'+str(listSplit[0])+'.npy'
            if self.dbType == 1 :
                name='S-'+name
            if isSemaine :
                name = 'SE-VA-Train-'+str(listSplit[0])+'.npy'
            weight = np.load(rootDir+"/DST-SE-AF/"+name).astype('float')+1
            sum = weight.sum(0)
            weight = (weight/sum)
            #print('1',weight)
            weight = 1/weight
            #print('2',weight)
            sum = weight.sum(0)
            weight = weight/sum
            #print('3',weight)
            self.weight = weight
        self.returnQ = returnQuadrant
        list_gt = []
        list_labels_tE = []
        counter_image = 0
        annotE_name = 'annot2'
        if dir_gt is not None :
            annot_name = dir_gt
        list_missing = []
        for data in data_list :
            print(("Opening "+data))
            fullDir = self.curDir +data+"/"
            listFolder = os.listdir(fullDir)
            listFolder.sort()
            for tempx in range(len(listFolder)):
                f = listFolder[tempx]
                fullPath = os.path.join(fullDir,f)
                #print('opening fullpath',fullPath)
                if os.path.isdir(fullPath): # Check if object is directory
                    #print((f.name, f.full_path)) # Name is without extension
                    #c_image,c_ldmark = 0,0
                    if self.dbType == 1 : #we directly get the VA file in case of sewa
                        #first get the valence
                        valFile = fullPath+"/valence/"+f+"_Valence_A_Aligned.csv"
                        aroFile = fullPath+"/arousal/"+f+"_Arousal_A_Aligned.csv"
                        list_labels_tE.append([valFile,aroFile])
                        #print(valFile,aroFile)
                    #print('fp ',fullPath)
                    for sub_f in file_walker.walk(fullPath):
                        if sub_f.isDirectory: # Check if object is directory
                            list_dta = []
                            #print(sub_f.name)
                            if(sub_f.name == featName): #Else it is the image
                                for sub_sub_f in sub_f.walk(): #this is the data
                                    if(".npy" not in sub_sub_f.full_path):
                                        list_dta.append(sub_sub_f.full_path)
                                list_gt.append(sorted(list_dta))
                                counter_image+=len(list_dta)
                                c_ldmrk = len(list_dta)
                            elif (sub_f.name == annotE_name) :
                                if self.dbType == 0 :
                                    #If that's annot, add to labels_t
                                    for sub_sub_f in sub_f.walk(): #this is the data
                                        if(".npy" not in sub_sub_f.full_path):
                                            list_dta.append(sub_sub_f.full_path)
                                    list_labels_tE.append(sorted(list_dta))
        self.length = counter_image
        print("Now opening keylabels")
        list_labelsEN = []
        list_labelsE = []
        for ix in range(len(list_labels_tE)) : #lbl,lble in (list_labels_t,list_labels_tE) :
            lbl_2 = [] #Per folder
            lbl_n2 = [] #Per folder
            if self.dbType == 1 : #sewa
                #print(list_labels_t[ix][0])
                valFile = np.asarray(readCSV(list_labels_tE[ix][0]))
                aroFile = np.asarray(readCSV(list_labels_tE[ix][1]))
                lbl_n2.append(list_labels_tE[ix][0])
                lbl_2 = np.column_stack((valFile,aroFile))
            else :
                for jx in range(len (list_labels_tE[ix])): #lbl_sub in lbl :
                    #print(os.path.basename(list_gt[ix][jx]))
                    #print(os.path.basename(list_labels_t[ix][jx]))
                    #print(os.path.basename(list_labels_tE[ix][jx]))
                    if self.dbType == 0 :
                        lbl_subE = list_labels_tE[ix][jx]
                        if ('aro' in lbl_subE) :
                            x = []
                            #print(lbl_sub)
                            with open(lbl_subE) as file:
                                data2 = [re.split(r'\t+',l.strip()) for l in file]
                            for i in range(len(data2)) :
                                temp = [ float(j) for j in data2[i][0].split()]
                                temp.reverse() #to give the valence first. then arousal
                                x.append(temp)
                            lbl_2.append(np.array(x).flatten('F'))
                            lbl_n2.append(lbl_subE)
            list_labelsEN.append(lbl_n2)
            list_labelsE.append(lbl_2)
        t_l_imgs = []
        t_l_gtE = []
        t_list_gtE_names = []
        #print(list_labelsEN)
        print(len(list_labelsE))
        print(len(list_labelsE[0]))
        print(len(list_labelsE[0][0]))
        print(list_labelsE[0][0])
        if not self.isVideo :
            #Flatten it to one list
            for i in range(0,len(list_gt)): #For each dataset
                list_images = []
                list_gtE_names = []
                indexer = 0
                list_ground_truthE = np.zeros([len(list_gt[i]),2])
                for j in range(0,len(list_gt[i]),step): #for number of data #n_skip is usefull for video data
                    list_images.append(list_gt[i][j])
                    #print(list_labelsEN)
                    if self.dbType == 0 :
                        list_gtE_names.append(list_labelsEN[i][j])
                    else :
                        list_gtE_names.append(list_labelsEN[i][0])
                    #print(list_labelsEN[i])
                    '''if len(list_labels[i][j] < 1):
                        print(list_labels[i][j])'''
                    #print(len(list_labels[i][j]))
                    list_ground_truthE[indexer] = np.array(list_labelsE[i][j]).flatten('F')
                    indexer += 1
                t_l_imgs.append(list_images)
                t_l_gtE.append(list_ground_truthE)
                t_list_gtE_names.append(list_gtE_names)
        else :
            # NOTE(review): this branch divides by self.seq_length while it is
            # None and references undefined names (list_labels, list_images);
            # it would raise if ever reached -- confirm seqLength is always
            # provided in video mode.
            if self.seq_length is None :
                list_ground_truth = np.zeros([int(counter_image/(self.seq_length*step)),self.seq_length,136])
                indexer = 0;
                for i in range(0,len(list_gt)): #For each dataset
                    counter = 0
                    for j in range(0,int(len(list_gt[i])/(self.seq_length*step))): #for number of data/batchsize
                        temp = []
                        temp2 = np.zeros([self.seq_length,136])
                        i_temp = 0
                        for z in range(counter,counter+(self.seq_length*step),step):#1 to seq_size
                            temp.append(list_gt[i][z])
                            temp2[i_temp] = list_labels[i][z]
                            i_temp+=1
                        list_images.append(temp)
                        list_ground_truth[indexer] = temp2
                        indexer += 1
                        counter+=self.seq_length*step
                    #print counter
                self.l_imgs = list_images
                self.l_gt = list_ground_truth
            else :
                counter_seq = 0;
                for i in range(0,len(list_gt)): #For each dataset
                    indexer = 0;
                    list_gtE_names = []
                    list_ground_truthE = np.zeros([int(len(list_gt[i])/(self.seq_length*step)),self.seq_length,2])#np.zeros([counter_image,2])
                    counter = 0
                    list_images = []
                    for j in range(0,int(len(list_gt[i])/(self.seq_length*step))): #for number of data/batchsize
                        temp = []
                        tmpn2 = []
                        temp3 = np.zeros([self.seq_length,2])
                        i_temp = 0
                        for z in range(counter,counter+(self.seq_length*step),step):#1 to seq_size
                            temp.append(list_gt[i][z])
                            #print(list_labelsE[i][z])
                            temp3[i_temp] = list_labelsE[i][z].flatten('F')
                            if self.dbType == 0 :
                                #list_gtE_names.append(list_labelsEN[i][j])
                                tmpn2.append(list_labelsEN[i][j])
                            else :
                                #list_gtE_names.append(list_labelsEN[i][0])
                                tmpn2.append(list_labelsEN[i][0])
                            i_temp+=1
                            counter_seq+=1
                        list_images.append(temp)
                        list_ground_truthE[indexer] = temp3
                        list_gtE_names.append(tmpn2)
                        indexer += 1
                        counter+=self.seq_length*step
                    #print counter
                    t_l_imgs.append(list_images)
                    t_l_gtE.append(list_ground_truthE)
                    t_list_gtE_names.append(list_gtE_names)
        self.l_imgs = []
        self.l_gtE = []
        self.list_gtE_names = []
        #print('cimage : ',counter_image)
        # NOTE(review): unlike SEWAFEWReduced below, there is no else-branch
        # for split == False here, so the sample lists stay empty in that
        # case -- confirm callers always pass split=True.
        if split :
            indexer = 0
            self.l_gtE = []
            totalData = len(t_l_imgs)
            perSplit = int(truediv(totalData, nSplit))
            for x in listSplit :
                print('split : ',x)
                begin = x*perSplit
                if x == nSplit-1 :
                    end = begin + (totalData - begin)
                else :
                    end = begin+perSplit
                print(begin,end,totalData)
                if not self.isVideo :
                    for i in range(begin,end) :
                        for j in range(len(t_l_imgs[i])):
                            #print('append ',t_l_imgs[i][j])
                            self.l_imgs.append(t_l_imgs[i][j])
                            self.l_gtE.append(t_l_gtE[i][j])
                            self.list_gtE_names.append(t_list_gtE_names[i][j])
                            indexer+=1
                else :
                    for i in range(begin,end) :
                        for j in range(len(t_l_imgs[i])): #seq counter
                            t_img = []
                            t_gtE = []
                            t_gt_EN = []
                            tmp = 0
                            for k in range(len(t_l_imgs[i][j])): #seq size
                                t_img.append(t_l_imgs[i][j][k])
                                t_gtE.append(t_l_gtE[i][j][k])
                                t_gt_EN.append(t_list_gtE_names[i][j][k])
                                tmp+=1
                            #print('append ',t_img)
                            self.l_imgs.append(t_img)
                            self.l_gtE.append(t_gtE)
                            self.list_gtE_names.append(t_gt_EN)
                            indexer+=1
            print(len(self.l_imgs))
        print('limgs : ',len(self.l_imgs))
    def __getitem__(self,index):
        """Return one latent sample (frame mode) or a stacked sequence
        (video mode), plus VA labels and the optional extras selected at
        construction time (quadrant, weights, VAQ, file name)."""
        #Read all data, transform etc.
        #In video, the output will be : [batch_size, sequence_size, channel, width, height]
        #Im image : [batch_size, channel, width, height]
        l_imgs = []; l_ldmrk = []; l_VA = []; l_nc = []; l_qdrnt = []#,torch.FloatTensor(label),newChannel#,x,self.list_gt_names[index]
        if self.returnFName :
            l_fname = []
        if self.returnNoisy :
            l_nimgs = []
        if self.returnWeight :
            l_weights = []
        if self.returnVAQ :
            l_vaq = []
        if not self.isVideo :
            x_l = [self.l_imgs[index]];labelE_l =[self.l_gtE[index].copy()];label_n =[self.list_gtE_names[index]]
        else :
            x_l = self.l_imgs[index];labelE_l =self.l_gtE[index].copy();label_n =self.list_gtE_names[index]
        #print('label n ',label_n)
        for x,labelE,ln in zip(x_l,labelE_l,label_n) :
            tmp = np.load(x);#tImage = np.load(x) #Image.open(x).convert("RGB")
            if self.returnFName :
                l_fname.append(x)
            reduce = True
            if reduce :
                tImage = tmp['z'][:64]
            else :
                tImage=tmp['z']
            if self.returnVAQ:
                vaq = torch.from_numpy(tmp['vaq'])
                l_vaq.append(vaq)
            #tImage = np.load(x)
            nImage = tImage.copy()
            label = torch.zeros(1)
            Mt = torch.zeros(1)
            tImage = torch.from_numpy(tImage)
            if self.returnNoisy :
                nImage = torch.from_numpy(nImage)
            #print('shap e: ', tImage.shape)
            l_imgs.append(tImage); l_VA.append(torch.FloatTensor(labelE)); l_ldmrk.append(torch.FloatTensor(label))#,x,self.list_gt_names[index]
            if self.returnNoisy :
                l_nimgs.append(nImage)
            if self.returnQ :
                if self.dbType == 1 :
                    min = 0; max = 1;
                elif self.isSemaine == 1:
                    min = -1; max = 1;
                else :
                    min = -10; max = 10;
                l_qdrnt.append(toQuadrant(labelE, min, max, toOneHot=False))
            if self.returnWeight :
                v = labelE[0]
                # NOTE(review): arousal is indexed with labelE[0] (valence);
                # labelE[1] looks intended -- confirm.
                a = labelE[0]
                if self.dbType == 1 :#sewa
                    v = v*10+1
                    a = a*10+1
                elif self.isSemaine == 1 :
                    v = v*10+10
                    a = a*10+10
                else :
                    v = v+10
                    a = a+10
                v,a = int(v),int(a)
                l_weights.append([self.weight[v,0],self.weight[a,1]])
            l_nc.append(ln)
        #print('lnc : ',l_nc)
        if not self.isVideo :
            if self.returnQ :
                if self.returnNoisy :
                    res = [l_imgs[0], l_VA[0], l_ldmrk[0], Mt,l_nc[0],l_qdrnt[0],l_nimgs[0]]
                else :
                    res = [l_imgs[0], l_VA[0], l_ldmrk[0], Mt,l_nc[0],l_qdrnt[0]]
            else :
                if self.returnNoisy :
                    res = [l_imgs[0], l_VA[0], l_ldmrk[0], Mt,l_nc[0],l_nimgs[0]]
                else :
                    res = [l_imgs[0], l_VA[0], l_ldmrk[0], Mt,l_nc[0]]
            if self.returnWeight :
                res.append(torch.tensor(l_weights[0]))
            if self.returnVAQ :
                res.append(torch.tensor(l_vaq[0]))
                #res.append(l_vaq)
            if self.returnFName:
                res.append(l_fname[0])
            return res
        else :
            lImgs = torch.stack(l_imgs)
            lVA = torch.stack(l_VA)
            lLD = torch.stack(l_ldmrk)
            #print(l_VA)
            l_qdrnt = torch.tensor((l_qdrnt))
            if self.returnQ :
                if self.returnNoisy :
                    res = [lImgs, lVA, lLD, Mt,l_nc,l_qdrnt,l_nimgs]
                else :
                    res = [lImgs, lVA, lLD, Mt,l_nc,l_qdrnt]
            else :
                if self.returnNoisy :
                    res = [lImgs, lVA, lLD, Mt,l_nc,l_nimgs]
                else :
                    res = [lImgs, lVA, lLD, Mt,l_nc]
            if self.returnWeight :
                l_weights = torch.tensor(l_weights)
                res.append(l_weights)
            if self.returnVAQ :
                l_vaq = torch.tensor(l_vaq)
                res.append(l_vaq)
            if self.returnFName:
                res.append(l_fname)
            return res
    def __len__(self):
        """Number of samples (frame mode) or sequences (video mode)."""
        return len(self.l_imgs)
class SEWAFEWReduced(data.Dataset): #return affect on Valence[0], Arousal[1] order
    """Image dataset for valence/arousal regression on AFEW (dbType 0),
    SEWA (dbType 1) or SEMAINE (isSemaine) with optional augmentation,
    noisy counterparts, quadrant labels and per-bin sample weights.

    Frame mode returns a single image tensor; video mode returns a stacked
    sequence of seqLength frames.
    """
    mean_bgr = np.array([91.4953, 103.8827, 131.0912]) # from resnet50_ft.prototxt
    # NOTE(review): data_list and listSplit are mutable default arguments,
    # shared across calls -- safe only while callers never mutate them.
    def __init__(self, data_list = ["AFEW"],dir_gt = None,onlyFace = True, image_size =224,
                 transform = None,useIT = False,augment = False, step = 1,split = False,
                 nSplit = 5, listSplit = [0,1,2,3,4],isVideo = False, seqLength = None, dbType = 0,
                 returnQuadrant = False, returnNoisy = False, returnWeight = False, isSemaine = False):#dbtype 0 is AFEW, 1 is SEWA
        """Walk the dataset folders ('img-128' image dirs plus annotations),
        collect image paths and VA labels, and optionally keep only the
        folds listed in listSplit out of nSplit."""
        self.dbType = dbType
        self.isSemaine = isSemaine
        self.seq_length = seqLength
        self.isVideo = isVideo
        self.transform = transform
        self.onlyFace = onlyFace
        self.augment = augment
        self.imageSize = image_size
        self.imageHeight = image_size
        self.imageWidth = image_size
        self.useIT = useIT
        self.curDir = rootDir +"/"#/home/deckyal/eclipse-workspace/DeepModel/src/MMTVA/data/"
        self.returnNoisy = returnNoisy
        self.returnWeight = returnWeight
        if self.returnWeight :
            # Build inverse-frequency weights from the precomputed VA histogram.
            name = 'VA-Train-'+str(listSplit[0])+'.npy'
            if self.dbType == 1 :
                name='S-'+name
            if isSemaine :
                name = 'SE-VA-Train-'+str(listSplit[0])+'.npy'
            print('weight',name)
            weight = np.load(rootDir+"/DST-SE-AF/"+name).astype('float')+1
            sum = weight.sum(0)
            weight = (weight/sum)
            #print('1',weight)
            weight = 1/weight
            #print('2',weight)
            sum = weight.sum(0)
            weight = weight/sum
            #print('3',weight)
            # NOTE(review): stray no-op string literal below, left in place.
            "just tesing for the latencyh if its possible. "
            self.weight = weight
        self.returnQ = returnQuadrant
        if self.augment :
            self.flip = RandomHorizontalFlip(1)
            self.rot = RandomRotation(45)
            self.occ = Occlusion(1)
            self.rc = RandomResizedCrop(size = self.imageSize,scale = (0.5,1), ratio = (0.5, 1.5))
        if self.returnNoisy :
            self.gn = GeneralNoise(1)
            self.occ = Occlusion(1)
        list_gt = []
        list_labels_tE = []
        counter_image = 0
        annotE_name = 'annot2'
        if dir_gt is not None :
            annot_name = dir_gt
        list_missing = []
        for data in data_list :
            print(("Opening "+data))
            fullDir = self.curDir +data+"/"
            listFolder = os.listdir(fullDir)
            listFolder.sort()
            for tempx in range(len(listFolder)):
                f = listFolder[tempx]
                fullPath = os.path.join(fullDir,f)
                #print('opening fullpath',fullPath)
                if os.path.isdir(fullPath): # Check if object is directory
                    #print((f.name, f.full_path)) # Name is without extension
                    #c_image,c_ldmark = 0,0
                    if self.dbType == 1 : #we directly get the VA file in case of sewa
                        #first get the valence
                        valFile = fullPath+"/valence/"+f+"_Valence_A_Aligned.csv"
                        aroFile = fullPath+"/arousal/"+f+"_Arousal_A_Aligned.csv"
                        list_labels_tE.append([valFile,aroFile])
                        #print(valFile,aroFile)
                    for sub_f in file_walker.walk(fullPath):
                        if sub_f.isDirectory: # Check if object is directory
                            list_dta = []
                            #print(sub_f.name)
                            if(sub_f.name == 'img-128'): #Else it is the image
                                for sub_sub_f in sub_f.walk(): #this is the data
                                    if(".npy" not in sub_sub_f.full_path):
                                        list_dta.append(sub_sub_f.full_path)
                                list_gt.append(sorted(list_dta))
                                counter_image+=len(list_dta)
                                c_ldmrk = len(list_dta)
                            elif (sub_f.name == annotE_name) :
                                if self.dbType == 0 :
                                    #If that's annot, add to labels_t
                                    for sub_sub_f in sub_f.walk(): #this is the data
                                        if(".npy" not in sub_sub_f.full_path):
                                            list_dta.append(sub_sub_f.full_path)
                                    list_labels_tE.append(sorted(list_dta))
        self.length = counter_image
        print("Now opening keylabels")
        list_labelsEN = []
        list_labelsE = []
        for ix in range(len(list_labels_tE)) : #lbl,lble in (list_labels_t,list_labels_tE) :
            lbl_2 = [] #Per folder
            lbl_n2 = [] #Per folder
            if self.dbType == 1 : #sewa
                #print(list_labels_t[ix][0])
                valFile = np.asarray(readCSV(list_labels_tE[ix][0]))
                aroFile = np.asarray(readCSV(list_labels_tE[ix][1]))
                #lbl_n2.append(list_labels_tE[ix][0])
                for it in range(1,len(valFile)+1):
                    dir,_ = os.path.split(list_labels_tE[ix][0])
                    newName = str(it).zfill(6)+'.tmp'
                    lbl_n2.append(os.path.join(dir,newName))
                lbl_2 = np.column_stack((valFile,aroFile))
            else :
                for jx in range(len (list_labels_tE[ix])): #lbl_sub in lbl :
                    #print(os.path.basename(list_gt[ix][jx]))
                    #print(os.path.basename(list_labels_t[ix][jx]))
                    #print(os.path.basename(list_labels_tE[ix][jx]))
                    if self.dbType == 0 :
                        lbl_subE = list_labels_tE[ix][jx]
                        if ('aro' in lbl_subE) :
                            x = []
                            #print(lbl_sub)
                            with open(lbl_subE) as file:
                                data2 = [re.split(r'\t+',l.strip()) for l in file]
                            for i in range(len(data2)) :
                                temp = [ float(j) for j in data2[i][0].split()]
                                temp.reverse() #to give the valence first. then arousal
                                x.append(temp)
                            lbl_2.append(np.array(x).flatten('F'))
                            lbl_n2.append(lbl_subE)
            list_labelsEN.append(lbl_n2)
            list_labelsE.append(lbl_2)
        t_l_imgs = []
        t_l_gtE = []
        t_list_gtE_names = []
        #print(list_labelsEN)
        if not self.isVideo :
            #Flatten it to one list
            for i in range(0,len(list_gt)): #For each dataset
                list_images = []
                list_gtE_names = []
                indexer = 0
                list_ground_truthE = np.zeros([len(list_gt[i]),2])
                for j in range(0,len(list_gt[i]),step): #for number of data #n_skip is usefull for video data
                    list_images.append(list_gt[i][j])
                    #print(list_labelsEN)
                    if self.dbType == 0 :
                        list_gtE_names.append(list_labelsEN[i][j])
                    else :
                        #list_gtE_names.append(list_labelsEN[i][0])
                        list_gtE_names.append(list_labelsEN[i][j])
                    #print(list_labelsEN[i])
                    '''if len(list_labels[i][j] < 1):
                        print(list_labels[i][j])'''
                    #print(len(list_labels[i][j]))
                    list_ground_truthE[indexer] = np.array(list_labelsE[i][j]).flatten('F')
                    indexer += 1
                t_l_imgs.append(list_images)
                t_l_gtE.append(list_ground_truthE)
                t_list_gtE_names.append(list_gtE_names)
        else :
            # NOTE(review): this branch divides by self.seq_length while it is
            # None and references undefined names (list_labels, list_images);
            # it would raise if ever reached -- confirm seqLength is always
            # provided in video mode.
            if self.seq_length is None :
                list_ground_truth = np.zeros([int(counter_image/(self.seq_length*step)),self.seq_length,136])
                indexer = 0;
                for i in range(0,len(list_gt)): #For each dataset
                    counter = 0
                    for j in range(0,int(len(list_gt[i])/(self.seq_length*step))): #for number of data/batchsize
                        temp = []
                        temp2 = np.zeros([self.seq_length,136])
                        i_temp = 0
                        for z in range(counter,counter+(self.seq_length*step),step):#1 to seq_size
                            temp.append(list_gt[i][z])
                            temp2[i_temp] = list_labels[i][z]
                            i_temp+=1
                        list_images.append(temp)
                        list_ground_truth[indexer] = temp2
                        indexer += 1
                        counter+=self.seq_length*step
                    #print counter
                self.l_imgs = list_images
                self.l_gt = list_ground_truth
            else :
                counter_seq = 0;
                for i in range(0,len(list_gt)): #For each dataset
                    indexer = 0;
                    list_gtE_names = []
                    list_ground_truthE = np.zeros([int(len(list_gt[i])/(self.seq_length*step)),self.seq_length,2])#np.zeros([counter_image,2])
                    counter = 0
                    list_images = []
                    for j in range(0,int(len(list_gt[i])/(self.seq_length*step))): #for number of data/batchsize
                        temp = []
                        tmpn2 = []
                        temp3 = np.zeros([self.seq_length,2])
                        i_temp = 0
                        for z in range(counter,counter+(self.seq_length*step),step):#1 to seq_size
                            temp.append(list_gt[i][z])
                            temp3[i_temp] = list_labelsE[i][z].flatten('F')
                            if self.dbType == 0 :
                                #list_gtE_names.append(list_labelsEN[i][j])
                                tmpn2.append(list_labelsEN[i][j])
                            else :
                                #list_gtE_names.append(list_labelsEN[i][0])
                                tmpn2.append(list_labelsEN[i][0])
                            i_temp+=1
                            counter_seq+=1
                        list_images.append(temp)
                        list_ground_truthE[indexer] = temp3
                        list_gtE_names.append(tmpn2)
                        indexer += 1
                        counter+=self.seq_length*step
                    #print counter
                    t_l_imgs.append(list_images)
                    t_l_gtE.append(list_ground_truthE)
                    t_list_gtE_names.append(list_gtE_names)
        self.l_imgs = []
        self.l_gtE = []
        self.list_gtE_names = []
        #print('cimage : ',counter_image)
        if split :
            indexer = 0
            self.l_gtE = []
            totalData = len(t_l_imgs)
            perSplit = int(truediv(totalData, nSplit))
            for x in listSplit :
                print('split : ',x)
                begin = x*perSplit
                if x == nSplit-1 :
                    end = begin + (totalData - begin)
                else :
                    end = begin+perSplit
                print(begin,end,totalData)
                if not self.isVideo :
                    for i in range(begin,end) :
                        for j in range(len(t_l_imgs[i])):
                            #print('append ',t_l_imgs[i][j])
                            self.l_imgs.append(t_l_imgs[i][j])
                            self.l_gtE.append(t_l_gtE[i][j])
                            self.list_gtE_names.append(t_list_gtE_names[i][j])
                            indexer+=1
                else :
                    for i in range(begin,end) :
                        for j in range(len(t_l_imgs[i])): #seq counter
                            t_img = []
                            t_gtE = []
                            t_gt_EN = []
                            tmp = 0
                            for k in range(len(t_l_imgs[i][j])): #seq size
                                t_img.append(t_l_imgs[i][j][k])
                                t_gtE.append(t_l_gtE[i][j][k])
                                t_gt_EN.append(t_list_gtE_names[i][j][k])
                                tmp+=1
                            #print('append ',t_img)
                            self.l_imgs.append(t_img)
                            self.l_gtE.append(t_gtE)
                            self.list_gtE_names.append(t_gt_EN)
                            indexer+=1
            print(len(self.l_imgs))
            self.l_gtE = np.asarray(self.l_gtE)
        else :
            if not self.isVideo :
                self.l_gtE = np.zeros([counter_image,2])
                indexer = 0
                for i in range(len(t_l_imgs)):
                    for j in range(len(t_l_imgs[i])):
                        self.l_imgs.append(t_l_imgs[i][j])
                        print(i,j,'-',len(t_l_imgs[i]))
                        self.l_gtE[indexer] = t_l_gtE[i][j]
                        self.list_gtE_names.append(t_list_gtE_names[i][j])
                        indexer+=1
            else :
                self.l_gtE = np.zeros([counter_seq,self.seq_length,2])
                indexer = 0
                for i in range(len(t_l_imgs)): #dataset
                    for j in range(len(t_l_imgs[i])): #seq counter
                        t_img = []
                        t_gte = np.zeros([self.seq_length,2])
                        t_gt_n = []
                        t_gt_en = []
                        i_t = 0
                        for k in range(len(t_l_imgs[i][j])): #seq size
                            t_img.append(t_l_imgs[i][j][k])
                            t_gte[i_t] = t_l_gtE[i][j][k]
                            t_gt_en.append(t_list_gtE_names[i][j][k])
                            i_t+=1
                        self.l_imgs.append(t_img)
                        self.l_gtE[indexer] = t_gte
                        self.list_gtE_names.append(t_gt_en)
                        indexer+=1
        print('limgs : ',len(self.l_imgs))
    def __getitem__(self,index):
        """Load, optionally augment/noise, and transform one image (frame
        mode) or a sequence of images (video mode); return tensors plus the
        optional extras selected at construction time."""
        #Read all data, transform etc.
        #In video, the output will be : [batch_size, sequence_size, channel, width, height]
        #Im image : [batch_size, channel, width, height]
        l_imgs = []; l_ldmrk = []; l_VA = []; l_nc = []; l_qdrnt = []#,torch.FloatTensor(label),newChannel#,x,self.list_gt_names[index]
        if self.returnNoisy :
            l_nimgs = []
        if self.returnWeight :
            l_weights = []
        if not self.isVideo :
            x_l = [self.l_imgs[index]];labelE_l =[self.l_gtE[index].copy()];label_n =[self.list_gtE_names[index]]
        else :
            x_l = self.l_imgs[index];labelE_l =self.l_gtE[index].copy();label_n =self.list_gtE_names[index]
        #print('label n ',label_n)
        for x,labelE,ln in zip(x_l,labelE_l,label_n) :
            #print(x,labelE,label,ln)
            #print('label : ',ln)
            tImage = Image.open(x).convert("RGB")
            tImageB = None
            newChannel = None
            if self.augment :
                if self.returnNoisy :
                    sel = np.random.randint(0,3) #Skip occlusion as noise
                else :
                    sel = np.random.randint(0,4)
                #0 : neutral, 1 : horizontal flip, 2:random rotation, 3:occlusion
                if sel == 0 :
                    pass
                elif sel == 1 :
                    #flip = RandomHorizontalFlip_WL(1)
                    #tImage,label,newChannel = flip(tImage,label,newChannel)
                    #flip = RandomHorizontalFlip(1)
                    tImage = self.flip(tImage)
                elif sel == 2 :
                    #rot = RandomRotation_WL(45)
                    #tImage,label,newChannel = rot(tImage,label,newChannel)
                    #rot = RandomRotation(45)
                    tImage = self.rot(tImage)
                elif sel == 3 :
                    #occ = Occlusion_WL(1)
                    #tImage,label,newChannel = occ(tImage,label,newChannel)
                    #occ = Occlusion(1)
                    tImage = self.occ(tImage)
                #random crop
                if (np.random.randint(1,3)%2==0) :
                    #rc = RandomResizedCrop_WL(size = self.imageSize,scale = (0.5,1), ratio = (0.5, 1.5))
                    #tImage,label,newChannel= rc(tImage,label,newChannel)
                    #rc = RandomResizedCrop(size = self.imageSize,scale = (0.5,1), ratio = (0.5, 1.5))
                    tImage= self.rc(tImage)
            if self.returnNoisy :
                nImage = tImage.copy()
                #additional blurring
                if (np.random.randint(1,3)%2==0):
                    #sel_n = np.random.randint(1,6)
                    sel_n = np.random.randint(1,7)
                    #sel_n = 4
                    #gn = GeneralNoise_WL(1)
                    #tImage,label= gn(tImage,label,sel_n,np.random.randint(0,3))
                    if sel_n > 5 :
                        #occ = Occlusion(1)
                        nImage = self.occ(nImage)
                    else :
                        #rc = GeneralNoise(1)
                        #tImage = rc(tImage,sel_n,np.random.randint(0,3))
                        nImage = self.gn(nImage,sel_n,np.random.randint(0,3))
            label = torch.zeros(1)
            Mt = torch.zeros(1)
            if self.useIT :
                tImage = self.transformInternal(tImage)
                if self.returnNoisy :
                    nImage = self.transformInternal(nImage)
            else :
                tImage = self.transform(tImage)
                if self.returnNoisy :
                    nImage = self.transform(nImage)
            l_imgs.append(tImage); l_VA.append(torch.FloatTensor(labelE)); l_ldmrk.append(torch.FloatTensor(label))#,x,self.list_gt_names[index]
            if self.returnNoisy :
                l_nimgs.append(nImage)
            if self.returnQ :
                if self.dbType == 1 :
                    min = 0; max = 1;
                elif self.isSemaine == 1:
                    min = -1; max = 1;
                else :
                    min = -10; max = 10;
                l_qdrnt.append(toQuadrant(labelE, min, max, toOneHot=False))
            if self.returnWeight :
                v = labelE[0]
                # NOTE(review): arousal is indexed with labelE[0] (valence);
                # labelE[1] looks intended -- confirm.
                a = labelE[0]
                if self.dbType == 1 :#sewa
                    v = v*10+1
                    a = a*10+1
                elif self.isSemaine == 1 :
                    v = v*10+10
                    a = a*10+10
                else :
                    v = v+10
                    a = a+10
                v,a = int(v),int(a)
                '''print('the v :{} a : {} db : {}'.format(v,a,self.dbType))
                print(self.weight)
                print(self.weight.shape)'''
                l_weights.append([self.weight[v,0],self.weight[a,1]])
            l_nc.append(ln)
        #print('lnc : ',l_nc)
        if not self.isVideo :
            if self.returnQ :
                if self.returnNoisy :
                    res = [l_imgs[0], l_VA[0], l_ldmrk[0], Mt,l_nc[0],l_qdrnt[0],l_nimgs[0]]
                else :
                    res = [l_imgs[0], l_VA[0], l_ldmrk[0], Mt,l_nc[0],l_qdrnt[0]]
            else :
                if self.returnNoisy :
                    res = [l_imgs[0], l_VA[0], l_ldmrk[0], Mt,l_nc[0],l_nimgs[0]]
                else :
                    res = [l_imgs[0], l_VA[0], l_ldmrk[0], Mt,l_nc[0]]
            if self.returnWeight :
                res.append(torch.tensor(l_weights[0]))
            return res
        else :
            #lImgs = torch.Tensor(len(l_imgs),3,self.imageHeight,self.imageWidth)
            #lVA = torch.Tensor(len(l_VA),2)
            #lLD = torch.Tensor(len(l_ldmrk),136)
            lImgs = torch.stack(l_imgs)
            lVA = torch.stack(l_VA)
            lLD = torch.stack(l_ldmrk)
            #print(l_VA)
            l_qdrnt = torch.tensor((l_qdrnt))
            #print(lImgs.shape, l_imgs[0].shape, l_VA[0].shape,len(lImgs))
            #torch.cat(l_imgs, out=lImgs)
            #torch.cat(l_VA, out=lVA)
            #torch.cat(l_ldmrk, out=lLD)
            if self.returnQ :
                if self.returnNoisy :
                    res = [lImgs, lVA, lLD, Mt,l_nc,l_qdrnt,l_nimgs]
                else :
                    res = [lImgs, lVA, lLD, Mt,l_nc,l_qdrnt]
            else :
                if self.returnNoisy :
                    res = [lImgs, lVA, lLD, Mt,l_nc,l_nimgs]
                else :
                    res = [lImgs, lVA, lLD, Mt,l_nc]
            if self.returnWeight :
                l_weights = torch.tensor(l_weights)
                res.append(l_weights)
            return res
    def transformInternal(self, img):
        """Convert a PIL RGB image to a mean-subtracted C x H x W float tensor.

        NOTE(review): the result of transforms.Resize(224)(img) is discarded,
        so the input is NOT actually resized here -- confirm whether an
        assignment was intended.
        """
        transforms.Resize(224)(img)
        img = np.array(img, dtype=np.uint8)
        #img = img[:, :, ::-1] # RGB -> BGR
        img = img.astype(np.float32)
        img -= self.mean_bgr
        img = img.transpose(2, 0, 1) # C x H x W
        img = torch.from_numpy(img).float()
        return img
    def untransformInternal(self, img, lbl):
        """Invert transformInternal: tensor -> H x W x C uint8 image."""
        img = img.numpy()
        img = img.transpose(1, 2, 0)
        img += self.mean_bgr
        img = img.astype(np.uint8)
        img = img[:, :, ::-1]
        return img, lbl
    def param2theta(self,param, w, h):
        """Convert a 3x3 pixel-space affine matrix into a 2x3 matrix in
        normalized [-1, 1] image coordinates (inverted, then rescaled by
        image width w and height h)."""
        param = np.linalg.inv(param)
        theta = np.zeros([2,3])
        theta[0,0] = param[0,0]
        theta[0,1] = param[0,1]*h/w
        theta[0,2] = param[0,2]*2/w + theta[0,0] + theta[0,1] - 1
        theta[1,0] = param[1,0]*w/h
        theta[1,1] = param[1,1]
        theta[1,2] = param[1,2]*2/h + theta[1,0] + theta[1,1] - 1
        return theta
    def procrustes(self, X, Y, scaling=True, reflection='best'):
        """Procrustes analysis: best similarity transform mapping Y onto X.

        Returns (d, Z, tform): residual disparity, transformed Y, and a 3x3
        homogeneous transform matrix.
        """
        n,m = X.shape
        ny,my = Y.shape
        muX = X.mean(0)
        muY = Y.mean(0)
        X0 = X - muX
        Y0 = Y - muY
        ssX = (X0**2.).sum()
        ssY = (Y0**2.).sum()
        # centred Frobenius norm
        normX = np.sqrt(ssX)
        normY = np.sqrt(ssY)
        # scale to equal (unit) norm
        X0 /= normX
        Y0 /= normY
        if my < m:
            # NOTE(review): np.zeros(n, m-my) passes the second dimension as
            # a dtype and raises TypeError -- should be np.zeros((n, m-my)).
            Y0 = np.concatenate((Y0, np.zeros(n, m-my)),0)
        # optimum rotation matrix of Y
        A = np.dot(X0.T, Y0)
        U,s,Vt = np.linalg.svd(A,full_matrices=False)
        V = Vt.T
        T = np.dot(V, U.T)
        # NOTE(review): 'is not' below compares identity with a str literal
        # (SyntaxWarning on modern CPython) -- should be != 'best'.
        if reflection is not 'best':
            # does the current solution use a reflection?
            have_reflection = np.linalg.det(T) < 0
            # if that's not what was specified, force another reflection
            if reflection != have_reflection:
                V[:,-1] *= -1
                s[-1] *= -1
                T = np.dot(V, U.T)
        traceTA = s.sum()
        if scaling:
            # optimum scaling of Y
            b = traceTA * normX / normY
            # standarised distance between X and b*Y*T + c
            d = 1 - traceTA**2
            # transformed coords
            Z = normX*traceTA*np.dot(Y0, T) + muX
        else:
            b = 1
            d = 1 + ssY/ssX - 2 * traceTA * normY / normX
            Z = normY*np.dot(Y0, T) + muX
        # transformation matrix
        if my < m:
            T = T[:my,:]
        c = muX - b*np.dot(muY, T)
        #transformation values
        #tform = {'rotation':T, 'scale':b, 'translation':c}
        tform = np.append(b*T,[c],axis = 0).T
        tform = np.append(tform,[[0,0,1]],axis = 0)
        return d, Z, tform
    def __len__(self):
        """Number of samples (frame mode) or sequences (video mode)."""
        return len(self.l_imgs)
def convertName(input):
    """Map a split name such as 'train3', 'dev2' or 'test1' to a unique
    integer id (train: n, dev: 10 + n, test: 20 + n).

    Returns None when the name matches none of the three splits, matching
    the original implicit fallthrough.
    """
    number = int(re.search(r'\d+', input).group())
    for key, offset in (('train', 0), ('dev', 10), ('test', 20)):
        if key in input:
            return offset + number
def cropImage():
batch_size = 20
image_size = 224
isVideo = False
doConversion = False
lndmrkNumber =68
#lndmarkNumber = 49
isSewa = False
desireS = 224
smll = desireS!=224
ratio = truediv(desireS,224)
if ratio :
displaySize = str(128)
else :
displaySize = str(image_size)
err_denoised = curDir+"de-label-"+'semaine'+".txt"
checkDirMake(os.path.dirname(err_denoised))
print('file of denoising : ',err_denoised)
fileOfDen = open(err_denoised,'w')
fileOfDen.close()
#theDataSet = "AFEW-VA-Small"
#theDataSet = "AFEW-VA-Fixed"
#theDataSet = "SEWA-small"
#theDataSet = "SEWA"
theDataSet = "Sem-Short"
oriDir = '/home/deckyal/eclipse-workspace/DeepModel/src/MMTVA/data/'+theDataSet
#oriDir = '/media/deckyal/INT-2TB/comparisons/'+theDataSet + "/" + str(theNoiseType)+"-"+str(theNoiseParam)+'/'
targetDir = '/home/deckyal/eclipse-workspace/DeepModel/src/MMTVA/data/'+theDataSet+'-ext'
checkDirMake(targetDir)
data_transform = transforms.Compose([
transforms.Resize((image_size,image_size)),
transforms.ToTensor(),
transforms.Normalize(mean = (.5,.5,.5), std = (.5,.5,.5))
])
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
ID = ImageDataset(data_list = [theDataSet],onlyFace=True,transform=data_transform,image_size=image_size
,injectedLink = oriDir,isVideo = isVideo,giveCroppedFace=True,
annotName='annot',lndmarkNumber=lndmrkNumber,isSewa = isSewa)
#annotName = annotOri
dataloader = torch.utils.data.DataLoader(dataset = ID, batch_size = batch_size, shuffle = False)
unorm = UnNormalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
notNeutral = 0
list_nn = []
list_name_nn = []
print('inside',len(dataloader))
GD = GeneralDAEX(nClass = 3)
dircl1 = '/home/deckyal/eclipse-workspace/FaceTracking/src/toBeUsedT-5Aug/'+'Mix3-combineAE.pt'
dircl2 = '/home/deckyal/eclipse-workspace/FaceTracking/src/toBeUsedT-5Aug/'+'Mix3-combineCL.pt'
outDir = "mix3-"
model_lg = LogisticRegression(512, 3)
netAEC = DAEE()
netAEC.load_state_dict(torch.load(dircl1))
netAEC = netAEC.cuda()
netAEC.eval()
#theDataSetOut = theDataVideo+outDir
model_lg.load_state_dict(torch.load(dircl2))
model_lg = model_lg.cuda()
model_lg.eval()
#print(netAEC.fce.weight)
print(model_lg.linear2.weight)
#exit(0)
isVideo = False
#exit(0)
data_transform = transforms.Compose([
transforms.Resize((image_size,image_size)),
#transforms.CenterCrop(image_size),
transforms.ToTensor(),
#transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
transforms.Normalize(mean = (.5,.5,.5), std = (.5,.5,.5))
])
# Plot some training images
for inside in dataloader : # = next(iter(dataloader))
print(len(inside))
real_batch,gt,cr,x,gtcr = inside[0],inside[1],inside[2],inside[3],inside[4]
if isSewa :
gtcr2 = inside[5]
else :
gtcr2 = gtcr
print(real_batch.size())
for imgs,gts,imgcrs,fileName,gtcrs,gts2 in zip(real_batch.cuda(),gt.numpy(),cr.cuda(),x,gtcr.numpy(),gtcr2.numpy()):
print(fileName)
#first save the original image
#now getting the name and file path
filePath = fileName.split(os.sep)
annotPath = copy.copy(filePath)
if isSewa :
annotPathSewa = copy.copy(filePath)
filePathCleaned = copy.copy(filePath)
filePath[-2]+='-'+displaySize
filePathCleaned[-2]+='-'+displaySize+'-C'
if isSewa :
annotPath[-2]='annotOri-'+displaySize
annotPathSewa[-2]='annot-'+displaySize
else :
annotPath[-2]='annot-'+displaySize
newFilePath = '/'.join(filePath[:-1])
newAnnotPath = '/'.join(annotPath[:-1])
if isSewa :
newAnnotPathSewa = '/'.join(annotPathSewa[:-1])
newClFilePath = '/'.join(filePathCleaned[:-1])
#print(filePath,annotPath)
print(newFilePath, newAnnotPath)
#ifolder = filePath.index(theDataVideo)
image_name = filePath[-1]
annot_name = os.path.splitext(image_name)[0]+'.pts'
'''if isVideo :
middle = filePath[ifolder+2:-2]
print(middle)
middle = '/'.join(middle)
finalTargetPathI = targetDir+middle+'/img/'
finalTargetPathA = targetDir+middle+'/annot/'
else :
finalTargetPathI = targetDir+'img/'
finalTargetPathA = targetDir+'annot/' '''
checkDirMake(newFilePath)
checkDirMake(newAnnotPath)
if isSewa :
checkDirMake(newAnnotPathSewa)
checkDirMake(newClFilePath)
finalTargetImage = newFilePath+'/'+image_name
finalTargetImageCl = newClFilePath+'/'+image_name
finalTargetAnnot = newAnnotPath+'/'+annot_name
if isSewa :
finalTargetAnnotSewa = newAnnotPathSewa+'/'+annot_name
theOri = unorm(imgcrs.detach().cpu()).numpy()*255
theOri = cv2.cvtColor(theOri.transpose((1,2,0)).astype(np.uint8 ),cv2.COLOR_RGB2BGR)
if smll :
theOri = cv2.resize(theOri,(128,128))
cv2.imwrite(finalTargetImage,theOri)
if smll :
gtcrs[:lndmrkNumber] *= ratio
gtcrs[lndmrkNumber:] *= ratio
if isSewa :
gts2[:68] *= ratio
gts2[68:] *= ratio
write_kp_file(finalTargetAnnot,gtcrs,length = lndmrkNumber)
if isSewa :
write_kp_file(finalTargetAnnotSewa,gts2,length = 68)
#print(gtcrs)
#Now see the result back
r_image = cv2.imread(finalTargetImage)
print(finalTargetAnnot)
predicted = utils.read_kp_file(finalTargetAnnot, True)
for z22 in range(lndmrkNumber) :
#print(z22)
cv2.circle(r_image,(int(predicted[z22]),int(predicted[z22+lndmrkNumber])),2,(0,255,0))
if isSewa:
predicted2 = utils.read_kp_file(finalTargetAnnotSewa, True)
for z22 in range(68) :
cv2.circle(r_image,(int(predicted2[z22]),int(predicted2[z22+68])),2,(255,255,255))
cv2.imshow('test',r_image)
cv2.waitKey(1)
#exit(0)
#second get the cleaned one
#if cl_type == 1 :
recon_batch,xe = netAEC(imgs.unsqueeze(0))
#else :
# xe = netAEC(imgs.unsqueeze(0))
labels = model_lg(xe)
x, y = torch.max(labels, 1)
ll = y.cpu()[0]
print('res',ll)
#res = GD.forward(imgs.unsqueeze(0), y[0])[0].detach().cpu()
res = GD.forward(imgcrs.unsqueeze(0), y[0])[0].detach().cpu()
theRest = unorm(res).numpy()*255
print(theRest.shape)
theRest = cv2.cvtColor(theRest.transpose((1,2,0)).astype(np.uint8 ),cv2.COLOR_RGB2BGR)
if smll :
theRest = cv2.resize(theRest,(128,128))
theOri = unorm(imgs.detach().cpu()).numpy()*255
print(theOri.shape)
theOri = cv2.cvtColor(theOri.transpose((1,2,0)).astype(np.uint8 ),cv2.COLOR_RGB2BGR)
cv2.imshow('theori',theRest)
cv2.waitKey(1)
cv2.imwrite(finalTargetImageCl,theRest)
#third save the cleaned one
#exit(0)
'''
#print(theRest.shape)
theImage = theRest
#now getting the name and file path
filePath = fileName.split(os.sep)
ifolder = filePath.index(theDataVideo)
image_name = filePath[-1]
annot_name = os.path.splitext(image_name)[0]+'.pts'
if isVideo :
middle = filePath[ifolder+2:-2]
print(middle)
middle = '/'.join(middle)
finalTargetPathI = targetDir+middle+'/img/'
finalTargetPathA = targetDir+middle+'/annot/'
else :
finalTargetPathI = targetDir+'img/'
finalTargetPathA = targetDir+'annot/'
checkDirMake(finalTargetPathI)
checkDirMake(finalTargetPathA)
finalTargetImage = finalTargetPathI+image_name
finalTargetAnnot = finalTargetPathA+annot_name
print(finalTargetImage,finalTargetAnnot)'''
if ll != 0 or True:
if ll != 0:
notNeutral+=1
list_nn.append(ll)
list_name_nn.append(finalTargetImage)
fileOfDen = open(err_denoised,'a')
fileOfDen.write(str(int(ll))+','+finalTargetImage+"\n")
fileOfDen.close()
print('status : ',ll)
'''
cv2.imshow('ori',theOri)
cv2.waitKey(0)
cv2.imshow('after',theRest)
cv2.waitKey(0)'''
print(y,labels)
print("not neutral count : ",notNeutral)
def getDistributionAC():
    """Compute the valence/arousal histogram of the AffectChallenge data.

    Walks the train and validation splits, maps each continuous VA label
    from [-1, 1] to an integer bin in [0, 20], accumulates per-bin counts,
    saves them as .npy files in `targetDir` and writes a 2x2 bar-chart
    figure to '<tname>.png'. Relies on module-level names (transforms,
    torch, np, AFFChallenge) and terminates the process via exit(0).
    """
    import matplotlib.pyplot as plt
    targetDir = '/home/deckyal/eclipse-workspace/FaceTracking/FaceTracking-NR/StarGAN_Collections/stargan-master/distribution/'
    tname = "AC"
    image_size = 112
    batch_size = 20000
    transform = transforms.Compose([
        #transforms.Resize((image_size,image_size)),
        transforms.ToTensor(),
        transforms.Normalize(mean = (.5,.5,.5), std = (.5,.5,.5))
        ])
    # Dead branch (kept as-is): would re-plot histograms saved by earlier runs.
    if False :
        a = np.array(range(20))
        v = np.array(range(20))
        tx = np.array(range(20))
        for i in range(5) :
            z = np.load(targetDir+'VA-Train-'+str(i)+'.npy')
            la = z[:,0]
            lv = z[:,1]
            #print(la,la.shape)
            a+=la
            v+=lv
            z = np.load(targetDir+'VA-Test-'+str(i)+'.npy')
            la = z[:,0]
            lv = z[:,1]
            #print(la,la.shape)
            a+=la
            v+=lv
        fig = plt.figure()
        ax = plt.subplot(2, 2, 1)
        ax.bar(tx,a)
        ax.set_title('a')
        ax = plt.subplot(2, 2, 2)
        ax.bar(tx,v)
        ax.set_title('v')
        plt.show()
        #print(a)
        #print(v)
        exit(0)
    ID = AFFChallenge(data_list = ["AffectChallenge"],mode = 'Train',onlyFace = True, image_size =112,
        transform = transform,useIT = False,augment = False, step = 1,isVideo = False, seqLength = None, dbType = 0,
        returnQuadrant = False, returnNoisy = False, returnWeight = False)
    VD = AFFChallenge(data_list = ["AffectChallenge"],mode = 'Val',onlyFace = True, image_size =112,
        transform = transform,useIT = False,augment = False, step = 1,isVideo = False, seqLength = None, dbType = 0,
        returnQuadrant = False, returnNoisy = False, returnWeight = False)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # NOTE(review): `data` is undefined here and `dataloader` is never used
    # below - this looks like dead/leftover code and would raise NameError.
    dataloader = torch.utils.data.DataLoader(dataset = data, batch_size = batch_size, shuffle = True)
    dataloaderTrn = torch.utils.data.DataLoader(dataset = ID, batch_size = batch_size, shuffle = False)
    dataloaderVal = torch.utils.data.DataLoader(dataset = VD, batch_size = batch_size, shuffle = False)
    # Histogram bins (21 each): train counts (listV/listA), val counts
    # (listVt/listAt), listVx/listAx are only used as bar-chart x positions.
    # NOTE(review): the count arrays start pre-filled with 0..20 instead of
    # zeros, so the saved counts are offset by the bin index - confirm.
    listV = np.array(range(0,21))
    listA = np.array(range(0,21))
    listVx = np.array(range(0,21))
    listAx = np.array(range(0,21))
    listVt = np.array(range(0,21))
    listAt = np.array(range(0,21))
    #for real_batch,vas,gt,M,_ in (dataloaderTrn) :
    x = 0
    for lImgs,vas,gt,M,ex in (dataloaderTrn) :
        for va in vas :
            print(x,len(dataloaderTrn)*batch_size)
            #print(ex,gt,M)
            #print(va,vas)
            # Map VA from [-1, 1] to bin indices in [0, 20].
            lva = (va.cpu().numpy()) * 10+10
            name = 'AC-Train'
            print(va)
            print(lva)
            listV[int(round(lva[0]))]+=1
            listA[int(round(lva[1]))]+=1
            x+=1
    x = 0
    print(listV,listA)
    np.save(targetDir+name+'.npy',np.column_stack((listV,listA)))
    for real_batch,vas,gt,M,ex in (dataloaderVal) :
        for va in vas :
            print(x,len(dataloaderVal)*batch_size)
            lva = (va.cpu().numpy()) * 10+10
            name = 'AC-Test-'
            listVt[int(round(lva[0]))]+=1
            listAt[int(round(lva[1]))]+=1
            x+=1
    print(listVt,listAt)
    np.save(targetDir+name+'.npy',np.column_stack((listVt,listAt)))
    '''fig, ax = plt.subplots(nrows=1, ncols=2)
    for row in ax:
        for col in row:
            col.plot(x, y)'''
    fig = plt.figure()
    ax = plt.subplot(2, 2, 1)
    ax.bar(listVx,listV)
    ax.set_title('v train')
    ax = plt.subplot(2, 2, 2)
    ax.bar(listAx,listA)
    ax.set_title('A train')
    ax = plt.subplot(2, 2, 3)
    ax.bar(listVx,listVt)
    ax.set_title('v test')
    ax = plt.subplot(2, 2, 4)
    ax.bar(listAx,listAt)
    ax.set_title('A test')
    #plt.show()
    plt.savefig(tname+".png")
    exit(0)
def getDistribution():
    """Compute valence/arousal histograms over the five CV splits.

    For every test split, counts how often each discretised VA value occurs
    in the train and test partitions, tracks the raw label min/max, saves
    the per-split counts as .npy files in `targetDir` and writes a 2x2
    bar-chart figure per split. The dataset and its label range are chosen
    through the isAFEW / isSemaine flags. Relies on module-level names
    (transforms, torch, np, SEWAFEWReduced); terminates via exit(0).
    """
    import matplotlib.pyplot as plt
    targetDir = '/home/deckyal/eclipse-workspace/FaceTracking/FaceTracking-NR/StarGAN_Collections/stargan-master/distribution/'
    isAFEW = True
    isSemaine = True
    name = "AFEW"
    if not isAFEW :
        name = "SEWA"
    image_size = 224
    batch_size = 1000#5000
    transform = transforms.Compose([
        #transforms.Resize((image_size,image_size)),
        transforms.ToTensor(),
        transforms.Normalize(mean = (.5,.5,.5), std = (.5,.5,.5))
        ])
    # Dead branch (kept as-is): would re-plot histograms saved by earlier runs.
    if False :
        a = np.array(range(20))
        v = np.array(range(20))
        tx = np.array(range(20))
        for i in range(5) :
            z = np.load(targetDir+'VA-Train-'+str(i)+'.npy')
            la = z[:,0]
            lv = z[:,1]
            #print(la,la.shape)
            a+=la
            v+=lv
            z = np.load(targetDir+'VA-Test-'+str(i)+'.npy')
            la = z[:,0]
            lv = z[:,1]
            #print(la,la.shape)
            a+=la
            v+=lv
        fig = plt.figure()
        ax = plt.subplot(2, 2, 1)
        ax.bar(tx,a)
        ax.set_title('a')
        ax = plt.subplot(2, 2, 2)
        ax.bar(tx,v)
        ax.set_title('v')
        plt.show()
        #print(a)
        #print(v)
        exit(0)
    for split in range(5) :
        # Track the raw (undiscretised) label range seen in this split.
        minA,minV = 9999,9999
        maxA,maxV = -9999,-9999
        #split = 1
        multi_gpu = False
        testSplit = split
        print("Test split " , testSplit)
        nSplit = 5
        # Train on every split except the current test split.
        listSplit = []
        for i in range(nSplit):
            if i!=testSplit :
                listSplit.append(i)
        print(listSplit)
        #sem short
        #sem small
        if not isAFEW :
            ID = SEWAFEWReduced(data_list = ["SEWA-small"], dir_gt = None, onlyFace = True, image_size = image_size,
                transform = transform, useIT = True, augment = False, step = 1,split=True, nSplit = 5,listSplit=listSplit,
                isVideo=False, seqLength = 6,dbType = 1)
            VD = SEWAFEWReduced(data_list = ["SEWA-small"], dir_gt = None, onlyFace = True, image_size = image_size,
                transform = transform, useIT = True, augment = False, step = 1,split=True, nSplit = 5,listSplit=[testSplit],
                isVideo=False, seqLength = 6,dbType = 1)
        else :
            ''' ID = SEWAFEWReduced(data_list = ["AFEW-VA-Fixed"], dir_gt = None, onlyFace = True, image_size = image_size,
            transform = transform, useIT = True, augment = False, step = 1,split=True, nSplit = 5,listSplit=listSplit,
            isVideo=False, seqLength = 6,dbType = 0)
            VD = SEWAFEWReduced(data_list = ["AFEW-VA-Fixed"], dir_gt = None, onlyFace = True, image_size = image_size,
            transform = transform, useIT = True, augment = False, step = 1,split=True, nSplit = 5,listSplit=[testSplit],
            isVideo=False, seqLength = 6,dbType = 0)
            '''
            if isSemaine :
                ID = SEWAFEWReduced(data_list = ["Sem-Short"], dir_gt = None, onlyFace = True, image_size = image_size,
                    transform = transform, useIT = True, augment = False, step = 1,split=True, nSplit = 5,listSplit=listSplit,
                    isVideo=False, seqLength = 6,dbType = 0)
                VD = SEWAFEWReduced(data_list = ["Sem-Short"], dir_gt = None, onlyFace = True, image_size = image_size,
                    transform = transform, useIT = True, augment = False, step = 1,split=True, nSplit = 5,listSplit=[testSplit],
                    isVideo=False, seqLength = 6,dbType = 0)
            else :
                ID = SEWAFEWReduced(data_list = ["AFEW-VA-Fixed"], dir_gt = None, onlyFace = True, image_size = image_size,
                    transform = transform, useIT = True, augment = False, step = 1,split=True, nSplit = 5,listSplit=listSplit,
                    isVideo=False, seqLength = 6,dbType = 0)
                VD = SEWAFEWReduced(data_list = ["AFEW-VA-Fixed"], dir_gt = None, onlyFace = True, image_size = image_size,
                    transform = transform, useIT = True, augment = False, step = 1,split=True, nSplit = 5,listSplit=[testSplit],
                    isVideo=False, seqLength = 6,dbType = 0)
        dataloaderTrn = torch.utils.data.DataLoader(dataset = ID, batch_size = batch_size, shuffle = True)
        dataloaderVal = torch.utils.data.DataLoader(dataset = VD, batch_size = batch_size, shuffle = True)
        # Histogram bins; the count depends on the dataset's label range.
        # NOTE(review): bins start pre-filled with 0..N-1 instead of zeros,
        # so the saved counts are offset by the bin index - confirm intended.
        if isSemaine : #-1 to 1
            listV = np.array(range(0,20))
            listA = np.array(range(0,20))
            listVx = np.array(range(0,20))
            listAx = np.array(range(0,20))
            listVt = np.array(range(0,20))
            listAt = np.array(range(0,20))
            listVall = np.array(range(0,20))
            listAall = np.array(range(0,20))
        elif isAFEW : #-10 to 10
            listV = np.array(range(0,20))
            listA = np.array(range(0,20))
            listVx = np.array(range(0,20))
            listAx = np.array(range(0,20))
            listVt = np.array(range(0,20))
            listAt = np.array(range(0,20))
        else : #0 to 1
            listV = np.array(range(0,12))
            listA = np.array(range(0,12))
            listVx = np.array(range(0,12))
            listAx = np.array(range(0,12))
            listVt = np.array(range(0,12))
            listAt = np.array(range(0,12))
        x = 0
        temp = []
        for real_batch,vas,gt,M,_ in (dataloaderTrn) :
            for va in vas :
                print(x)
                #print(va,vas)
                print(va)
                t = va.cpu().numpy()
                # Map the raw VA pair to bin indices for this dataset's range.
                if isSemaine :
                    lva = (va.cpu().numpy()) * 10+10
                    name = 'SE-VA-Train-'
                elif not isAFEW :
                    lva = (va.cpu().numpy()) * 10+1
                    name = 'S-VA-Train-'
                    #name = 'SE-VA-Train-'
                else :
                    #print(va.cpu().numpy())
                    lva = (va.cpu().numpy())+10
                    name = 'VA-Train-'
                listV[int(round(lva[0]))]+=1
                listA[int(round(lva[1]))]+=1
                # NOTE(review): listVall is indexed with lva[1] (arousal) -
                # lva[0] was probably intended - and both *all arrays only
                # exist in the isSemaine branch above; confirm.
                listVall[int(round(lva[1]))]+=1
                listAall[int(round(lva[1]))]+=1
                print(lva)
                temp.append(va[0])
                x+=1
                if minV > t[0]:
                    minV = t[0]
                if maxV < t[0]:
                    maxV = t[0]
                if minA > t[1]:
                    minA = t[1]
                if maxA < t[1]:
                    maxA = t[1]
        '''plt.plot(temp, linestyle=':',marker='s')
        plt.show()'''
        x = 0
        print(listV,listA)
        np.save(targetDir+name+str(testSplit)+'.npy',np.column_stack((listV,listA)))
        for real_batch,vas,gt,M,_ in (dataloaderVal) :
            for va in vas :
                print(x)
                t = va.cpu().numpy()
                if isSemaine :
                    lva = (va.cpu().numpy()) * 10+10
                    #name = 'S-VA-Test-'
                    name = 'SE-VA-Test-'
                elif not isAFEW : #sewa
                    lva = (va.cpu().numpy()) * 10+1
                    name = 'S-VA-Test-'
                else :
                    lva = (va.cpu().numpy())+10
                    name = 'VA-Test-'
                listVt[int(round(lva[0]))]+=1
                listAt[int(round(lva[1]))]+=1
                x+=1
                if minV > t[0]:
                    minV = t[0]
                if maxV < t[0]:
                    maxV = t[0]
                if minA > t[1]:
                    minA = t[1]
                if maxA < t[1]:
                    maxA = t[1]
        print(listVt,listAt)
        np.save(targetDir+name+str(testSplit)+'.npy',np.column_stack((listVt,listAt)))
        print('minmax',minA,minV,maxA,maxV)
        '''fig, ax = plt.subplots(nrows=1, ncols=2)
        for row in ax:
            for col in row:
                col.plot(x, y)'''
        fig = plt.figure()
        ax = plt.subplot(2, 2, 1)
        ax.bar(listVx,listV)
        ax.set_title('v train')
        ax = plt.subplot(2, 2, 2)
        ax.bar(listAx,listA)
        ax.set_title('A train')
        ax = plt.subplot(2, 2, 3)
        ax.bar(listVx,listVt)
        ax.set_title('v test')
        ax = plt.subplot(2, 2, 4)
        ax.bar(listAx,listAt)
        ax.set_title('A test')
        #plt.show()
        plt.savefig(name+'-'+str(split)+".png")
        exit(0)
    exit(0)
def checkQuadrant() :
    """Sanity-check the VA -> quadrant mapping on the four corner points.

    Prints the one-hot quadrant encoding for each corner of the
    valence/arousal plane: [-10,-10], [-10,10], [10,-10], [10,10].
    """
    #Val, arou
    x = [-10,-10]
    y = [-10,10]
    z = [10,-10]
    a = [10,10]
    def toQuadrant(inputData = None, min = -10, max = 10, toOneHot = False):
        """Map a (valence, arousal) pair to a quadrant id (0..3) or one-hot.

        NOTE(review): threshold = truediv(min, max) evaluates to -1.0 for
        the default range [-10, 10]; the midpoint (min + max) / 2 == 0 was
        probably intended. The four corner points below are unaffected,
        but near-threshold inputs would be - confirm.
        NOTE(review): parameters `min`/`max` shadow the builtins.
        """
        threshold = truediv(min,max)
        vLow = False
        aLow = False
        q = 0
        if inputData[0] < threshold :
            vLow = True
        if inputData[1] < threshold :
            aLow = True
        # Quadrants: 0 = V high/A high, 1 = V low/A high,
        # 2 = V low/A low, 3 = V high/A low.
        if vLow and aLow :
            q = 2
        elif vLow and not aLow :
            q = 1
        elif not vLow and not aLow :
            q = 0
        else :
            q = 3
        if toOneHot :
            rest = np.zeros(4)
            rest[q]+=1
            return rest
        else :
            return q
    print(toQuadrant(inputData = x,toOneHot = True))
    print(toQuadrant(inputData = y,toOneHot = True))
    print(toQuadrant(inputData = z,toOneHot = True))
    print(toQuadrant(inputData = a,toOneHot = True))
| 111,306 | 37.381724 | 243 | py |
PROTES | PROTES-main/protes/protes.py | import jax
import jax.numpy as jnp
import optax
from time import perf_counter as tpc
def protes(f, d, n, m, k=100, k_top=10, k_gd=1, lr=5.E-2, r=5, seed=0,
        is_max=False, log=False, log_ind=False, info={}, P=None,
        with_info_i_opt_list=False, with_info_full=False):
    """Optimize a function on a d-dimensional grid with the PROTES method.

    On every iteration the method draws `k` candidate multi-indices from a
    probability distribution stored as a 3-part tensor-train (TT) tensor
    `P = [Yl, Ym, Yr]`, evaluates `f` on the whole batch, selects the
    `k_top` best candidates and performs `k_gd` Adam steps (learning rate
    `lr`) that increase their log-likelihood under `P`. The loop stops once
    `m` evaluations of `f` have been spent.

    Args:
        f: batch target function; maps an array [k, d] of multi-indices to
            k values.
        d: dimension of the grid.
        n: mode size (number of points per dimension).
        m: total budget of requests to f.
        k: samples drawn per iteration.
        k_top: number of best samples used for the likelihood update.
        k_gd: gradient steps per iteration.
        lr: learning rate for the Adam optimizer.
        r: TT-rank of the probability tensor.
        seed: seed for the jax random generator.
        is_max: if True, maximize instead of minimize.
        log, log_ind: console logging flags (values / also indices).
        info: dict filled with run statistics (in/out argument).
            NOTE(review): mutable default is shared between calls that do
            not pass their own dict - pass an explicit dict to inspect it.
        P: optional initial TT-tensor [Yl, Ym, Yr]; random if None.
        with_info_i_opt_list, with_info_full: store extended history in info.

    Returns:
        Tuple (i_opt, y_opt): best found multi-index and its value.
    """
    time = tpc()
    info.update({'d': d, 'n': n, 'm_max': m, 'm': 0, 'k': k, 'k_top': k_top,
        'k_gd': k_gd, 'lr': lr, 'r': r, 'seed': seed, 'is_max': is_max,
        'is_rand': P is None, 't': 0, 'i_opt': None, 'y_opt': None,
        'm_opt_list': [], 'i_opt_list': [], 'y_opt_list': []})
    if with_info_full:
        info.update({
            'P_list': [], 'I_list': [], 'y_list': []})

    rng = jax.random.PRNGKey(seed)

    if P is None:
        rng, key = jax.random.split(rng)
        P = _generate_initial(d, n, r, key)
    elif len(P[1].shape) != 4:
        # The middle part must stack all d-2 inner cores: shape [d-2, r, n, r].
        raise ValueError('Initial P tensor should have special format')

    optim = optax.adam(lr)
    state = optim.init(P)

    # Jit-compile the helpers; sampling and likelihood are vectorized over
    # the batch axis (last argument: per-sample keys / multi-indices).
    interface_matrices = jax.jit(_interface_matrices)
    sample = jax.jit(jax.vmap(_sample, (None, None, None, None, 0)))
    likelihood = jax.jit(jax.vmap(_likelihood, (None, None, None, None, 0)))

    @jax.jit
    def loss(P_cur, I_cur):
        # Negative mean log-likelihood of the elite candidates I_cur.
        Pl, Pm, Pr = P_cur
        Zm = interface_matrices(Pm, Pr)
        l = likelihood(Pl, Pm, Pr, Zm, I_cur)
        return jnp.mean(-l)

    loss_grad = jax.grad(loss)

    @jax.jit
    def optimize(state, P_cur, I_cur):
        # One Adam step on the TT-cores of the probability tensor.
        grads = loss_grad(P_cur, I_cur)
        updates, state = optim.update(grads, state)
        P_cur = jax.tree_util.tree_map(lambda u, p: p + u, updates, P_cur)
        return state, P_cur

    while True:
        Pl, Pm, Pr = P
        Zm = interface_matrices(Pm, Pr)

        rng, key = jax.random.split(rng)
        I = sample(Pl, Pm, Pr, Zm, jax.random.split(key, k))

        y = f(I)
        y = jnp.array(y)
        info['m'] += y.shape[0]

        is_new = _check(P, I, y, info, with_info_i_opt_list, with_info_full)

        if info['m'] >= m:
            info['t'] = tpc() - time
            break

        # Elite selection: indices of the k_top best samples of the batch.
        # NOTE(review): newer jax versions replace argsort's `kind` with
        # `stable=...` - verify against the pinned jax version.
        ind = jnp.argsort(y, kind='stable')
        ind = (ind[::-1] if is_max else ind)[:k_top]

        for _ in range(k_gd):
            state, P = optimize(state, P, I[ind, :])

        info['t'] = tpc() - time
        _log(info, log, log_ind, is_new)

    _log(info, log, log_ind, is_new, is_end=True)
    return info['i_opt'], info['y_opt']
def _check(P, I, y, info, with_info_i_opt_list, with_info_full):
"""Check the current batch of function values and save the improvement."""
ind_opt = jnp.argmax(y) if info['is_max'] else jnp.argmin(y)
i_opt_curr = I[ind_opt, :]
y_opt_curr = y[ind_opt]
is_new = info['y_opt'] is None
is_new = is_new or info['is_max'] and info['y_opt'] < y_opt_curr
is_new = is_new or not info['is_max'] and info['y_opt'] > y_opt_curr
if is_new:
info['i_opt'] = i_opt_curr
info['y_opt'] = y_opt_curr
if is_new or with_info_full:
info['m_opt_list'].append(info['m'])
info['y_opt_list'].append(info['y_opt'])
if with_info_i_opt_list or with_info_full:
info['i_opt_list'].append(info['i_opt'].copy())
if with_info_full:
info['P_list'].append([G.copy() for G in P])
info['I_list'].append(I.copy())
info['y_list'].append(y.copy())
return is_new
def _generate_initial(d, n, r, key):
"""Build initial random TT-tensor for probability."""
keyl, keym, keyr = jax.random.split(key, 3)
Yl = jax.random.uniform(keyl, (1, n, r))
Ym = jax.random.uniform(keym, (d-2, r, n, r))
Yr = jax.random.uniform(keyr, (r, n, 1))
return [Yl, Ym, Yr]
def _interface_matrices(Ym, Yr):
"""Compute the "interface matrices" for the TT-tensor."""
def body(Z, Y_cur):
Z = jnp.sum(Y_cur, axis=1) @ Z
Z /= jnp.linalg.norm(Z)
return Z, Z
Z, Zr = body(jnp.ones(1), Yr)
_, Zm = jax.lax.scan(body, Z, Ym, reverse=True)
return jnp.vstack((Zm[1:], Zr))
def _likelihood(Yl, Ym, Yr, Zm, i):
    """Compute the log-likelihood of multi-index `i` for the TT-tensor.

    `Zm` holds the right interface vectors precomputed by
    `_interface_matrices`. The cores are traversed left to right; `Q` is
    the running left interface obtained by fixing the already-visited
    indices.
    """
    def body(Q, data):
        I_cur, Y_cur, Z_cur = data
        # Distribution over the current mode: |left . core . right|, normalised.
        G = jnp.einsum('r,riq,q->i', Q, Y_cur, Z_cur)
        G = jnp.abs(G)
        G /= jnp.sum(G)
        # Fix index I_cur and absorb the matching core slice into Q.
        Q = jnp.einsum('r,rq->q', Q, Y_cur[:, I_cur, :])
        Q /= jnp.linalg.norm(Q)
        return Q, G[I_cur]
    # NOTE(review): the first core is paired with Yl[0, i[0], :] as its
    # "right interface", while _sample uses Zm[0] at the same position -
    # confirm this asymmetry between sampling and likelihood is intended.
    Q, yl = body(jnp.ones(1), (i[0], Yl, Yl[0, i[0], :]))
    Q, ym = jax.lax.scan(body, Q, (i[1:-1], Ym, Zm))
    Q, yr = body(Q, (i[-1], Yr, jnp.ones(1)))
    y = jnp.hstack((jnp.array(yl), ym, jnp.array(yr)))
    return jnp.sum(jnp.log(jnp.array(y)))
def _log(info, log=False, log_ind=False, is_new=False, is_end=False):
"""Print current optimization result to output."""
if not log or (not is_new and not is_end):
return
text = f'protes > '
text += f'm {info["m"]:-7.1e} | '
text += f't {info["t"]:-9.3e} | '
text += f'y {info["y_opt"]:-11.4e}'
if log_ind:
text += f' | i {" ".join([str(i) for i in info["i_opt"]])}'
if is_end:
text += ' <<< DONE'
print(text)
def _sample(Yl, Ym, Yr, Zm, key):
    """Generate one sample according to the given probability TT-tensor.

    Samples the multi-index mode by mode, left to right. `Zm` holds the
    right interface vectors precomputed by `_interface_matrices`; `Q` is
    the running left interface built from the already-sampled indices.
    """
    def body(Q, data):
        key_cur, Y_cur, Z_cur = data
        # Unnormalised distribution over the current mode: contract the
        # left interface Q, the core and the right interface Z_cur.
        G = jnp.einsum('r,riq,q->i', Q, Y_cur, Z_cur)
        G = jnp.abs(G)
        G /= jnp.sum(G)
        i = jax.random.choice(key_cur, jnp.arange(Y_cur.shape[1]), p=G)
        # Fix the sampled index and push it into the left interface.
        Q = jnp.einsum('r,rq->q', Q, Y_cur[:, i, :])
        Q /= jnp.linalg.norm(Q)
        return Q, i
    # One independent subkey per core (first, d-2 middle, last).
    keys = jax.random.split(key, len(Ym) + 2)
    Q, il = body(jnp.ones(1), (keys[0], Yl, Zm[0]))
    Q, im = jax.lax.scan(body, Q, (keys[1:-1], Ym, Zm))
    Q, ir = body(Q, (keys[-1], Yr, jnp.ones(1)))
    il = jnp.array(il, dtype=jnp.int32)
    ir = jnp.array(ir, dtype=jnp.int32)
    return jnp.hstack((il, im, ir))
| 5,895 | 28.333333 | 78 | py |
PROTES | PROTES-main/protes/protes_general.py | import jax
import jax.numpy as jnp
import optax
from time import perf_counter as tpc
def protes_general(f, n, m, k=100, k_top=10, k_gd=1, lr=5.E-2, r=5, seed=0,
        is_max=False, log=False, log_ind=False, info={}, P=None,
        with_info_i_opt_list=False, with_info_full=False):
    """PROTES optimizer for grids with (possibly) different mode sizes.

    Same sample / select elite / gradient-step loop as `protes`, but the
    grid is given by a list `n` of per-dimension mode sizes and the
    probability is stored as a list of d separate TT-cores (the helpers
    here use per-mode Python loops instead of jax.lax.scan).

    Args:
        f: batch target function mapping [samples, d] multi-indices to values.
        n: list of mode sizes, one per dimension.
        m: total budget of requests to f.
        k: samples per iteration; k_top: elite size; k_gd: gradient steps.
        lr: Adam learning rate; r: TT-rank; seed: jax PRNG seed.
        is_max: maximize instead of minimize.
        log, log_ind: console logging flags.
        info: dict filled with run statistics (in/out argument).
            NOTE(review): mutable default is shared across calls that do
            not pass their own dict.
        P: optional initial list of TT-cores; random if None.
        with_info_i_opt_list, with_info_full: store extended history in info.

    Returns:
        Tuple (i_opt, y_opt): best found multi-index and its value.
    """
    time = tpc()
    info.update({'n': n, 'm_max': m, 'm': 0, 'k': k, 'k_top': k_top,
        'k_gd': k_gd, 'lr': lr, 'r': r, 'seed': seed, 'is_max': is_max,
        'is_rand': P is None, 't': 0, 'i_opt': None, 'y_opt': None,
        'm_opt_list': [], 'i_opt_list': [], 'y_opt_list': []})
    if with_info_full:
        info.update({
            'P_list': [], 'I_list': [], 'y_list': []})

    rng = jax.random.PRNGKey(seed)

    if P is None:
        rng, key = jax.random.split(rng)
        P = _generate_initial(n, r, key)

    optim = optax.adam(lr)
    state = optim.init(P)

    # Vectorize sampling and likelihood over the batch axis.
    sample = jax.jit(jax.vmap(_sample, (None, 0)))
    likelihood = jax.jit(jax.vmap(_likelihood, (None, 0)))

    @jax.jit
    def loss(P_cur, I_cur):
        # Negative mean log-likelihood of the elite candidates I_cur.
        return jnp.mean(-likelihood(P_cur, I_cur))

    loss_grad = jax.grad(loss)

    @jax.jit
    def optimize(state, P_cur, I_cur):
        # One Adam step on the list of TT-cores.
        grads = loss_grad(P_cur, I_cur)
        updates, state = optim.update(grads, state)
        P_cur = jax.tree_util.tree_map(lambda u, p: p + u, updates, P_cur)
        return state, P_cur

    while True:
        rng, key = jax.random.split(rng)
        I = sample(P, jax.random.split(key, k))

        y = f(I)
        y = jnp.array(y)
        info['m'] += y.shape[0]

        is_new = _check(P, I, y, info, with_info_i_opt_list, with_info_full)

        if info['m'] >= m:
            info['t'] = tpc() - time
            break

        # Elite selection: indices of the k_top best samples of the batch.
        # NOTE(review): newer jax versions replace argsort's `kind` with
        # `stable=...` - verify against the pinned jax version.
        ind = jnp.argsort(y, kind='stable')
        ind = (ind[::-1] if is_max else ind)[:k_top]

        for _ in range(k_gd):
            state, P = optimize(state, P, I[ind, :])

        info['t'] = tpc() - time
        _log(info, log, log_ind, is_new)

    _log(info, log, log_ind, is_new, is_end=True)
    return info['i_opt'], info['y_opt']
def _check(P, I, y, info, with_info_i_opt_list, with_info_full):
"""Check the current batch of function values and save the improvement."""
ind_opt = jnp.argmax(y) if info['is_max'] else jnp.argmin(y)
i_opt_curr = I[ind_opt, :]
y_opt_curr = y[ind_opt]
is_new = info['y_opt'] is None
is_new = is_new or info['is_max'] and info['y_opt'] < y_opt_curr
is_new = is_new or not info['is_max'] and info['y_opt'] > y_opt_curr
if is_new:
info['i_opt'] = i_opt_curr
info['y_opt'] = y_opt_curr
if is_new or with_info_full:
info['m_opt_list'].append(info['m'])
info['y_opt_list'].append(info['y_opt'])
if with_info_i_opt_list or with_info_full:
info['i_opt_list'].append(info['i_opt'].copy())
if with_info_full:
info['P_list'].append([G.copy() for G in P])
info['I_list'].append(I.copy())
info['y_list'].append(y.copy())
return is_new
def _generate_initial(n, r, key):
"""Build initial random TT-tensor for probability."""
d = len(n)
r = [1] + [r]*(d-1) + [1]
keys = jax.random.split(key, d)
Y = []
for j in range(d):
Y.append(jax.random.uniform(keys[j], (r[j], n[j], r[j+1])))
return Y
def _interface_matrices(Y):
"""Compute the "interface matrices" for the TT-tensor."""
d = len(Y)
Z = [[]] * (d+1)
Z[0] = jnp.ones(1)
Z[d] = jnp.ones(1)
for j in range(d-1, 0, -1):
Z[j] = jnp.sum(Y[j], axis=1) @ Z[j+1]
Z[j] /= jnp.linalg.norm(Z[j])
return Z
def _likelihood(Y, I):
    """Return the log-likelihood of multi-index `I` under the TT-tensor `Y`."""
    d = len(Y)
    Z = _interface_matrices(Y)

    # Distribution of the first mode, given the right interfaces.
    dist = jnp.abs(jnp.einsum('riq,q->i', Y[0], Z[1]))
    dist = dist / dist.sum()
    probs = [dist[I[0]]]
    left = Y[0][0, I[0], :]  # left interface after fixing the first index

    for j in range(1, d):
        dist = jnp.abs(jnp.einsum('r,riq,q->i', left, Y[j], Z[j+1]))
        dist = dist / jnp.sum(dist)
        probs.append(dist[I[j]])
        left = left @ Y[j][:, I[j], :]
        left = left / jnp.linalg.norm(left)

    return jnp.sum(jnp.log(jnp.array(probs)))
def _log(info, log=False, log_ind=False, is_new=False, is_end=False):
"""Print current optimization result to output."""
if not log or (not is_new and not is_end):
return
text = f'protes > '
text += f'm {info["m"]:-7.1e} | '
text += f't {info["t"]:-9.3e} | '
text += f'y {info["y_opt"]:-11.4e}'
if log_ind:
text += f' | i {" ".join([str(i) for i in info["i_opt"]])}'
if is_end:
text += ' <<< DONE'
print(text)
def _sample(Y, key):
    """Generate one sample according to the given probability TT-tensor.

    Samples the multi-index mode by mode, left to right, using the right
    interface vectors from `_interface_matrices` and a running left
    interface built from the already-sampled indices.
    """
    d = len(Y)
    keys = jax.random.split(key, d)  # one independent subkey per mode
    I = jnp.zeros(d, dtype=jnp.int32)
    Z = _interface_matrices(Y)
    # Distribution of the first mode: |core . right interface|, normalised.
    G = jnp.einsum('riq,q->i', Y[0], Z[1])
    G = jnp.abs(G)
    G /= G.sum()
    i = jax.random.choice(keys[0], jnp.arange(Y[0].shape[1]), p=G)
    I = I.at[0].set(i)
    Z[0] = Y[0][0, i, :]  # Z[j-1] is reused below as the left interface
    for j in range(1, d):
        G = jnp.einsum('r,riq,q->i', Z[j-1], Y[j], Z[j+1])
        G = jnp.abs(G)
        G /= jnp.sum(G)
        i = jax.random.choice(keys[j], jnp.arange(Y[j].shape[1]), p=G)
        I = I.at[j].set(i)
        # Fix the sampled index and renormalise the left interface.
        Z[j] = Z[j-1] @ Y[j][:, i, :]
        Z[j] /= jnp.linalg.norm(Z[j])
    return I
| 5,369 | 25.453202 | 78 | py |
PROTES | PROTES-main/protes/animation.py | import jax.numpy as jnp
import matplotlib as mpl
from matplotlib import cm
from matplotlib.animation import FuncAnimation
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
from matplotlib.ticker import LinearLocator
import numpy as np
import os
from time import perf_counter as tpc
from .protes_general import protes_general
mpl.rc('animation', html='jshtml')
mpl.rcParams['animation.embed_limit'] = 2**128
def _func_on_grid(f, a, b, n1, n2):
I1 = np.arange(n1)
I2 = np.arange(n2)
I1, I2 = np.meshgrid(I1, I2)
I = np.hstack([I1.reshape(-1, 1), I2.reshape(-1, 1)])
Y = f(I).reshape(n1, n2)
X1 = I1 / (n1 - 1) * (b - a) + a
X2 = I2 / (n2 - 1) * (b - a) + a
return X1, X2, Y
def _p_full(P):
return np.einsum('riq,qjs->rijs', *P)[0, :, :, 0]
def _plot_2d(fig, ax, Y, i_opt_real=None):
    """Draw the 2D array Y as a heatmap on `ax`, marking the true optimum.

    Args:
        fig: parent figure (unused here; kept for API symmetry with _plot_3d).
        ax: target matplotlib axis.
        Y: 2D array of values to display.
        i_opt_real: optional (i1, i2) index of the known optimum, drawn as
            a star marker.

    Returns:
        The AxesImage from imshow (updated in place by the animation).
    """
    img = ax.imshow(Y, cmap=cm.coolwarm, alpha=0.8)
    if i_opt_real is not None:
        ax.scatter(*i_opt_real, s=500, c='#ffbf00', marker='*', alpha=0.9)
    ax.set_xlim(0, Y.shape[0])
    ax.set_ylim(0, Y.shape[1])
    ax.axis('off')
    return img
def _plot_3d(fig, ax, title, X1, X2, Y):
    """Draw the surface Y over (X1, X2) on the 3D axis `ax` with `title`.

    Returns the created surface artist.
    """
    ax.set_title(title, fontsize=16)
    surf = ax.plot_surface(X1, X2, Y, cmap=cm.coolwarm,
        linewidth=0, antialiased=False)
    ax.zaxis.set_major_locator(LinearLocator(10))
    ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
    # fig.colorbar(surf, ax=ax, shrink=0.3, aspect=10)
    return surf
def animate(f, a, b, n, info, i_opt_real=None, fpath=None):
    """Render the PROTES optimization history for a 2D problem.

    Draws the target function and the probability tensor (3D surfaces and
    heatmaps), then animates the sampled points, the top-k candidates and
    the running optimum over the iterations stored in `info`. The result
    is saved as a GIF when `fpath` is given, otherwise it is displayed.

    Args:
        f: target function acting on batches of 2D multi-indices.
        a, b: grid bounds used to map indices to spatial coordinates.
        n: number of grid points per dimension.
        info: history dict built by `protes_general(..., with_info_full=True)`.
        i_opt_real: known true optimum index (drawn as a star), optional.
        fpath: output path for the GIF, optional.
    """
    y_opt_real = None if i_opt_real is None else f(i_opt_real.reshape(1, -1))[0]

    fig = plt.figure(figsize=(16, 16))
    plt.subplots_adjust(wspace=0.3, hspace=0.3)
    ax1 = fig.add_subplot(221, projection='3d')
    ax2 = fig.add_subplot(222)
    ax3 = fig.add_subplot(223, projection='3d')
    ax4 = fig.add_subplot(224)

    X1, X2, Y = _func_on_grid(f, a, b, n, n)
    P = _p_full(info['P_list'][0])

    img_y_3d = _plot_3d(fig, ax1, 'Target function', X1, X2, Y)
    img_p_3d = _plot_3d(fig, ax3, 'Probability tensor', X1, X2, P)
    img_y_2d = _plot_2d(fig, ax2, Y, i_opt_real)
    img_p_2d = _plot_2d(fig, ax4, P, i_opt_real)

    # Markers: current optimum (diamond), all requested points, the top-k
    # elite points, and the dashed path of optima found so far.
    img_opt = ax2.scatter(0, 0, s=150, c='#EE17DA', marker='D')
    img_req = ax2.scatter(0, 0, s= 70, c='#8b1d1d')
    img_req_top1 = ax2.scatter(0, 0, s= 110, c='#ffcc00', alpha=0.8)
    img_req_top2 = ax4.scatter(0, 0, s= 110, c='#ffcc00')
    img_hist, = ax2.plot([], [], '--', c='#485536', linewidth=1, markersize=0)

    def update(k, *args):
        """Redraw frame k from the stored optimization history."""
        i_opt = info['i_opt_list'][k]
        y_opt = info['y_opt_list'][k]
        m = info['m_opt_list'][k]
        I = info['I_list'][k]
        y = info['y_list'][k]
        e = None if y_opt_real is None else abs(y_opt_real - y_opt)
        P = _p_full(info['P_list'][k])

        # Select the top-k candidates exactly as the optimizer does.
        ind = jnp.argsort(y, kind='stable')
        ind = (ind[::-1] if info['is_max'] else ind)[:info['k_top']]
        I_top = I[ind, :]

        # The 3D probability surface cannot be updated in place.
        ax3.clear()
        _plot_3d(fig, ax3, 'Probability tensor', X1, X2, P)
        img_p_2d.set_array(P)
        img_opt.set_offsets(np.array([i_opt[0], i_opt[1]]))
        img_req.set_offsets(I)
        img_req_top1.set_offsets(I_top)
        img_req_top2.set_offsets(I_top)

        pois_x, pois_y = [], []
        for i in info['i_opt_list'][:(k+1)]:
            pois_x.append(i[0])
            pois_y.append(i[1])
        img_hist.set_data(pois_x, pois_y)

        title = f'Queries: {m:-7.1e}'
        if e is None:
            title += f' | Opt : {y_opt:-11.4e}'
        else:
            title += f' | Error : {e:-7.1e}'
        ax2.set_title(title, fontsize=20)

        return img_p_2d, img_opt, img_req, img_req_top1, img_req_top2, img_hist

    anim = FuncAnimation(fig, update, interval=30,
        frames=len(info['y_list']), blit=True, repeat=False)

    if fpath:
        anim.save(fpath, writer='pillow', fps=0.7)
    else:
        # BUG FIX: FuncAnimation has no `show` method (the original called
        # anim.show()); display through pyplot while `anim` stays alive.
        plt.show()
def animation(f, a, b, n=501, m=int(1.E+4), k=100, k_top=10, k_gd=1, lr=5.E-2,
        i_opt_real=None, fpath='animation/animation.gif', is_max=False):
    """Animation of the PROTES work for the 2D case.

    Runs `protes_general` on an n x n grid with full history collection
    and then renders the run with `animate`, saving the GIF to `fpath`.

    Args:
        f: batch target function on 2D multi-indices.
        a, b: grid bounds; n: grid points per dimension.
        m, k, k_top, k_gd, lr: optimizer budget and hyper-parameters
            (forwarded to protes_general).
        i_opt_real: known true optimum index to mark in the plots, optional.
        fpath: output path for the generated GIF.
        is_max: maximize instead of minimize.
    """
    print('\n... start optimization ...')
    t = tpc()
    info = {}
    # with_info_full keeps P/I/y per iteration so every frame can be rebuilt.
    i_opt, y_opt = protes_general(f, [n, n], m, k, k_top, k_gd, lr, info=info,
        is_max=is_max, log=True, with_info_full=True)
    print(f'Optimization is ready (total time {tpc()-t:-8.2f} sec)')

    print('\n... start building animation ...')
    t = tpc()
    if os.path.dirname(fpath):
        os.makedirs(os.path.dirname(fpath), exist_ok=True)
    animate(f, a, b, n, info, i_opt_real, fpath)
    print(f'Animation is ready (total time {tpc()-t:-8.2f} sec)')
| 4,670 | 30.993151 | 80 | py |
PROTES | PROTES-main/demo/demo_func.py | import jax.numpy as jnp
from time import perf_counter as tpc
from protes import protes
def func_build(d, n):
    """Ackley function. See https://www.sfu.ca/~ssurjano/ackley.html."""
    lower, upper = -32.768, +32.768     # grid bounds
    A, B, C = 20., 0.2, 2. * jnp.pi     # standard Ackley parameters

    def func(I):
        """Target function: y=f(I); [samples,d] -> [samples]."""
        # Map integer grid indices onto the continuous domain.
        X = I / (n - 1) * (upper - lower) + lower
        term_sq = -A * jnp.exp(-B * jnp.sqrt(jnp.sum(X**2, axis=1) / d))
        term_cos = -jnp.exp(jnp.sum(jnp.cos(C * X), axis=1) / d)
        return term_sq + term_cos + A + jnp.exp(1.)

    return func
def demo():
    """A simple demonstration for discretized multivariate analytic function.
    We will find the minimum of an implicitly given "d"-dimensional array
    having "n" elements in each dimension. The array is obtained from the
    discretization of an analytic function.
    The result in console should looks like this (note that the exact minimum
    of this function is y = 0 and it is reached at the origin of coordinates):
    protes > m 1.0e+02 | t 3.190e+00 | y 2.0214e+01
    protes > m 2.0e+02 | t 3.203e+00 | y 1.8211e+01
    protes > m 5.0e+02 | t 3.216e+00 | y 1.8174e+01
    protes > m 6.0e+02 | t 3.220e+00 | y 1.7491e+01
    protes > m 7.0e+02 | t 3.224e+00 | y 1.7078e+01
    protes > m 8.0e+02 | t 3.228e+00 | y 1.6180e+01
    protes > m 1.1e+03 | t 3.238e+00 | y 1.4116e+01
    protes > m 1.4e+03 | t 3.250e+00 | y 8.4726e+00
    protes > m 2.7e+03 | t 3.293e+00 | y 0.0000e+00
    protes > m 1.0e+04 | t 3.534e+00 | y 0.0000e+00 <<< DONE
    RESULT | y opt = 0.0000e+00 | time = 3.5459
    """
    d = 7                # Dimension
    n = 11               # Mode size
    m = int(1.E+4)       # Number of requests to the objective function
    f = func_build(d, n) # Target function, which defines the array elements

    # Run the optimizer and report the best value and the total wall time.
    t = tpc()
    i_opt, y_opt = protes(f, d, n, m, log=True)
    print(f'\nRESULT | y opt = {y_opt:-11.4e} | time = {tpc()-t:-10.4f}')
if __name__ == '__main__':
demo()
| 2,206 | 30.084507 | 78 | py |
PROTES | PROTES-main/calc/calc_one.py | import numpy as np
import os
from time import perf_counter as tpc
from jax.config import config
config.update('jax_enable_x64', True)
os.environ['JAX_PLATFORM_NAME'] = 'cpu'
from protes import protes
from teneva_bm import BmQuboKnapAmba
from opti import *
Optis = {
'Our': OptiProtes,
'BS-1': OptiTTOpt,
'BS-2': OptiOptimatt,
'BS-3': OptiOPO,
'BS-4': OptiPSO,
'BS-5': OptiNB,
'BS-6': OptiSPSA,
'BS-7': OptiPortfolio,
}
class Log:
    """Minimal logger that mirrors messages to stdout and a text file."""

    def __init__(self, fpath='log_one.txt'):
        self.fpath = fpath
        self.is_new = True  # first write truncates the file, later ones append
        folder = os.path.dirname(self.fpath)
        if folder:
            os.makedirs(folder, exist_ok=True)

    def __call__(self, text):
        """Print `text` and write it (with a trailing newline) to the file."""
        print(text)
        mode = 'w' if self.is_new else 'a'
        with open(self.fpath, mode) as f:
            f.write(text + '\n')
        self.is_new = False
def calc_one(m=int(1.E+5), rep=10):
    """Benchmark all optimizers on one QUBO knapsack problem.

    Runs every optimizer from the module-level `Optis` dict `rep` times
    (seeds 0..rep-1) with a budget of `m` requests on the 50-dimensional
    BmQuboKnapAmba benchmark, logging each run and a final mean/best
    summary per optimizer via `Log`.

    Args:
        m: budget of requests to the benchmark per run.
        rep: number of repetitions (seeds) per optimizer.
    """
    log = Log()
    res = {}

    bm = BmQuboKnapAmba(d=50, name='P-14').prep()
    log(bm.info())

    for name, Opti in Optis.items():
        res[name] = []
        for seed in range(rep):
            np.random.seed(seed)  # baselines rely on the global numpy RNG
            opti = Opti(name=name)
            opti.prep(bm.get, bm.d, bm.n, m, is_f_batch=True)
            if name == 'Our':
                opti.opts(seed=seed)  # only PROTES takes an explicit seed
            opti.optimize()
            res[name].append(opti.y)
            log(opti.info() + f' # {seed+1:-3d}')
        log('')

    # Aggregate a mean / best summary per optimizer.
    text = '\n\n\n\n--- RESULT ---\n\n'
    for name, Opti in Optis.items():
        y = np.array(res[name])
        text += name + ' '*max(0, 10-len(name)) + ' >>> '
        text += f'Mean: {np.mean(y):-12.6e} | Best: {np.min(y):-12.6e}\n'
    log(text)
if __name__ == '__main__':
calc_one()
| 1,716 | 22.202703 | 75 | py |
PROTES | PROTES-main/calc/calc.py | import matplotlib as mpl
import numpy as np
import os
import pickle
import sys
from time import perf_counter as tpc
# Global matplotlib configuration used for the paper figures.
mpl.rcParams.update({
    'font.family': 'normal',
    'font.serif': [],
    'font.sans-serif': [],
    'font.monospace': [],
    'font.size': 12,
    'text.usetex': False,
})
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
import seaborn as sns
# Seaborn styling (applied on top of the matplotlib settings above).
sns.set_context('paper', font_scale=2.5)
sns.set_style('white')
sns.mpl.rcParams['legend.frameon'] = 'False'
# Run JAX in double precision on the CPU.
from jax.config import config
config.update('jax_enable_x64', True)
os.environ['JAX_PLATFORM_NAME'] = 'cpu'
import jax.numpy as jnp
from constr import ind_tens_max_ones
from teneva_bm import *
# Benchmark suite: 10 analytic functions, 4 QUBO problems and 6 optimal
# control problems (with and without constraints).
bms = [
    BmFuncAckley(d=7, n=16, name='P-01'),
    BmFuncAlpine(d=7, n=16, name='P-02'),
    BmFuncExp(d=7, n=16, name='P-03'),
    BmFuncGriewank(d=7, n=16, name='P-04'),
    BmFuncMichalewicz(d=7, n=16, name='P-05'),
    BmFuncPiston(d=7, n=16, name='P-06'),
    BmFuncQing(d=7, n=16, name='P-07'),
    BmFuncRastrigin(d=7, n=16, name='P-08'),
    BmFuncSchaffer(d=7, n=16, name='P-09'),
    BmFuncSchwefel(d=7, n=16, name='P-10'),
    BmQuboMaxcut(d=50, name='P-11'),
    BmQuboMvc(d=50, name='P-12'),
    BmQuboKnapQuad(d=50, name='P-13'),
    BmQuboKnapAmba(d=50, name='P-14'),
    BmOcSimple(d=25, name='P-15'),
    BmOcSimple(d=50, name='P-16'),
    BmOcSimple(d=100, name='P-17'),
    BmOcSimpleConstr(d=25, name='P-18'),
    BmOcSimpleConstr(d=50, name='P-19'),
    BmOcSimpleConstr(d=100, name='P-20'),
]
# Benchmark names grouped by problem class (used for special-case handling).
BM_FUNC = ['P-01', 'P-02', 'P-03', 'P-04', 'P-05', 'P-06', 'P-07',
           'P-08', 'P-09', 'P-10']
BM_QUBO = ['P-11', 'P-12', 'P-13', 'P-14']
BM_OC = ['P-15', 'P-16', 'P-17']
BM_OC_CONSTR = ['P-18', 'P-19', 'P-20']
from opti import *
# Registry of optimizers: our method (PROTES) plus baselines.
Optis = {
    'Our': OptiProtes,
    'BS-1': OptiTTOpt,
    'BS-2': OptiOptimatt,
    'BS-3': OptiOPO,
    'BS-4': OptiPSO,
    'BS-5': OptiNB,
    'BS-6': OptiSPSA,
    'BS-7': OptiPortfolio,
}
class Log:
    """Console logger that also mirrors every message to `fpath`.

    The file is truncated on the first write and appended to afterwards.
    """

    def __init__(self, fpath='log.txt'):
        self.fpath = fpath
        self.is_new = True
        parent = os.path.dirname(self.fpath)
        if parent:
            os.makedirs(parent, exist_ok=True)

    def __call__(self, text):
        print(text)
        with open(self.fpath, 'w' if self.is_new else 'a') as out:
            out.write(text + '\n')
        self.is_new = False
def calc(m=int(1.E+4), seed=0):
    """Run every optimizer on every benchmark and pickle the results.

    Args:
        m: budget of objective-function requests per optimizer run.
        seed: random seed (reset before each benchmark and each optimizer).
    """
    log = Log()
    res = {}
    for bm in bms:
        np.random.seed(seed)
        if bm.name in BM_FUNC:
            # We carry out a small random shift of the function's domain,
            # so that the optimum does not fall into the middle of the domain:
            bm = _prep_bm_func(bm)
        else:
            bm.prep()
        log(bm.info())
        res[bm.name] = {}
        for opti_name, Opti in Optis.items():
            np.random.seed(seed)
            opti = Opti(name=opti_name)
            opti.prep(bm.get, bm.d, bm.n, m, is_f_batch=True)
            if bm.name in BM_OC_CONSTR and opti_name == 'Our':
                # Problem with constraint for PROTES (we use the initial
                # approximation of the special form in this case):
                P = ind_tens_max_ones(bm.d, 3, opti.opts_r)
                Pl = jnp.array(P[0], copy=True)
                Pm = jnp.array(P[1:-1], copy=True)
                Pr = jnp.array(P[-1], copy=True)
                P = [Pl, Pm, Pr]
                opti.opts(P=P)
            opti.optimize()
            log(opti.info())
            # Store request counts, best-value history and the final optimum;
            # results are re-saved to disk after every optimizer run.
            res[bm.name][opti.name] = [opti.m_list, opti.y_list, opti.y]
            _save(res)
        log('\n\n')
def plot(m_min=1.E+0):
    """Plot convergence curves for three selected benchmarks into deps.png.

    `plot_opts` selects which benchmarks are drawn and optional y-limits;
    the 'inv' flag flips the sign of the objective before plotting.
    `m_min` is the smallest request count shown on the x-axis.
    """
    plot_opts = {
        'P-02': {},
        'P-14': {'y_min': 1.8E+3, 'y_max': 3.2E+3, 'inv': True},
        'P-16': {'y_min': 1.E-2, 'y_max': 2.E+0},
    }
    res = _load()
    fig, axs = plt.subplots(1, 3, figsize=(24, 8))
    plt.subplots_adjust(wspace=0.3)
    i = -1
    for bm, item in res.items():
        if not bm in plot_opts.keys():
            continue
        i += 1
        ax = axs[i]
        ax.set_xlabel('Number of requests')
        for opti, data in item.items():
            # data = [m_list, y_list, y_final]; see `calc`.
            m = np.array(data[0], dtype=int)
            y = np.array(data[1])
            if plot_opts[bm].get('inv'):
                y *= -1
            # First index where the request count reaches the plotting range.
            j = np.argmax(m >= m_min)
            nm = opti
            if nm == 'Our':
                nm = 'PROTES'
            ax.plot(m[j:], y[j:], label=nm,
                marker='o', markersize=8, linewidth=6 if nm == 'PROTES' else 3)
        # Legend only on the first panel.
        _prep_ax(ax, xlog=True, ylog=True, leg=i==0)
        ax.set_xlim(m_min, 2.E+4)
        if 'y_min' in plot_opts[bm]:
            ax.set_ylim(plot_opts[bm]['y_min'], plot_opts[bm]['y_max'])
        #yticks = [1.8E+3, 2.0E+3, 2.2E+3, 2.4E+3, 2.6E+3, 2.8E+3, 3.0E+3, 3.2E+3]
        #ax.set(yticks=yticks, yticklabels=[int(])
        #ax.get_yaxis().get_major_formatter().labelOnlyBase = False
    plt.savefig('deps.png', bbox_inches='tight')
def text():
    """Print the results table body as LaTeX rows (for the paper).

    Reads the pickled results via `_load()` and emits one table row per
    benchmark with one column per optimizer; values >= 1e40 are printed
    as "Fail". Rows are grouped with \\hline and \\multirow labels by
    problem class (analytic functions, QUBO, control, constrained control).
    """
    res = _load()
    text = '\n\n% ' + '='*50 + '\n' + '% [START] Auto generated data \n\n'
    for i, (bm, item) in enumerate(res.items(), 1):
        # Separator + group label before the first benchmark of each class.
        if i in [11, 15, 18]:
            text += '\n\\hline\n'
        if i == 1:
            text += '\\multirow{10}{*}{\\parbox{1.6cm}{Analytic Functions}}\n'
        if i == 11:
            text += '\\multirow{3}{*}{QUBO}\n'
        if i == 15:
            text += '\\multirow{3}{*}{Control}\n'
        if i == 18:
            # Fixed: '\parbox' was an invalid escape sequence (DeprecationWarning);
            # '\\parbox' yields the identical string.
            text += '\\multirow{3}{*}{\\parbox{1.67cm}{Control +constr.}}\n'
        text += f' & {bm}\n'
        # item maps optimizer name -> [m_list, y_list, y_final]; only the
        # final optimum goes into the table.
        vals = np.array([v[2] for v in item.values()])
        for v in vals:
            if v < 1.E+40:
                text += f' & {v:-8.1e}\n'
            else:
                text += f' & Fail\n'
        text += f' \\\\ \n'
    text += '\n\n\\hline\n\n'
    text += '\n% [END] Auto generated data \n% ' + '='*50 + '\n\n'
    print(text)
def _load(fpath='res.pickle'):
with open(fpath, 'rb') as f:
res = pickle.load(f)
return res
def _prep_ax(ax, xlog=False, ylog=False, leg=False, xint=False, xticks=None):
    """Apply the shared axis styling used by all figures in this script."""
    if xlog:
        ax.semilogx()
    if ylog:
        ax.semilogy()
    if leg:
        ax.legend(loc='upper right', frameon=True)

    ax.grid(ls=":")
    # Hide the top/right frame and keep ticks on the bottom/left only.
    for side in ('top', 'right'):
        ax.spines[side].set_visible(False)
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()

    if xint:
        # Force integer tick locations on the x-axis.
        ax.xaxis.set_major_locator(MaxNLocator(integer=True))
    if xticks is not None:
        ax.set(xticks=xticks, xticklabels=xticks)
def _prep_bm_func(bm):
shift = np.random.randn(bm.d) / 10
a_new = bm.a - (bm.b-bm.a) * shift
b_new = bm.b + (bm.b-bm.a) * shift
bm.set_grid(a_new, b_new)
bm.prep()
return bm
def _save(res, fpath='res.pickle'):
with open(fpath, 'wb') as f:
pickle.dump(res, f, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == '__main__':
    # Dispatch on the first CLI argument: calc (default) | plot | text.
    mode = sys.argv[1] if len(sys.argv) > 1 else 'calc'
    if mode == 'calc':
        calc()
    elif mode == 'plot':
        plot()
    elif mode == 'text':
        text()
    else:
        raise ValueError(f'Invalid computation mode "{mode}"')
| 7,201 | 25.477941 | 79 | py |
DeblendingStarfields | DeblendingStarfields-master/deblending_runjingdev/flux_utils.py | import torch
import numpy as np
from torch import nn
from torch import optim
import deblending_runjingdev.utils as utils
from deblending_runjingdev.simulated_datasets_lib import plot_one_star
from deblending_runjingdev.wake_lib import PlanarBackground
class FluxEstimator(nn.Module):
    """Maximum-a-posteriori estimator of star fluxes at fixed locations.

    Given an observed image, star locations (in [0, 1] coordinates) and the
    number of stars per image, the module renders each star through the PSF
    once ("star basis") and then optimizes per-star, per-band fluxes (and a
    planar sky background) with LBFGS. The loss is a Gaussian approximation
    to the Poisson log-likelihood plus a power-law prior on the first-band
    flux and a Gaussian prior on the colors.
    """

    def __init__(self, observed_image, locs, n_stars,
                 psf,
                 planar_background_params,
                 fmin = 1e-3,
                 alpha = 0.5,
                 pad = 5,
                 init_fluxes = None):
        """
        Args:
            observed_image: tensor, batchsize (or 1) x n_bands x slen x slen.
            locs: star locations in [0, 1], batchsize x max_stars x 2.
            n_stars: number of stars per image, length batchsize.
            psf: point-spread function, n_bands x psf_slen x psf_slen.
            planar_background_params: per-band planar background parameters.
            fmin: minimum flux (fluxes are parameterized as exp(param) + fmin).
            alpha: power-law exponent of the flux prior.
            pad: border width (pixels) excluded from the likelihood.
            init_fluxes: optional initial fluxes; estimated from the image
                if None.
        """
        super(FluxEstimator, self).__init__()

        self.pad = pad
        self.fmin = fmin

        # observed image is batchsize (or 1) x n_bands x slen x slen
        assert len(observed_image.shape) == 4
        self.observed_image = observed_image

        # batchsize
        assert len(n_stars) == locs.shape[0]
        batchsize = locs.shape[0]

        # get n_bands
        assert observed_image.shape[1] == psf.shape[0]
        self.n_bands = psf.shape[0]

        self.max_stars = locs.shape[1]
        assert locs.shape[2] == 2

        # boolean for stars being on
        self.is_on_array = utils.get_is_on_from_n_stars(n_stars, self.max_stars)

        # set star basis: one unit-flux PSF rendering per (image, star),
        # zeroed out for "off" stars.
        self.slen = observed_image.shape[-1]
        self.psf = psf
        self.star_basis = \
            plot_one_star(self.slen, locs.view(-1, 2), self.psf,
                          cached_grid = None).view(batchsize,
                                                   self.max_stars,
                                                   self.n_bands,
                                                   self.slen, self.slen) * \
            self.is_on_array[:, :, None, None, None]

        # get background
        assert planar_background_params.shape[0] == self.n_bands
        self.init_background_params = planar_background_params
        self.planar_background = PlanarBackground(image_slen=self.slen,
                                                  init_background_params=self.init_background_params)
        self.background = self.planar_background.forward().detach()

        if init_fluxes is None:
            self._init_fluxes(locs)
        else:
            self.init_fluxes = init_fluxes

        # Unconstrained parameterization: flux = exp(param) + fmin.
        # Initial fluxes are clamped to at least fmin + 1 so the log is finite.
        self.init_param = torch.log(self.init_fluxes.clamp(min = self.fmin + 1) - self.fmin)
        self.param = nn.Parameter(self.init_param.clone())

        self.alpha = alpha

        # TODO: pass these as an argument
        self.color_mean = 0.3
        self.color_var = 0.15**2

        # Initial loss, used as the scale for the convergence tolerance.
        self.init_loss = self.get_loss()

    def _init_fluxes(self, locs):
        """Initialize fluxes from the sky-subtracted pixel value at each star.

        The pixel intensity at the (rounded) star location is divided by the
        peak of the PSF in each band to convert it to an approximate flux.
        """
        batchsize = locs.shape[0]

        # Round to pixel indices, kept at least 2 pixels from the border.
        locs_indx = torch.round(locs * (self.slen - 1)).type(torch.long).clamp(max = self.slen - 2,
                                                                               min = 2)

        sky_subtr_image = self.observed_image - self.background
        self.init_fluxes = torch.zeros(batchsize, self.max_stars, self.n_bands)

        for i in range(locs.shape[0]):
            # If a single observed image is shared across the batch, index it
            # at 0 for every set of locations.
            if self.observed_image.shape[0] == 1:
                obs_indx = 0
            else:
                obs_indx = i

            # # take the min over a box of the location
            # init_fluxes_i = torch.zeros(9, self.max_stars, self.n_bands)
            # n = 0
            # for j in [-1, 0, 1]:
            #     for k in [-1, 0, 1]:
            #         init_fluxes_i[n] = sky_subtr_image[obs_indx, :,
            #                                 locs_indx[i, :, 0] + j,
            #                                 locs_indx[i, :, 1] + k].transpose(0, 1)
            #         n +=1
            #
            # self.init_fluxes[i] = init_fluxes_i.mean(0)

            self.init_fluxes[i] = \
                sky_subtr_image[obs_indx, :,
                                locs_indx[i, :, 0], locs_indx[i, :, 1]].transpose(0, 1)

        # Divide by the per-band PSF peak to convert peak intensity to flux.
        self.init_fluxes = self.init_fluxes / self.psf.view(self.n_bands, -1).max(1)[0][None, None, :]

    def forward(self, train_background = True):
        """Return the reconstructed mean image (stars + planar background).

        When `train_background` is False the background is detached so only
        the fluxes receive gradients.
        """
        background = self.planar_background.forward()
        if not train_background:
            background = background.detach()

        fluxes = torch.exp(self.param[:, :, :, None, None]) + self.fmin
        recon_mean = (fluxes * self.star_basis).sum(1) + background

        # Clamp to keep the downstream log/division well defined.
        return recon_mean.clamp(min = 1e-6)

    def get_loss(self, train_background = True):
        """Negative log-posterior: Gaussian-approx likelihood minus priors."""
        # log likelihood terms: 0.5 * (x - mu)^2 / mu + 0.5 * log(mu) is the
        # Gaussian approximation to the Poisson log-likelihood (up to const).
        recon_mean = self.forward(train_background)

        error = 0.5 * ((self.observed_image - recon_mean)**2 / recon_mean) + 0.5 * torch.log(recon_mean)
        assert (~torch.isnan(error)).all()

        # Exclude a `pad`-pixel border from the likelihood.
        neg_loglik = error[:, :, self.pad:(self.slen - self.pad), self.pad:(self.slen - self.pad)].sum()

        # prior terms
        # NOTE(review): log_flux = param + log(fmin) differs from
        # log(exp(param) + fmin) by a star-wise offset; for a power-law prior
        # this shifts the objective by a constant per "on" star — confirm.
        log_flux = self.param + np.log(self.fmin)
        # Power-law (Pareto-like) prior on the first band's flux.
        flux_prior = - (self.alpha + 1) * (log_flux[:, :, 0] * self.is_on_array).sum()

        if self.n_bands > 1:
            # Colors in magnitudes relative to the first band, with a
            # Gaussian prior N(color_mean, color_var).
            colors = 2.5 * (log_flux[:, :, 1:] - log_flux[:, :, 0:1]) / np.log(10.)
            color_prior = - 0.5 * (colors - self.color_mean)**2 / self.color_var
            flux_prior += (color_prior * self.is_on_array.unsqueeze(-1)).sum()

        assert ~torch.isnan(flux_prior)

        loss = neg_loglik - flux_prior

        return loss

    def optimize(self,
                 train_background = True,
                 max_outer_iter = 10,
                 max_inner_iter = 20,
                 tol = 1e-3,
                 print_every = False):
        """Minimize the loss with LBFGS.

        Runs up to `max_outer_iter` LBFGS steps (each with up to
        `max_inner_iter` inner iterations) and stops early once the loss
        change falls below `tol * init_loss`.
        """
        optimizer = optim.LBFGS(self.parameters(),
                                max_iter = max_inner_iter,
                                line_search_fn = 'strong_wolfe')

        def closure():
            # Standard LBFGS closure: re-evaluate loss and gradients.
            optimizer.zero_grad()
            loss = self.get_loss(train_background)
            loss.backward()
            return loss

        old_loss = 1e16
        for i in range(max_outer_iter):
            loss = optimizer.step(closure)
            if print_every:
                print(loss)
            diff = (loss - old_loss).abs()
            if diff < (tol * self.init_loss):
                break
            old_loss = loss

    def return_fluxes(self):
        """Return the current flux estimates, flux = exp(param) + fmin."""
        return torch.exp(self.param.data) + self.fmin
| 6,185 | 34.348571 | 104 | py |
DeblendingStarfields | DeblendingStarfields-master/deblending_runjingdev/image_utils.py | import torch
from torch import nn
import deblending_runjingdev.utils as utils
from deblending_runjingdev.which_device import device
# This function copied from
# https://gist.github.com/dem123456789/23f18fd78ac8da9615c347905e64fc78
def _extract_patches_2d(img,patch_shape,step=[1.0,1.0],batch_first=False):
    """Extract sliding 2D patches from a batch of images.

    Args:
        img: tensor of shape (batch, channels, H, W).
        patch_shape: [patch_H, patch_W].
        step: stride per dimension; a float is interpreted as a fraction of
            the patch size, an int as a pixel stride. (Mutable default is
            never mutated — `step_int` below is a fresh list.)
        batch_first: if True, output is (batch, n_patches, C, pH, pW);
            otherwise (n_patches, batch, C, pH, pW).
    """
    patch_H, patch_W = patch_shape[0], patch_shape[1]
    # Zero-pad images smaller than the patch, centering the image.
    if(img.size(2)<patch_H):
        num_padded_H_Top = (patch_H - img.size(2))//2
        num_padded_H_Bottom = patch_H - img.size(2) - num_padded_H_Top
        padding_H = nn.ConstantPad2d((0,0,num_padded_H_Top,num_padded_H_Bottom),0)
        img = padding_H(img)
    if(img.size(3)<patch_W):
        num_padded_W_Left = (patch_W - img.size(3))//2
        num_padded_W_Right = patch_W - img.size(3) - num_padded_W_Left
        padding_W = nn.ConstantPad2d((num_padded_W_Left,num_padded_W_Right,0,0),0)
        img = padding_W(img)
    # Convert fractional steps to pixel strides.
    step_int = [0,0]
    step_int[0] = int(patch_H*step[0]) if(isinstance(step[0], float)) else step[0]
    step_int[1] = int(patch_W*step[1]) if(isinstance(step[1], float)) else step[1]
    # Unfold along H; if the stride does not tile exactly, append one extra
    # row of patches flush with the bottom edge.
    patches_fold_H = img.unfold(2, patch_H, step_int[0])
    if((img.size(2) - patch_H) % step_int[0] != 0):
        patches_fold_H = torch.cat((patches_fold_H,img[:,:,-patch_H:,].permute(0,1,3,2).unsqueeze(2)),dim=2)
    # Same along W.
    patches_fold_HW = patches_fold_H.unfold(3, patch_W, step_int[1])
    if((img.size(3) - patch_W) % step_int[1] != 0):
        patches_fold_HW = torch.cat((patches_fold_HW,patches_fold_H[:,:,:,-patch_W:,:].permute(0,1,2,4,3).unsqueeze(3)),dim=3)
    # Flatten the patch grid into a single leading dimension.
    patches = patches_fold_HW.permute(2,3,0,1,4,5)
    patches = patches.reshape(-1,img.size(0),img.size(1),patch_H,patch_W)
    if(batch_first):
        patches = patches.permute(1,0,2,3,4)
    return patches
def tile_images(images, subimage_slen, step):
    """Break up a batch of images into square tiles.

    Args:
        images: tensor of shape (batchsize, n_bands, slen, slen).
        subimage_slen: side length of each square tile.
        step: number of pixels to shift between consecutive tiles; must tile
            the image exactly (asserted below, matching `get_tile_coords`).

    Returns:
        Tensor of shape (batchsize * tiles_per_image, n_bands,
        subimage_slen, subimage_slen).

    NOTE: input and output are torch tensors, not numpy arrays
    (need the unfold command from torch).
    """
    assert len(images.shape) == 4
    image_xlen = images.shape[2]
    image_ylen = images.shape[3]
    # my tile coords doesn't work otherwise ...
    assert (image_xlen - subimage_slen) % step == 0
    assert (image_ylen - subimage_slen) % step == 0
    n_bands = images.shape[1]
    # Tile each band separately, then stack bands with a single cat instead
    # of growing the tensor inside the loop (avoids repeated reallocation).
    band_tiles = []
    for b in range(n_bands):
        band_tiles.append(
            _extract_patches_2d(images[:, b:(b + 1), :, :],
                                patch_shape=[subimage_slen, subimage_slen],
                                step=[step, step],
                                batch_first=True).reshape(-1, 1, subimage_slen, subimage_slen))
    return torch.cat(band_tiles, dim=1)
def get_tile_coords(image_xlen, image_ylen, subimage_slen, step):
    """Return the (x0, y0) top-left pixel offset of every tile.

    Used in conjunction with `tile_images`: row i of the result is the
    corner of tile i in the full image.
    """
    nx_tiles = ((image_xlen - subimage_slen) // step) + 1
    ny_tiles = ((image_ylen - subimage_slen) // step) + 1
    n_tiles = nx_tiles * ny_tiles

    # Tiles are enumerated row-major: tile i sits at row i // ny_tiles and
    # column i % ny_tiles, each scaled by the step size.
    coords = [[(i // ny_tiles) * step, (i % ny_tiles) * step]
              for i in range(n_tiles)]
    return torch.LongTensor(coords).to(device)
def get_params_in_tiles(tile_coords, locs, fluxes, slen, subimage_slen,
                        edge_padding = 0):
    """Assign full-image star parameters to image tiles.

    Args:
        tile_coords: (n_tiles, 2) top-left corners from `get_tile_coords`.
        locs: (batchsize, max_stars, 2) locations in [0, 1]; rows of zeros
            are treated as "off" stars.
        fluxes: (batchsize, max_stars, n_bands) or None.
        slen: full-image side length.
        subimage_slen: tile side length.
        edge_padding: border of each tile excluded when assigning stars.

    Returns:
        (tile_locs, tile_fluxes, n_stars_per_tile, tile_is_on_array), with
        tile_locs rescaled to [0, 1] within the unpadded part of each tile
        and "on" stars sorted to the front of the star axis.
    """
    # locs are the coordinates in the full image, in coordinates between 0-1
    assert torch.all(locs <= 1.)
    assert torch.all(locs >= 0.)
    n_tiles = tile_coords.shape[0] # number of tiles in a full image
    fullimage_batchsize = locs.shape[0] # number of full images
    subimage_batchsize = n_tiles * fullimage_batchsize # total number of tiles
    max_stars = locs.shape[1]
    tile_coords = tile_coords.unsqueeze(0).unsqueeze(2).float()
    locs = locs * (slen - 1)
    # Star s belongs to tile t iff both pixel coordinates fall strictly
    # inside the tile's unpadded interior (and the location is not the
    # all-zero "off" placeholder).
    which_locs_array = (locs.unsqueeze(1) > tile_coords + edge_padding - 0.5) & \
                    (locs.unsqueeze(1) < tile_coords - 0.5 + subimage_slen - edge_padding) & \
                    (locs.unsqueeze(1) != 0)
    which_locs_array = (which_locs_array[:, :, :, 0] * which_locs_array[:, :, :, 1]).float()
    # Shift locations into tile coordinates and rescale to [0, 1] over the
    # unpadded tile interior.
    tile_locs = \
        (which_locs_array.unsqueeze(3) * locs.unsqueeze(1) - \
            (tile_coords + edge_padding - 0.5)).view(subimage_batchsize, max_stars, 2) / \
                (subimage_slen - 2 * edge_padding)
    tile_locs = torch.relu(tile_locs) # by subtracting off, some are negative now; just set these to 0
    if fluxes is not None:
        assert fullimage_batchsize == fluxes.shape[0]
        assert max_stars == fluxes.shape[1]
        n_bands = fluxes.shape[2]
        # Zero out fluxes of stars not assigned to each tile.
        tile_fluxes = \
            (which_locs_array.unsqueeze(3) * fluxes.unsqueeze(1)).view(subimage_batchsize, max_stars, n_bands)
    else:
        # No fluxes given: return a single zero band as a placeholder.
        tile_fluxes = torch.zeros(tile_locs.shape[0], tile_locs.shape[1], 1)
        n_bands = 1
    # sort locs so all the zeros are at the end
    is_on_array = which_locs_array.view(subimage_batchsize, max_stars).type(torch.bool).to(device)
    n_stars_per_tile = is_on_array.float().sum(dim = 1).type(torch.LongTensor).to(device)
    is_on_array_sorted = utils.get_is_on_from_n_stars(n_stars_per_tile, n_stars_per_tile.max())
    # Build gather indices that move every "on" star to the front.
    indx = is_on_array_sorted.clone()
    indx[indx == 1] = torch.nonzero(is_on_array, as_tuple=False)[:, 1]
    tile_fluxes = torch.gather(tile_fluxes, dim = 1, index = indx.unsqueeze(2).repeat(1, 1, n_bands)) * \
                    is_on_array_sorted.float().unsqueeze(2)
    tile_locs = torch.gather(tile_locs, dim = 1, index = indx.unsqueeze(2).repeat(1, 1, 2)) * \
                    is_on_array_sorted.float().unsqueeze(2)
    tile_is_on_array = is_on_array_sorted
    return tile_locs, tile_fluxes, n_stars_per_tile, tile_is_on_array
def get_full_params_from_tile_params(tile_locs, tile_fluxes,
                                     tile_coords,
                                     full_slen,
                                     stamp_slen,
                                     edge_padding,
                                     # TODO: default is to assume the full image is square
                                     # make this a systematic change.
                                     full_slen2 = None):
    """Invert `get_params_in_tiles`: map tile-level parameters back to the
    full image.

    Tile locations (in [0, 1] over each tile's unpadded interior) are
    rescaled and shifted by the tile corner, then divided by the full image
    size; stars with zero flux in every band are treated as "off" and
    sorted to the back. Returns (locs, fluxes, n_stars) per full image.
    """
    # off stars should have tile_locs == 0 and tile_fluxes == 0
    assert (tile_fluxes.shape[0] % tile_coords.shape[0]) == 0
    batchsize = int(tile_fluxes.shape[0] / tile_coords.shape[0])
    assert (tile_fluxes.shape[0] % batchsize) == 0
    n_stars_in_batch = int(tile_fluxes.shape[0] * tile_fluxes.shape[1] / batchsize)
    n_bands = tile_fluxes.shape[2]
    fluxes = tile_fluxes.view(batchsize, n_stars_in_batch, n_bands)
    # Undo the tile-local rescaling: pixel = tile_loc * scale + corner.
    scale = (stamp_slen - 2 * edge_padding)
    bias = tile_coords.repeat(batchsize, 1).unsqueeze(1).float() + edge_padding - 0.5
    if full_slen2 is None:
        locs = (tile_locs * scale + bias) / (full_slen - 1)
    else:
        # Rectangular full image: normalize each axis by its own length.
        locs = (tile_locs * scale + bias) / torch.Tensor([[[full_slen - 1, full_slen2 - 1]]]).to(device)
    locs = locs.view(batchsize, n_stars_in_batch, 2)
    tile_is_on_bool = (fluxes > 0).any(2).float() # if flux in any band is nonzero
    n_stars = torch.sum(tile_is_on_bool > 0, dim = 1)
    # puts all the on stars in front
    is_on_array_full = utils.get_is_on_from_n_stars(n_stars, n_stars.max())
    indx = is_on_array_full.clone()
    indx[indx == 1] = torch.nonzero(tile_is_on_bool)[:, 1]
    fluxes = torch.gather(fluxes, dim = 1, index = indx.unsqueeze(2).repeat(1, 1, n_bands)) * \
                is_on_array_full.float().unsqueeze(2)
    locs = torch.gather(locs, dim = 1, index = indx.unsqueeze(2).repeat(1, 1, 2)) * \
                is_on_array_full.float().unsqueeze(2)
    return locs, fluxes, n_stars
def trim_images(images, edge_padding):
    """Crop `edge_padding` pixels from every border of a batch of images.

    `images` is indexed as (batch, band, x, y); both spatial axes are
    assumed to have the same length.
    """
    upper = images.shape[-1] - edge_padding
    return images[:, :, edge_padding:upper, edge_padding:upper]
| 8,377 | 42.409326 | 126 | py |
DeblendingStarfields | DeblendingStarfields-master/deblending_runjingdev/daophot_catalog_lib.py | import torch
import numpy as np
from deblending_runjingdev.which_device import device
from deblending_runjingdev.sdss_dataset_lib import convert_mag_to_nmgy
from deblending_runjingdev.image_statistics_lib import get_locs_error
def load_daophot_results(data_file,
                         nelec_per_nmgy,
                         wcs,
                         slen = 100,
                         x0 = 630,
                         x1 = 310):
    """Load a DAOPHOT catalog and return locations/fluxes inside a subimage.

    Args:
        data_file: whitespace-delimited catalog; columns 4/5 are RA/Dec and
            column 22 is the magnitude (as read below).
        nelec_per_nmgy: conversion factor from nanomaggies to electrons.
        wcs: astropy-style WCS used to convert sky to pixel coordinates.
        slen: side length of the square cutout.
        x0, x1: pixel offsets of the cutout in the full frame
            (x0 along pix_coords[1], x1 along pix_coords[0]).

    Returns:
        (locs, fluxes): locations scaled to [0, 1] within the cutout
        (n_stars x 2) and fluxes in electrons (n_stars x 1), both on `device`.
    """
    daophot_file = np.loadtxt(data_file)
    # load desired quantities
    daophot_ra = daophot_file[:, 4]
    daophot_decl = daophot_file[:, 5]
    daophot_mags = daophot_file[:, 22]
    # get pixel coordinates
    pix_coords = wcs.wcs_world2pix(daophot_ra, daophot_decl, 0, ra_dec_order = True)
    # get locations inside our square
    which_locs = (pix_coords[1] > x0) & (pix_coords[1] < (x0 + slen - 1)) & \
                    (pix_coords[0] > x1) & (pix_coords[0] < (x1 + slen - 1))
    # scale between zero and ones
    daophot_locs0 = (pix_coords[1][which_locs] - x0) / (slen - 1)
    daophot_locs1 = (pix_coords[0][which_locs] - x1) / (slen - 1)
    daophot_locs = torch.Tensor(np.array([daophot_locs0, daophot_locs1]).transpose()).to(device)
    # get fluxes: magnitudes -> nanomaggies -> electrons
    daophot_fluxes = convert_mag_to_nmgy(daophot_mags[which_locs]) * \
                        nelec_per_nmgy
    daophot_fluxes = torch.Tensor(daophot_fluxes).unsqueeze(1).to(device)
    return daophot_locs, daophot_fluxes
def align_daophot_locs(daophot_locs, daophot_fluxes, hubble_locs, hubble_fluxes,
                       slen = 100,
                       align_on_logflux = 4.5):
    """Remove a constant astrometric offset between DAOPHOT and Hubble.

    Bright stars (log10 flux above `align_on_logflux`) from both catalogs are
    matched, the median per-axis offset is subtracted from all DAOPHOT
    locations, and stars pushed outside (0, 1) by the shift are dropped.

    Returns the shifted (locs, fluxes) pair.
    """
    # take only bright stars
    log10_fluxes = torch.log10(daophot_fluxes).squeeze()
    log10_hubble_fluxes = torch.log10(hubble_fluxes).squeeze()
    which_est_brightest = torch.nonzero(log10_fluxes > align_on_logflux).squeeze()
    which_hubble_brightest = torch.nonzero(log10_hubble_fluxes > align_on_logflux).squeeze()
    _daophot_locs = daophot_locs[which_est_brightest]
    _hubble_locs = hubble_locs[which_hubble_brightest]
    # match daophot locations to hubble locations
    # NOTE(review): argmin(0) assumes get_locs_error returns a
    # (n_hubble?, n_daophot?) distance matrix — confirm its orientation.
    perm = get_locs_error(_daophot_locs, _hubble_locs).argmin(0)
    # get error (in pixels) and estimate bias as the per-axis median
    locs_err = (_daophot_locs- _hubble_locs[perm]) * (slen - 1)
    bias_x1 = locs_err[:, 1].median() / (slen - 1)
    bias_x0 = locs_err[:, 0].median() / (slen - 1)
    # shift by bias
    daophot_locs[:, 0] -= bias_x0
    daophot_locs[:, 1] -= bias_x1
    # after filtering, some locs are less than 0 or greater than 1; drop them
    which_filter = (daophot_locs[:, 0] > 0) & (daophot_locs[:, 0] < 1) & \
                    (daophot_locs[:, 1] > 0) & (daophot_locs[:, 1] < 1)
    daophot_locs = daophot_locs[which_filter]
    daophot_fluxes = daophot_fluxes[which_filter]
    return daophot_locs, daophot_fluxes
return daophot_locs, daophot_fluxes | 2,859 | 38.178082 | 96 | py |
DeblendingStarfields | DeblendingStarfields-master/deblending_runjingdev/utils.py | import torch
import numpy as np
from torch.distributions import normal, categorical
from deblending_runjingdev.which_device import device
# Functions to work with n_stars
def get_is_on_from_n_stars(n_stars, max_stars):
    """Convert star counts into a (batchsize, max_stars) on/off indicator.

    Row b has n_stars[b] leading ones followed by zeros (long dtype, on the
    module-level `device`).
    """
    assert len(n_stars.shape) == 1

    # One broadcasted comparison instead of filling the matrix column by
    # column: entry (b, i) is on iff i < n_stars[b].
    star_index = torch.arange(max_stars, device = n_stars.device)
    is_on_array = (n_stars.unsqueeze(1) > star_index.unsqueeze(0)).long()
    return is_on_array.to(device)
def get_is_on_from_n_stars_2d(n_stars, max_stars):
    """Convert an (n_samples, batchsize) count tensor into on/off indicators.

    Returns a long tensor of shape (n_samples, batchsize, max_stars) whose
    last axis has n_stars[s, b] leading ones.
    """
    # n stars is n_samples x batchsize
    assert not torch.any(torch.isnan(n_stars))
    assert torch.all(n_stars >= 0)
    assert torch.all(n_stars <= max_stars)

    # Broadcast the counts against a star-index vector: slot i is on iff
    # i < n_stars[s, b].
    star_index = torch.arange(max_stars, device = n_stars.device)
    is_on_array = (n_stars.unsqueeze(-1) > star_index).long()
    return is_on_array.to(device)
def get_one_hot_encoding_from_int(z, n_classes):
    """One-hot encode an integer vector into a (len(z), n_classes) float
    matrix with a single 1. per row, placed at column z[i]."""
    labels = z.long()
    assert len(torch.unique(labels)) <= n_classes

    encoding = torch.zeros(len(labels), n_classes, device = device)
    # scatter_ writes 1. at column labels[i] of row i.
    encoding.scatter_(1, labels.view(-1, 1), 1)
    return encoding.view(len(labels), n_classes)
# sampling functions
def sample_class_weights(class_weights, n_samples = 1):
    """Draw `n_samples` indices from a Categorical distribution.

    `class_weights` holds the (possibly unnormalized) class probabilities;
    the trailing squeeze drops the sample dimension when n_samples == 1.
    """
    assert not torch.any(torch.isnan(class_weights))
    distribution = categorical.Categorical(probs = class_weights)
    draws = distribution.sample((n_samples, ))
    return draws.detach().squeeze()
def sample_normal(mean, logvar):
    """Reparameterized draw from N(mean, exp(logvar)); keeps gradients."""
    noise = torch.randn(mean.shape, device = device)
    return mean + noise * torch.exp(0.5 * logvar)
# log probabilities
def _logit(x, tol = 1e-8):
return torch.log(x + tol) - torch.log(1 - x + tol)
def eval_normal_logprob(x, mu, log_var):
    """Elementwise log density of N(mu, exp(log_var)) evaluated at x."""
    quad = 0.5 * (x - mu)**2 / torch.exp(log_var)
    const = 0.5 * np.log(2 * np.pi)
    return - 0.5 * log_var - quad - const
def eval_logitnormal_logprob(x, mu, log_var):
    """Normal log-prob of logit(x) under N(mu, exp(log_var)).

    NOTE: no Jacobian term of the logit transform is included.
    """
    return eval_normal_logprob(_logit(x), mu, log_var)
def eval_lognormal_logprob(x, mu, log_var, tol = 1e-8):
    """Normal log-prob of log(x + tol) under N(mu, exp(log_var)).

    NOTE: the 1/x Jacobian term of the log transform is not included.
    """
    return eval_normal_logprob(torch.log(x + tol), mu, log_var)
| 2,374 | 29.448718 | 95 | py |
DeblendingStarfields | DeblendingStarfields-master/deblending_runjingdev/starnet_lib.py | import torch
import torch.nn as nn
import numpy as np
import deblending_runjingdev.image_utils as image_utils
import deblending_runjingdev.utils as utils
from deblending_runjingdev.which_device import device
from itertools import product
from torch.distributions import poisson
class Flatten(nn.Module):
    """Collapse every dimension after the batch axis into one."""

    def forward(self, tensor):
        batchsize = tensor.size(0)
        return tensor.view(batchsize, -1)
class Normalize2d(nn.Module):
    """Standardize each (image, band) channel to zero mean, unit variance.

    Statistics are computed over the two spatial dimensions; 1e-5 is added
    to the variance for numerical stability.
    """

    def forward(self, tensor):
        assert len(tensor.shape) == 4
        batchsize, n_bands = tensor.shape[0], tensor.shape[1]
        flat = tensor.view(batchsize, n_bands, -1)
        mean = flat.mean(2, keepdim = True).unsqueeze(-1)
        var = flat.var(2, keepdim = True).unsqueeze(-1)
        return (tensor - mean) / torch.sqrt(var + 1e-5)
class StarEncoder(nn.Module):
    def __init__(self, slen, ptile_slen, step, edge_padding,
                 n_bands, max_detections,
                 n_source_params = None,
                 momentum = 0.5,
                 track_running_stats = True,
                 constrain_logflux_mean = False,
                 fmin = 0.0):
        """Amortized encoder mapping padded image tiles to variational
        parameters (locations, fluxes/source parameters, star counts).

        Args:
            slen: side length of the full (square) image.
            ptile_slen: side length of each padded image tile.
            step: pixel shift between consecutive tiles.
            edge_padding: tile border excluded when assigning stars.
            n_bands: number of image bands.
            max_detections: maximum number of stars detected per tile.
            n_source_params: per-source parameter dimension; defaults to
                n_bands (fluxes, later exponentiated).
            momentum, track_running_stats: BatchNorm settings.
            constrain_logflux_mean: if True, flux means are squared so they
                are non-negative.
            fmin: minimum flux (stored; used by downstream code).
        """
        super(StarEncoder, self).__init__()

        # image parameters
        self.slen = slen # dimension of full image: we assume its square for now
        self.ptile_slen = ptile_slen # dimension of the individual image padded tiles
        self.step = step # number of pixels to shift every subimage
        self.n_bands = n_bands # number of bands
        self.fmin = fmin
        self.constrain_logflux_mean = constrain_logflux_mean

        self.edge_padding = edge_padding

        # Cached tile corners for the nominal full-image size.
        self.tile_coords = image_utils.get_tile_coords(self.slen, self.slen,
                                                       self.ptile_slen, self.step)
        self.n_tiles = self.tile_coords.shape[0]

        # max number of detections
        self.max_detections = max_detections

        # convolutional NN paramters
        enc_conv_c = 20
        enc_kern = 3
        enc_hidden = 256

        # convolutional NN
        self.enc_conv = nn.Sequential(
            nn.Conv2d(self.n_bands, enc_conv_c, enc_kern,
                      stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(enc_conv_c, enc_conv_c, enc_kern,
                      stride=1, padding=1),
            nn.BatchNorm2d(enc_conv_c, momentum=momentum, track_running_stats=track_running_stats),
            nn.ReLU(),
            nn.Conv2d(enc_conv_c, enc_conv_c, enc_kern,
                      stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(enc_conv_c, enc_conv_c, enc_kern,
                      stride=1, padding=1),
            nn.BatchNorm2d(enc_conv_c, momentum=momentum, track_running_stats=track_running_stats),
            nn.ReLU(),
            Flatten()
        )

        # output dimension of convolutions, probed with a dummy tile
        conv_out_dim = \
            self.enc_conv(torch.zeros(1, n_bands, ptile_slen, ptile_slen)).size(1)

        # fully connected layers
        self.enc_fc = nn.Sequential(
            nn.Linear(conv_out_dim, enc_hidden),
            nn.BatchNorm1d(enc_hidden, momentum=momentum, track_running_stats=track_running_stats),
            nn.ReLU(),
            nn.Linear(enc_hidden, enc_hidden),
            nn.BatchNorm1d(enc_hidden, momentum=momentum, track_running_stats=track_running_stats),
            nn.ReLU(),
            nn.Linear(enc_hidden, enc_hidden),
            nn.BatchNorm1d(enc_hidden, momentum=momentum, track_running_stats=track_running_stats),
            nn.ReLU(),
        )

        if n_source_params is None:
            self.n_source_params = self.n_bands
            # we will take exp for fluxes
            self.constrain_source_params = True
        else:
            self.n_source_params = n_source_params
            # these can be anywhere in the reals
            self.constrain_source_params = False

        # Per star: 2 location means + 2 location variances + mean and
        # variance for each source parameter.
        self.n_params_per_star = (4 + 2 * self.n_source_params)

        # Total output size: parameters for every candidate n_stars
        # (triangular count) plus the (max_detections + 1) count logits.
        self.dim_out_all = \
            int(0.5 * self.max_detections * (self.max_detections + 1) * self.n_params_per_star + \
                    1 + self.max_detections)
        self._get_hidden_indices()

        self.enc_final = nn.Linear(enc_hidden, self.dim_out_all)
        self.log_softmax = nn.LogSoftmax(dim = 1)
############################
# The layers of our neural network
############################
def _forward_to_pooled_hidden(self, image):
# forward to the layer that is shared by all n_stars
log_img = torch.log(image - image.min() + 1.)
h = self.enc_conv(log_img)
return self.enc_fc(h)
def get_var_params_all(self, image_ptiles):
# concatenate all output parameters for all possible n_stars
h = self._forward_to_pooled_hidden(image_ptiles)
return self.enc_final(h)
######################
# Forward modules
######################
def forward(self, image_ptiles, n_stars = None):
# pass through neural network
h = self.get_var_params_all(image_ptiles)
# get probability of n_stars
log_probs_n = self.get_logprob_n_from_var_params(h)
if n_stars is None:
n_stars = torch.argmax(log_probs_n, dim = 1)
# extract parameters
loc_mean, loc_logvar, \
log_flux_mean, log_flux_logvar = \
self.get_var_params_for_n_stars(h, n_stars)
return loc_mean, loc_logvar, \
log_flux_mean, log_flux_logvar, log_probs_n
def get_logprob_n_from_var_params(self, h):
free_probs = h[:, self.prob_indx]
return self.log_softmax(free_probs)
    def get_var_params_for_n_stars(self, h, n_stars):
        """Slice the shared output vector for the requested star counts.

        Args:
            h: (batchsize, dim_out_all) raw output from `get_var_params_all`.
            n_stars: star counts, either (batchsize,) or
                (n_samples, batchsize); in the 1D case the sample dimension
                is added and squeezed away again on return.

        Returns:
            (loc_mean, loc_logvar, log_flux_mean, log_flux_logvar), each of
            shape (n_samples, batchsize, max_detections, ·) — or without the
            leading sample axis for 1D input.
        """
        if len(n_stars.shape) == 1:
            n_stars = n_stars.unsqueeze(0)
            squeeze_output = True
        else:
            squeeze_output = False

        # this class takes in an array of n_stars, n_samples x batchsize
        assert h.shape[1] == self.dim_out_all
        assert h.shape[0] == n_stars.shape[1]

        n_samples = n_stars.shape[0]
        batchsize = h.size(0)

        # Append one zero column: index dim_out_all in the index matrices is
        # the "unused slot" sentinel and gathers a 0.
        _h = torch.cat((h, torch.zeros(batchsize, 1, device = device)), dim = 1)

        # Gather the parameter slices that correspond to each tile's n_stars.
        loc_logit_mean = torch.gather(_h, 1, self.locs_mean_indx_mat[n_stars.transpose(0, 1)].reshape(batchsize, -1))
        loc_logvar = torch.gather(_h, 1, self.locs_var_indx_mat[n_stars.transpose(0, 1)].reshape(batchsize, -1))
        log_flux_mean = torch.gather(_h, 1, self.fluxes_mean_indx_mat[n_stars.transpose(0, 1)].reshape(batchsize, -1))
        log_flux_logvar = torch.gather(_h, 1, self.fluxes_var_indx_mat[n_stars.transpose(0, 1)].reshape(batchsize, -1))

        # reshape to (n_samples, batchsize, max_detections, param_dim)
        loc_logit_mean = loc_logit_mean.reshape(batchsize, n_samples, self.max_detections, 2).transpose(0, 1)
        loc_logvar = loc_logvar.reshape(batchsize, n_samples, self.max_detections, 2).transpose(0, 1)
        log_flux_mean = log_flux_mean.reshape(batchsize, n_samples, self.max_detections, self.n_source_params).transpose(0, 1)
        log_flux_logvar = log_flux_logvar.reshape(batchsize, n_samples, self.max_detections, self.n_source_params).transpose(0, 1)

        # Sigmoid maps location logits into (0, 1); entries gathered from the
        # zero sentinel stay exactly 0 via the mask.
        loc_mean = torch.sigmoid(loc_logit_mean) * (loc_logit_mean != 0).float()

        if self.constrain_logflux_mean:
            # Square to force non-negative flux means.
            log_flux_mean = log_flux_mean ** 2

        if squeeze_output:
            return loc_mean.squeeze(0), loc_logvar.squeeze(0), \
                    log_flux_mean.squeeze(0), log_flux_logvar.squeeze(0)
        else:
            return loc_mean, loc_logvar, \
                    log_flux_mean, log_flux_logvar
    def _get_hidden_indices(self):
        """Precompute, for each candidate n_stars, the indices into the raw
        output vector where its parameters live.

        The output vector packs the parameter blocks for n = 1..max_detections
        consecutively (triangular layout); unused slots are filled with
        `dim_out_all`, which points at the appended zero column in
        `get_var_params_for_n_stars`. `prob_indx` holds the position of each
        count logit.
        """
        # Index matrices default to the zero-sentinel position dim_out_all.
        self.locs_mean_indx_mat = \
            torch.full((self.max_detections + 1, 2 * self.max_detections),
                        self.dim_out_all, device = device, dtype = torch.long)
        self.locs_var_indx_mat = \
            torch.full((self.max_detections + 1, 2 * self.max_detections),
                        self.dim_out_all, device = device, dtype = torch.long)

        self.fluxes_mean_indx_mat = \
            torch.full((self.max_detections + 1, self.n_source_params * self.max_detections),
                        self.dim_out_all, device = device, dtype = torch.long)
        self.fluxes_var_indx_mat = \
            torch.full((self.max_detections + 1, self.n_source_params * self.max_detections),
                        self.dim_out_all, device = device, dtype = torch.long)

        self.prob_indx = torch.zeros(self.max_detections + 1, device = device).long()

        for n_detections in range(1, self.max_detections + 1):
            # Start of this n's block: all parameters for smaller n plus the
            # count logits seen so far.
            indx0 = int(0.5 * n_detections * (n_detections - 1) * self.n_params_per_star) + \
                        (n_detections - 1) + 1
            indx1 = (2 * n_detections) + indx0
            indx2 = (2 * n_detections) * 2 + indx0

            # indices for locations (means then variances)
            self.locs_mean_indx_mat[n_detections, 0:(2 * n_detections)] = torch.arange(indx0, indx1)
            self.locs_var_indx_mat[n_detections, 0:(2 * n_detections)] = torch.arange(indx1, indx2)

            indx3 = indx2 + (n_detections * self.n_source_params)
            indx4 = indx3 + (n_detections * self.n_source_params)

            # indices for fluxes (means then variances)
            self.fluxes_mean_indx_mat[n_detections, 0:(n_detections * self.n_source_params)] = torch.arange(indx2, indx3)
            self.fluxes_var_indx_mat[n_detections, 0:(n_detections * self.n_source_params)] = torch.arange(indx3, indx4)

            self.prob_indx[n_detections] = indx4
######################
# Modules for tiling images and parameters
######################
    def get_image_ptiles(self, images, locs = None, fluxes = None,
                         clip_max_stars = False):
        """Tile a batch of images (and optionally their star parameters).

        Args:
            images: (batchsize, n_bands, slen, slen) tensor.
            locs, fluxes: optional full-image star parameters; when both are
                given they are re-expressed per tile.
            clip_max_stars: if True, truncate per-tile stars to
                max_detections.

        Returns:
            (image_ptiles, tile_locs, tile_fluxes, tile_n_stars,
            tile_is_on_array); the last four are None when locs/fluxes are
            not provided.
        """
        assert len(images.shape) == 4 # should be batchsize x n_bands x slen x slen
        assert images.shape[1] == self.n_bands
        slen = images.shape[-1]

        if not (images.shape[-1] == self.slen):
            # get the coordinates
            tile_coords = image_utils.get_tile_coords(slen, slen,
                                                      self.ptile_slen,
                                                      self.step);
        else:
            # else, use the cached coordinates
            tile_coords = self.tile_coords

        batchsize = images.shape[0]

        image_ptiles = \
            image_utils.tile_images(images,
                                    self.ptile_slen,
                                    self.step)

        if (locs is not None) and (fluxes is not None):
            assert fluxes.shape[2] == self.n_source_params

            # get parameters in tiles as well
            tile_locs, tile_fluxes, tile_n_stars, tile_is_on_array = \
                image_utils.get_params_in_tiles(tile_coords,
                                                locs,
                                                fluxes,
                                                slen,
                                                self.ptile_slen,
                                                self.edge_padding)

            # if (self.weights is None) or (images.shape[0] != self.batchsize):
            #     self.weights = get_weights(n_stars.clamp(max = self.max_detections))

            # Pad the star axis with zeros up to max_detections so shapes
            # are consistent across batches.
            if tile_locs.shape[1] < self.max_detections:
                n_pad = self.max_detections - tile_locs.shape[1]
                pad_zeros = torch.zeros(tile_locs.shape[0], n_pad, tile_locs.shape[-1], device = device)
                tile_locs = torch.cat((tile_locs, pad_zeros), dim = 1)

                pad_zeros2 = torch.zeros(tile_fluxes.shape[0], n_pad, tile_fluxes.shape[-1], device = device)
                tile_fluxes = torch.cat((tile_fluxes, pad_zeros2), dim = 1)

                pad_zeros3 = torch.zeros((tile_fluxes.shape[0], n_pad), dtype = torch.long, device = device)
                tile_is_on_array = torch.cat((tile_is_on_array, pad_zeros3), dim = 1)

            if clip_max_stars:
                # Drop stars beyond the detection cap (keeps the first ones).
                tile_n_stars = tile_n_stars.clamp(max = self.max_detections)
                tile_locs = tile_locs[:, 0:self.max_detections, :]
                tile_fluxes = tile_fluxes[:, 0:self.max_detections, :]
                tile_is_on_array = tile_is_on_array[:, 0:self.max_detections]
        else:
            tile_locs = None
            tile_fluxes = None
            tile_n_stars = None
            tile_is_on_array = None

        return image_ptiles, tile_locs, tile_fluxes, \
                tile_n_stars, tile_is_on_array
######################
# Modules to sample our variational distribution and get parameters on the full image
######################
    def _get_full_params_from_sampled_params(self, tile_locs_sampled,
                                             tile_fluxes_sampled,
                                             slen):
        """Stitch per-tile samples back into full-image catalogs.

        Args:
            tile_locs_sampled: sampled locations per tile; assumed shape
                (n_samples, n_image_ptiles, max_detections, 2) -- TODO confirm.
            tile_fluxes_sampled: sampled source parameters per tile; the last
                dimension must equal ``self.n_source_params``.
            slen: side length of the full image being reconstructed.

        Returns:
            ``(locs, fluxes, n_stars)`` in full-image coordinates, as produced
            by ``image_utils.get_full_params_from_tile_params``.
        """
        n_samples = tile_locs_sampled.shape[0]
        n_image_ptiles = tile_locs_sampled.shape[1]
        assert self.n_source_params == tile_fluxes_sampled.shape[-1]
        # recompute tile coordinates only if the image size differs from the
        # cached one; otherwise reuse the coordinates built at init time
        if not (slen == self.slen):
            tile_coords = image_utils.get_tile_coords(slen, slen,
                                                      self.ptile_slen,
                                                      self.step);
        else:
            tile_coords = self.tile_coords
        # the flattened ptile axis must contain a whole number of tilings
        assert (n_image_ptiles % tile_coords.shape[0]) == 0
        # flatten the sample axis into the ptile axis before stitching
        locs, fluxes, n_stars = \
            image_utils.get_full_params_from_tile_params(
                tile_locs_sampled.reshape(n_samples * n_image_ptiles, -1, 2),
                tile_fluxes_sampled.reshape(n_samples * n_image_ptiles, -1, self.n_source_params),
                tile_coords,
                slen,
                self.ptile_slen,
                self.edge_padding)
        return locs, fluxes, n_stars
    def sample_star_encoder(self, image,
                            n_samples = 1,
                            return_map_n_stars = False,
                            return_map_star_params = False,
                            tile_n_stars = None,
                            return_log_q = False,
                            training = False,
                            enumerate_all_n_stars = False):
        """Sample catalogs (locations, fluxes, star counts) from the encoder.

        Args:
            image: observed image; must have batch size 1.
            n_samples: number of catalogs to draw.
            return_map_n_stars: take the argmax star count instead of sampling.
            return_map_star_params: use the variational means (zero sd) for
                locations and fluxes instead of sampling.
            tile_n_stars: if given, condition on these per-tile star counts
                rather than sampling them.
            return_log_q: also return the log variational densities of the
                sampled locations, fluxes, and counts.
            training: if False, detach encoder outputs so no gradients flow.
            enumerate_all_n_stars: enumerate every per-tile count combination
                (exponential in the number of tiles) instead of sampling.

        Returns:
            ``(locs, fluxes, n_stars, log_q_locs, log_q_fluxes, log_q_n_stars)``
            on the full image; the last three are None unless ``return_log_q``.
        """
        # our sampling only works for one image at a time at the moment ...
        assert image.shape[0] == 1
        slen = image.shape[-1]
        # the image ptiles
        image_ptiles = self.get_image_ptiles(image,
                                             locs = None, fluxes = None)[0]
        # pass through NN
        h = self.get_var_params_all(image_ptiles)
        # get log probs for number of stars
        log_probs_nstar_tile = self.get_logprob_n_from_var_params(h);
        if not training:
            # evaluation mode: cut the graph so sampling carries no gradients
            h = h.detach()
            log_probs_nstar_tile = log_probs_nstar_tile.detach()
        # sample number of stars
        if tile_n_stars is None:
            if return_map_n_stars:
                # MAP count per tile, replicated across samples
                tile_n_stars_sampled = \
                    torch.argmax(log_probs_nstar_tile.detach(), dim = 1).repeat(n_samples).view(n_samples, -1)
            elif enumerate_all_n_stars:
                # every combination of 0..max_detections over all tiles
                all_combs = product(range(0, self.max_detections + 1),
                                    repeat = image_ptiles.shape[0])
                l = np.array([comb for comb in all_combs])
                tile_n_stars_sampled = torch.Tensor(l).type(torch.LongTensor).to(device)
                # repeat if necessary
                _n_samples = int(np.ceil(n_samples / tile_n_stars_sampled.shape[0]))
                tile_n_stars_sampled = tile_n_stars_sampled.repeat(_n_samples, 1)
                n_samples = tile_n_stars_sampled.shape[0]
            else:
                # categorical sampling from the encoder's count distribution
                tile_n_stars_sampled = \
                    utils.sample_class_weights(torch.exp(log_probs_nstar_tile.detach()), n_samples).view(n_samples, -1)
        else:
            # condition on user-supplied counts
            tile_n_stars_sampled = tile_n_stars.repeat(n_samples).view(n_samples, -1)
        # print(tile_n_stars_sampled)
        tile_n_stars_sampled = tile_n_stars_sampled.detach()
        is_on_array = utils.get_is_on_from_n_stars_2d(tile_n_stars_sampled,
                                                      self.max_detections)
        # get variational parameters: these are on image ptiles
        loc_mean, loc_logvar, \
            log_flux_mean, log_flux_logvar = \
                self.get_var_params_for_n_stars(h, tile_n_stars_sampled)
        if return_map_star_params:
            # zero sd reduces the reparameterized draws below to their means
            loc_sd = torch.zeros(loc_logvar.shape, device=device)
            log_flux_sd = torch.zeros(log_flux_logvar.shape, device=device)
        else:
            loc_sd = torch.exp(0.5 * loc_logvar)
            log_flux_sd = torch.exp(0.5 * log_flux_logvar) # .clamp(max = 0.5)
        # sample locations (reparameterization trick; off sources zeroed out)
        _locs_randn = torch.randn(loc_mean.shape, device=device)
        tile_locs_sampled = (loc_mean + _locs_randn * loc_sd) * \
                            is_on_array.unsqueeze(3).float()
        tile_locs_sampled = tile_locs_sampled.clamp(min = 0., max = 1.)
        # sample fluxes
        _fluxes_randn = torch.randn(log_flux_mean.shape, device=device);
        tile_log_flux_sampled = log_flux_mean + _fluxes_randn * log_flux_sd
        # guard against overflow when exponentiating below
        tile_log_flux_sampled = tile_log_flux_sampled.clamp(max = np.log(1e12))
        if self.constrain_source_params:
            # positive fluxes: exp transform shifted by fmin
            tile_fluxes_sampled = \
                (torch.exp(tile_log_flux_sampled) + self.fmin) * is_on_array.unsqueeze(3).float()
        else:
            tile_fluxes_sampled = \
                tile_log_flux_sampled * is_on_array.unsqueeze(3).float()
        # get parameters on full image
        locs, fluxes, n_stars = \
            self._get_full_params_from_sampled_params(tile_locs_sampled,
                                                      tile_fluxes_sampled,
                                                      slen)
        if return_log_q:
            # log q(.) summed over tiles and detections, per sample; off
            # sources are masked out of the sums
            log_q_locs = (utils.eval_normal_logprob(tile_locs_sampled, loc_mean,
                                                    loc_logvar) * \
                          is_on_array.float().unsqueeze(3)).reshape(n_samples, -1).sum(1)
            log_q_fluxes = (utils.eval_normal_logprob(tile_log_flux_sampled, log_flux_mean,
                                                      log_flux_logvar) * \
                            is_on_array.float().unsqueeze(3)).reshape(n_samples, -1).sum(1)
            log_q_n_stars = torch.gather(log_probs_nstar_tile, 1,
                                         tile_n_stars_sampled.transpose(0, 1)).transpose(0, 1).sum(1)
        else:
            log_q_locs = None
            log_q_fluxes = None
            log_q_n_stars = None
        return locs, fluxes, n_stars, \
            log_q_locs, log_q_fluxes, log_q_n_stars
| 18,839 | 40.045752 | 130 | py |
DeblendingStarfields | DeblendingStarfields-master/deblending_runjingdev/elbo_lib.py | import torch
import numpy as np
import time
from torch import nn
import deblending_runjingdev.starnet_lib as starnet_lib
from deblending_runjingdev.which_device import device
def get_neg_elbo(simulator, full_image, locs, fluxes, n_stars, \
                 log_q_locs, log_q_fluxes, log_q_n_stars,
                 mean_stars,
                 pad = 0,
                 clamp = None,
                 uniform_nstars = False):
    """Per-sample negative ELBO for sampled catalogs against an observed image.

    Args:
        simulator: object with ``draw_image_from_params`` producing a noiseless
            reconstruction from (locs, fluxes, n_stars).
        full_image: observed image tensor.
        locs, fluxes, n_stars: sampled catalogs on the full image.
        log_q_locs, log_q_fluxes, log_q_n_stars: per-sample log variational
            densities of the samples.
        mean_stars: Poisson mean for the prior on the number of stars.
        pad: number of border pixels excluded from the likelihood.
        clamp: if given, mask pixels whose relative residual exceeds it.
        uniform_nstars: drop the Poisson count prior (use a flat one).

    Returns:
        ``(neg_elbo, neg_loglik, neg_logprior, recon)`` per sample.
    """
    # get reconstruction
    recon = \
        simulator.draw_image_from_params(locs, fluxes, n_stars, add_noise = False)
    # option to mask outliers
    if clamp is not None:
        mask_bool = (((full_image - recon) / recon).abs() < clamp).detach().float()
    else:
        mask_bool = 1
    # get log likelihood: Gaussian with variance equal to the mean
    # (up to additive constants), masked and trimmed at the borders
    loglik = (- 0.5 * (full_image - recon)**2 / recon - 0.5 * torch.log(recon)) * mask_bool
    padm = full_image.shape[-1] - pad
    loglik = loglik[:, :, pad:padm, pad:padm]
    loglik = loglik.sum(-1).sum(-1).sum(-1)
    # get entropy terms
    entropy = - log_q_locs - log_q_fluxes - log_q_n_stars
    # TODO: need to pass in prior parameters
    alpha = 0.5
    if uniform_nstars:
        log_prior_nstars = 0.0
    else:
        # Poisson(mean_stars) prior on the count, up to constants
        log_prior_nstars = n_stars * np.log(mean_stars) - torch.lgamma(n_stars.float() + 1)
    # sources with zero flux in the first band are treated as "off"
    is_on_fluxes = (fluxes[:, :, 0] > 0.).detach().float()
    # power-law (Pareto-like) prior on the first-band flux
    log_prior_fluxes = (- (alpha + 1) * torch.log(fluxes[:, :, 0] + 1e-16) * \
                        is_on_fluxes).sum(-1)
    if fluxes.shape[-1] > 1:
        # TODO assumes two bands
        # standard-normal prior on the color (magnitude difference)
        color = -2.5 * (torch.log10(fluxes[:, :, 0] + 1e-16) - \
                        torch.log10(fluxes[:, :, 1] + 1e-16)) * is_on_fluxes
        log_prior_color = (- 0.5 * color**2).sum(-1)
    else:
        log_prior_color = 0.0
    log_prior = log_prior_nstars + log_prior_fluxes + log_prior_color
    return -(loglik + entropy + log_prior), -loglik, -log_prior, recon
def eval_star_encoder_on_elbo(full_image, star_encoder, simulator,
                              n_samples,
                              mean_stars,
                              return_map = False,
                              training = True,
                              clamp = None,
                              pad = 0):
    """Sample catalogs from the encoder and evaluate the negative ELBO.

    Args:
        full_image: observed image (batch size 1, per the encoder's sampler).
        star_encoder: encoder providing ``sample_star_encoder``.
        simulator: forward model passed through to ``get_neg_elbo``.
        n_samples: number of catalog samples.
        mean_stars: Poisson prior mean on the star count.
        return_map: use MAP counts and mean star parameters instead of sampling.
        training: keep gradients flowing through the encoder.
        clamp, pad: forwarded to ``get_neg_elbo``.

    Returns:
        ``(neg_elbo, neg_loglik, recon, log_q_n_stars)`` per sample.
    """
    # sample
    locs_sampled, fluxes_sampled, n_stars_sampled, \
        log_q_locs, log_q_fluxes, log_q_n_stars = \
            star_encoder.sample_star_encoder(full_image,
                                             n_samples = n_samples,
                                             training = training,
                                             return_map_n_stars = return_map,
                                             return_map_star_params = return_map,
                                             return_log_q = True)
    # get elbo; counts are detached since they are discrete (no pathwise grad)
    neg_elbo, neg_loglik, neg_logprior, recon = \
        get_neg_elbo(simulator, full_image,
                     locs_sampled, fluxes_sampled, n_stars_sampled.detach(),
                     log_q_locs, log_q_fluxes, log_q_n_stars, mean_stars,
                     clamp = clamp,
                     pad = pad)
    return neg_elbo, neg_loglik, recon, log_q_n_stars
def save_elbo_results(full_image, star_encoder, simulator, mean_stars,
                      n_samples = 100, pad = 0):
    """Print and return sampled and MAP negative-ELBO summaries.

    Returns a numpy array of
    ``[neg_elbo, neg_loglik, map_neg_elbo, map_neg_loglik, timestamp]``
    where the first two are Monte-Carlo means over ``n_samples`` draws and the
    MAP values come from a single maximum-a-posteriori evaluation.
    """
    # Monte-Carlo estimate over sampled catalogs (no gradients)
    neg_elbo, neg_loglik, _, _ = \
        eval_star_encoder_on_elbo(full_image, star_encoder,
                                  simulator,
                                  n_samples = n_samples,
                                  mean_stars = mean_stars,
                                  training = False,
                                  pad = pad)
    # single MAP evaluation for comparison
    map_neg_elbo, map_neg_loglik, _, _ = \
        eval_star_encoder_on_elbo(full_image, star_encoder,
                                  simulator,
                                  n_samples = 1,
                                  mean_stars = mean_stars,
                                  return_map = True,
                                  training = False,
                                  pad = pad)
    print('neg elbo: {:.3e}; neg log-likelihood: {:.3e}'.format(neg_elbo.mean(), neg_loglik.mean()))
    print('neg elbo (map): {:.3e}; neg log-likelihood (map): {:.3e}'.format(map_neg_elbo.mean(),
                                                                            map_neg_loglik.mean()))
    # timestamp lets callers track wall-clock progress across checkpoints
    return np.array([neg_elbo.detach().mean().cpu().numpy(),
                     neg_loglik.detach().mean().cpu().numpy(),
                     map_neg_elbo.detach().mean().cpu().numpy(),
                     map_neg_loglik.detach().mean().cpu().numpy(),
                     time.time()])
def get_pseudo_loss(full_image, star_encoder, simulator, mean_stars, n_samples,
                    pad = 0):
    """REINFORCE-style pseudo-loss for training through the discrete counts.

    The negative ELBO is estimated twice: once with gradients attached
    (``training = True``) for the pathwise term, and once fully detached
    (``training = False``) to serve as a control variate for the
    score-function term on the discrete number-of-stars variable.

    Args:
        full_image: observed image (batch size 1).
        star_encoder, simulator: forwarded to ``eval_star_encoder_on_elbo``.
        mean_stars: Poisson prior mean on the star count.
        n_samples: number of Monte-Carlo samples per estimate.
        pad: border pixels excluded from the likelihood.

    Returns:
        Scalar pseudo-loss whose gradient is an estimate of the ELBO gradient.
    """
    # get elbo with gradients attached (the neg-loglik slot is unused here)
    neg_elbo, _, _, log_q_n_stars = \
        eval_star_encoder_on_elbo(full_image, star_encoder, simulator,
                                  n_samples,
                                  mean_stars,
                                  training = True,
                                  pad = pad)
    # get control variate: an independent, detached estimate
    cv, _, _, _ = \
        eval_star_encoder_on_elbo(full_image, star_encoder, simulator,
                                  n_samples,
                                  mean_stars,
                                  training = False,
                                  pad = pad)
    # get pseudo-loss: score-function term (variance-reduced by cv) plus the
    # reparameterized pathwise term
    ps_loss = ((neg_elbo.detach() - cv.detach()) * log_q_n_stars + \
               neg_elbo).mean()
    return ps_loss
def get_pseudo_loss_all_sum(full_image, star_encoder, simulator,
                            mean_stars, n_samples,
                            clamp = None,
                            pad = 0):
    """Exact expectation of the negative ELBO over all count configurations.

    Enumerates every per-tile star-count combination (via the encoder's
    ``enumerate_all_n_stars`` mode) and weights each configuration's negative
    ELBO by its variational probability ``exp(log_q_n_stars)``.

    NOTE(review): the final division is by ``n_samples`` rather than a sum of
    weights -- presumably the enumeration replication makes this the right
    normalizer; confirm against the encoder's repeat logic.
    """
    locs_sampled, fluxes_sampled, n_stars_sampled, \
        log_q_locs, log_q_fluxes, log_q_n_stars = \
            star_encoder.sample_star_encoder(full_image,
                                             return_map_n_stars = False,
                                             return_map_star_params = False,
                                             n_samples = n_samples,
                                             return_log_q = True,
                                             training = True,
                                             enumerate_all_n_stars = True)
    # get elbo
    neg_elbo, neg_loglik, neg_logprior, recon = \
        get_neg_elbo(simulator, full_image,
                     locs_sampled, fluxes_sampled, n_stars_sampled.detach(),
                     log_q_locs, log_q_fluxes, log_q_n_stars, mean_stars,
                     clamp = clamp,
                     pad = pad)
    # probability-weighted sum over enumerated configurations
    return (neg_elbo * log_q_n_stars.exp()).sum() / n_samples
def loss_on_true_nstars(full_image, star_encoder, simulator,
                        mean_stars, n_samples,
                        true_locs, true_fluxes,
                        clamp = None,
                        pad = 0):
    """Mean negative ELBO when conditioning on the true per-tile star counts.

    The true catalog (``true_locs``, ``true_fluxes``) is tiled to obtain the
    ground-truth number of stars in every image ptile; the encoder then
    samples locations and fluxes conditioned on those counts.
    """
    # derive the true per-tile counts from the ground-truth catalog
    image_ptiles, tile_locs, tile_fluxes, \
        tile_n_stars, tile_is_on_array = \
            star_encoder.get_image_ptiles(full_image,
                                          true_locs.unsqueeze(0),
                                          true_fluxes.unsqueeze(0))
    # sample star parameters conditioned on the true counts
    locs_sampled, fluxes_sampled, n_stars_sampled, \
        log_q_locs, log_q_fluxes, log_q_n_stars = \
            star_encoder.sample_star_encoder(full_image,
                                             return_map_star_params = False,
                                             n_samples = n_samples,
                                             return_log_q = True,
                                             training = True,
                                             tile_n_stars = tile_n_stars);
    # get elbo
    neg_elbo, neg_loglik, neg_logprior, recon = \
        get_neg_elbo(simulator, full_image,
                     locs_sampled, fluxes_sampled, n_stars_sampled.detach(),
                     log_q_locs, log_q_fluxes, log_q_n_stars, mean_stars,
                     clamp = clamp,
                     pad = pad)
    return neg_elbo.mean()
| 8,194 | 40.180905 | 107 | py |
DeblendingStarfields | DeblendingStarfields-master/deblending_runjingdev/wake_lib.py | import torch
import torch.nn as nn
from torch import optim
import numpy as np
from deblending_runjingdev.simulated_datasets_lib import _get_mgrid, plot_multiple_stars
from deblending_runjingdev.psf_transform_lib import PowerLawPSF
import deblending_runjingdev.utils
from deblending_runjingdev.which_device import device
import time
def _sample_image(observed_image, sample_every = 10):
batchsize = observed_image.shape[0]
n_bands = observed_image.shape[1]
slen = observed_image.shape[-1]
samples = torch.zeros(n_bands,
int(np.floor(slen / sample_every)),
int(np.floor(slen / sample_every)))
for i in range(samples.shape[1]):
for j in range(samples.shape[2]):
x0 = i*sample_every
x1 = j*sample_every
samples[:, i, j] = \
observed_image[:, :,\
x0:(x0+sample_every), x1:(x1+sample_every)].reshape(
batchsize,
n_bands, -1).min(2)[0].mean(0)
return samples
def _fit_plane_to_background(background):
    """Least-squares fit of a plane (intercept plus two slopes) per band.

    Args:
        background: tensor of shape (n_bands, slen, slen).

    Returns:
        numpy array of shape (n_bands, 3): for each band, the coefficients
        (constant, slope along grid axis 0, slope along grid axis 1) on the
        normalized ``_get_mgrid`` coordinates.
    """
    assert len(background.shape) == 3
    n_bands = background.shape[0]
    slen = background.shape[-1]
    coeffs = np.zeros((n_bands, 3))
    for band in range(n_bands):
        targets = background[band].flatten().detach().cpu().numpy()
        grid = _get_mgrid(slen).detach().cpu().numpy()
        # design matrix: one row per pixel, columns [1, grid_0, grid_1]
        design = np.ones((slen**2, 3))
        design[:, 1] = grid[:, :, 0].flatten()
        design[:, 2] = grid[:, :, 1].flatten()
        # solve the normal equations (X^T X) beta = X^T y
        gram = design.transpose() @ design
        moment = design.transpose() @ targets
        coeffs[band, :] = np.linalg.solve(gram, moment)
    return coeffs
class PlanarBackground(nn.Module):
    """Differentiable planar (affine) sky-background model.

    For every band, background(x) = c0 + c1 * grid_0 + c2 * grid_1 on the
    normalized pixel grid from ``_get_mgrid``; the three coefficients per band
    are trainable parameters initialized from ``init_background_params``.
    """
    def __init__(self, init_background_params,
                 image_slen = 101):
        super(PlanarBackground, self).__init__()
        # init_background_params: (n_bands, 3) plane coefficients per band
        assert len(init_background_params.shape) == 2
        self.n_bands = init_background_params.shape[0]
        self.init_background_params = init_background_params.clone()
        self.image_slen = image_slen
        # get grid, replicated once per band for broadcasting in forward()
        _mgrid = _get_mgrid(image_slen).to(device)
        self.mgrid = torch.stack([_mgrid for i in range(self.n_bands)], dim = 0)
        # initial weights
        self.params = nn.Parameter(init_background_params.clone())
    def forward(self):
        """Render the planar background, shape (n_bands, slen, slen)."""
        return self.params[:, 0][:, None, None] + \
               self.params[:, 1][:, None, None] * self.mgrid[:, :, :, 0] + \
               self.params[:, 2][:, None, None] * self.mgrid[:, :, :, 1]
class ModelParams(nn.Module):
    """Trainable generative-model parameters for the wake phase: a power-law
    PSF and a planar sky background, evaluated against an observed image."""
    def __init__(self, observed_image,
                 init_psf_params,
                 init_background_params,
                 pad = 5):
        """
        Args:
            observed_image: (batchsize or 1) x n_bands x slen x slen tensor.
            init_psf_params: initial PSF parameters; first dim is n_bands.
            init_background_params: (n_bands, 3) plane coefficients, or None
                to estimate them from the image itself.
            pad: border pixels excluded from the loss.
        """
        super(ModelParams, self).__init__()
        self.pad = pad
        # observed image is batchsize (or 1) x n_bands x slen x slen
        assert len(observed_image.shape) == 4
        self.observed_image = observed_image
        self.slen = observed_image.shape[-1]
        # get n_bands
        assert observed_image.shape[1] == init_psf_params.shape[0]
        self.n_bands = init_psf_params.shape[0]
        # get psf
        self.init_psf_params = init_psf_params
        # if image slen is even, add one. psf dimension must be odd
        psf_slen = self.slen + ((self.slen % 2) == 0) * 1
        self.power_law_psf = PowerLawPSF(self.init_psf_params,
                                         image_slen = psf_slen)
        self.init_psf = self.power_law_psf.forward().detach().clone()
        self.psf = self.power_law_psf.forward()
        # set up initial background parameters; estimate from the image when
        # none are given
        if init_background_params is None:
            self._get_init_background()
        else:
            assert init_background_params.shape[0] == self.n_bands
            self.init_background_params = init_background_params
            self.planar_background = PlanarBackground(image_slen=self.slen,
                                                      init_background_params=self.init_background_params)
        self.init_background = self.planar_background.forward().detach()
        self.cached_grid = _get_mgrid(observed_image.shape[-1]).to(device)
    def _plot_stars(self, locs, fluxes, n_stars, psf):
        """Render and cache the star field for the given catalog and PSF."""
        self.stars = plot_multiple_stars(self.slen, locs, n_stars,
                                         fluxes, psf, self.cached_grid)
    def _get_init_background(self, sample_every = 25):
        """Estimate initial plane coefficients by coarse-sampling the image."""
        sampled_background = _sample_image(self.observed_image, sample_every)
        self.init_background_params = torch.Tensor(_fit_plane_to_background(sampled_background)).to(device)
        self.planar_background = PlanarBackground(image_slen=self.slen,
                                                  init_background_params=self.init_background_params)
    def get_background(self):
        """Current background, with a leading batch dimension added."""
        return self.planar_background.forward().unsqueeze(0)
    def get_psf(self):
        """Current PSF rendered from the power-law parameters."""
        return self.power_law_psf.forward()
    def get_loss(self, use_cached_stars = False,
                 locs = None, fluxes = None, n_stars = None):
        """Negative log-likelihood of the observed image per catalog sample.

        When ``use_cached_stars`` is False the star field is re-rendered from
        the given catalog; otherwise the cached (detached) rendering is reused.
        Returns ``(recon_mean, loss)`` where loss has one entry per sample.
        """
        background = self.get_background()
        if not use_cached_stars:
            assert locs is not None
            assert fluxes is not None
            assert n_stars is not None
            psf = self.get_psf()
            self._plot_stars(locs, fluxes, n_stars, psf)
        else:
            assert hasattr(self, 'stars')
            self.stars = self.stars.detach()
        # clamp keeps the Poisson-like variance term strictly positive
        recon_mean = (self.stars + background).clamp(min = 1e-6)
        error = 0.5 * ((self.observed_image - recon_mean)**2 / recon_mean) + 0.5 * torch.log(recon_mean)
        # drop the padded border before summing per sample
        loss = error[:, :, self.pad:(self.slen - self.pad),
                     self.pad:(self.slen - self.pad)].reshape(error.shape[0], -1).sum(1)
        return recon_mean, loss
def get_wake_loss(image, star_encoder, model_params, n_samples, run_map = False):
    """Monte-Carlo wake-phase loss.

    Draws catalogs from the (fixed) encoder, detaches them, and evaluates the
    generative model's loss on those samples; returns the mean over samples.
    With ``run_map`` the encoder's MAP catalog is used instead of sampling.
    """
    samples = star_encoder.sample_star_encoder(image,
                                               return_map_n_stars = run_map,
                                               return_map_star_params = run_map,
                                               n_samples = n_samples)
    sampled_locs, sampled_fluxes, sampled_n_stars = samples[0:3]
    # catalogs are detached: only the model parameters receive gradients
    per_sample_loss = model_params.get_loss(locs = sampled_locs.detach(),
                                            fluxes = sampled_fluxes.detach(),
                                            n_stars = sampled_n_stars.detach())[1]
    return per_sample_loss.mean()
def run_wake(image, star_encoder, init_psf_params,
             init_background_params,
             n_samples,
             out_filename,
             n_epochs = 100,
             lr = 1e-3,
             print_every = 20,
             run_map = False):
    """Optimize the PSF and background (wake phase) with a fixed encoder.

    Runs Adam on the ``ModelParams`` PSF/background parameters against the
    Monte-Carlo wake loss, periodically logging an evaluation loss and saving
    losses and parameter snapshots to files prefixed by ``out_filename``.

    Returns:
        ``(model_params, map_loss)`` -- the fitted model and its final
        MAP-catalog loss.
    """
    model_params = ModelParams(image,
                               init_psf_params,
                               init_background_params)
    # running-average accumulators (currently unused; see commented code below)
    avg_loss = 0.0
    counter = 0
    t0 = time.time()
    test_losses = []
    optimizer = optim.Adam([{'params': model_params.power_law_psf.parameters(),
                             'lr': lr},
                            {'params': model_params.planar_background.parameters(),
                             'lr': lr}])
    # optimizer = optim.LBFGS(model_params.parameters(),
    #                         line_search_fn = 'strong_wolfe')
    if run_map:
        # MAP mode: a single deterministic catalog per step suffices
        n_samples = 1
    for epoch in range(1, n_epochs + 1):
        def closure():
            # standard optimizer closure: zero grads, compute loss, backprop
            optimizer.zero_grad()
            loss = get_wake_loss(image, star_encoder, model_params,
                                 n_samples, run_map)
            loss.backward()
            return loss
        optimizer.step(closure)
        # avg_loss += loss.detach()
        # counter += 1
        if ((epoch % print_every) == 0) or (epoch == n_epochs):
            # evaluate with a single MAP catalog for a low-variance readout
            eval_loss = get_wake_loss(image, star_encoder, model_params,
                                      n_samples = 1, run_map = True).detach()
            elapsed = time.time() - t0
            print('[{}] loss: {:0.4f} \t[{:.1f} seconds]'.format(\
                epoch, eval_loss, elapsed))
            test_losses.append(eval_loss)
            np.savetxt(out_filename + '-wake_losses', test_losses)
            # reset
            avg_loss = 0.0
            counter = 0
            t0 = time.time()
            # snapshot current PSF and background parameters to disk
            np.save(out_filename + '-powerlaw_psf_params',
                    list(model_params.power_law_psf.parameters())[0].data.cpu().numpy())
            np.save(out_filename + '-planarback_params',
                    list(model_params.planar_background.parameters())[0].data.cpu().numpy())
    map_loss = get_wake_loss(image, star_encoder, model_params,
                             n_samples = 1, run_map = True)
    return model_params, map_loss
# class FluxParams(nn.Module):
# def __init__(self, init_fluxes, fmin):
# super(FluxParams, self).__init__()
#
# self.fmin = fmin
# self.init_flux_params = self._free_flux_params(init_fluxes)
# self.flux_params = nn.Parameter(self.init_flux_params.clone())
#
# def _free_flux_params(self, fluxes):
# return torch.log(fluxes.clamp(min = self.fmin + 1) - self.fmin)
#
# def get_fluxes(self):
# return torch.exp(self.flux_params) + self.fmin
#
# class EstimateModelParams(nn.Module):
# def __init__(self, observed_image, locs, n_stars,
# init_psf_params,
# init_background_params,
# init_fluxes = None,
# fmin = 1e-3,
# alpha = 0.5,
# pad = 5):
#
# super(EstimateModelParams, self).__init__()
#
# self.pad = pad
# self.alpha = alpha
# self.fmin = fmin
# self.locs = locs
# self.n_stars = n_stars
#
# # observed image is batchsize (or 1) x n_bands x slen x slen
# assert len(observed_image.shape) == 4
#
# self.observed_image = observed_image
# self.slen = observed_image.shape[-1]
#
# # batchsize
# assert len(n_stars) == locs.shape[0]
# self.batchsize = locs.shape[0]
#
# # get n_bands
# assert observed_image.shape[1] == init_psf_params.shape[0]
# self.n_bands = init_psf_params.shape[0]
#
# # get psf
# self.init_psf_params = init_psf_params
# self.power_law_psf = PowerLawPSF(self.init_psf_params,
# image_slen = self.slen)
# self.init_psf = self.power_law_psf.forward().detach()
#
# self.max_stars = locs.shape[1]
# assert locs.shape[2] == 2
#
# # boolean for stars being on
# self.is_on_array = utils.get_is_on_from_n_stars(n_stars, self.max_stars)
#
# # set up initial background parameters
# if init_background_params is None:
# self._get_init_background()
# else:
# assert init_background_params.shape[0] == self.n_bands
# self.init_background_params = init_background_params
#
# self.planar_background = PlanarBackground(image_slen=self.slen,
# init_background_params=self.init_background_params)
#
# self.init_background = self.planar_background.forward().detach()
#
# # initial flux parameters
# if init_fluxes is None:
# self._get_init_fluxes()
# else:
# self.init_fluxes = init_fluxes
#
# self.flux_params_class = FluxParams(self.init_fluxes, self.fmin)
#
# # TODO: pass these as an argument
# self.color_mean = 0.3
# self.color_var = 0.15**2
#
# self.cached_grid = _get_mgrid(observed_image.shape[-1]).to(device)
# self._set_star_basis(self.init_psf)
#
# def _set_star_basis(self, psf):
# self.star_basis = \
# plot_one_star(self.slen, self.locs.view(-1, 2), psf,
# cached_grid = self.cached_grid).view(self.batchsize,
# self.max_stars,
# self.n_bands,
# self.slen, self.slen) * \
# self.is_on_array[:, :, None, None, None]
#
# def _get_init_background(self, sample_every = 25):
# sampled_background = _sample_image(self.observed_image, sample_every)
# self.init_background_params = torch.Tensor(_fit_plane_to_background(sampled_background)).to(device)
#
# def _get_init_fluxes(self):
#
# locs_indx = torch.round(self.locs * (self.slen - 1)).type(torch.long).clamp(max = self.slen - 2,
# min = 2)
#
# sky_subtr_image = self.observed_image - self.init_background
# self.init_fluxes = torch.zeros(self.batchsize, self.max_stars, self.n_bands).to(device)
#
# for i in range(self.locs.shape[0]):
# if self.observed_image.shape[0] == 1:
# obs_indx = 0
# else:
# obs_indx = i
#
# # # take the min over a box of the location
# # init_fluxes_i = torch.zeros(9, self.max_stars, self.n_bands)
# # n = 0
# # for j in [-1, 0, 1]:
# # for k in [-1, 0, 1]:
# # init_fluxes_i[n] = sky_subtr_image[obs_indx, :,
# # locs_indx[i, :, 0] + j,
# # locs_indx[i, :, 1] + k].transpose(0, 1)
# # n +=1
# #
# # self.init_fluxes[i] = init_fluxes_i.mean(0)
#
# self.init_fluxes[i] = \
# sky_subtr_image[obs_indx, :,
# locs_indx[i, :, 0], locs_indx[i, :, 1]].transpose(0, 1)
#
# self.init_fluxes = self.init_fluxes / self.init_psf.view(self.n_bands, -1).max(1)[0][None, None, :]
#
# def get_fluxes(self):
# return self.flux_params_class.get_fluxes()
#
# def get_background(self):
# return self.planar_background.forward().unsqueeze(0)
#
# def get_psf(self):
# return self.power_law_psf.forward()
#
# def get_loss(self, use_cached_star_basis = False):
# background = self.get_background()
# fluxes = self.get_fluxes()
#
# if not use_cached_star_basis:
# psf = self.get_psf()
# self._set_star_basis(psf)
# else:
# self.star_basis = self.star_basis.detach()
#
#
# recon_mean = (fluxes[:, :, :, None, None] * self.star_basis).sum(1) + \
# background
# recon_mean = recon_mean.clamp(min = 1)
#
# error = 0.5 * ((self.observed_image - recon_mean)**2 / recon_mean) + 0.5 * torch.log(recon_mean)
#
# neg_loglik = error[:, :, self.pad:(self.slen - self.pad), self.pad:(self.slen - self.pad)].sum()
# assert ~torch.isnan(neg_loglik)
#
# # prior terms
# log_flux = torch.log(fluxes)
# flux_prior = - (self.alpha + 1) * (log_flux[:, :, 0] * self.is_on_array).sum()
# if self.n_bands > 1:
# colors = 2.5 * (log_flux[:, :, 1:] - log_flux[:, :, 0:1]) / np.log(10.)
# color_prior = - 0.5 * (colors - self.color_mean)**2 / self.color_var
# flux_prior += (color_prior * self.is_on_array.unsqueeze(-1)).sum()
# assert ~torch.isnan(flux_prior)
#
# loss = neg_loglik - flux_prior
#
# return recon_mean, loss
#
# def _run_optimizer(self, optimizer, tol,
# use_cached_star_basis = False,
# max_iter = 20, print_every = False):
#
# def closure():
# optimizer.zero_grad()
# loss = self.get_loss(use_cached_star_basis)[1]
# loss.backward()
#
# return loss
#
# init_loss = optimizer.step(closure)
# old_loss = init_loss.clone()
#
# for i in range(1, max_iter):
# loss = optimizer.step(closure)
#
# if print_every:
# print(loss)
#
# if (old_loss - loss) < (init_loss * tol):
# break
#
# old_loss = loss
#
# if max_iter > 1:
# if i == (max_iter - 1):
# print('warning: max iterations reached')
#
# def optimize_fluxes_background(self, max_iter = 10):
# optimizer1 = optim.LBFGS(list(self.flux_params_class.parameters()) +
# list(self.planar_background.parameters()),
# max_iter = 10,
# line_search_fn = 'strong_wolfe')
#
# self._run_optimizer(optimizer1,
# tol = 1e-3,
# max_iter = max_iter,
# use_cached_star_basis = True)
#
# def run_coordinate_ascent(self, tol = 1e-3,
# max_inner_iter = 10,
# max_outer_iter = 20):
#
# old_loss = 1e16
# init_loss = self.get_loss(use_cached_star_basis = True)[1].detach()
#
# for i in range(max_outer_iter):
# print('\noptimizing fluxes + background. ')
# optimizer1 = optim.LBFGS(list(self.flux_params_class.parameters()) +
# list(self.planar_background.parameters()),
# max_iter = max_inner_iter,
# line_search_fn = 'strong_wolfe')
#
# self._run_optimizer(optimizer1, tol = 1e-3, max_iter = 1,
# use_cached_star_basis = True)
#
# print('loss: ', self.get_loss(use_cached_star_basis = True)[1].detach())
#
# print('\noptimizing psf. ')
# psf_optimizer = optim.LBFGS(list(self.power_law_psf.parameters()),
# max_iter = max_inner_iter,
# line_search_fn = 'strong_wolfe')
#
# self._run_optimizer(psf_optimizer, tol = 1e-3, max_iter = 1,
# use_cached_star_basis = False)
#
# loss = self.get_loss(use_cached_star_basis = False)[1].detach()
# print('loss: ', loss)
#
# if (old_loss - loss) < (tol * init_loss):
# break
#
# old_loss = loss
#
# if max_outer_iter > 1:
# if i == (max_outer_iter - 1):
# print('warning: max iterations reached')
| 18,610 | 36.147705 | 109 | py |
DeblendingStarfields | DeblendingStarfields-master/deblending_runjingdev/plotting_utils.py | import matplotlib.pyplot as plt
import torch
import numpy as np
import deblending_runjingdev.image_utils as image_utils
from deblending_runjingdev.which_device import device
def plot_image(fig, image,
               true_locs = None, estimated_locs = None,
               vmin = None, vmax = None,
               add_colorbar = False,
               global_fig = None,
               diverging_cmap = False,
               color = 'r', marker = 'x', alpha = 1):
    """Render an image on a matplotlib axes with optional star overlays.

    Args:
        fig: matplotlib axes to draw on.
        image: 2-d tensor to display (moved to CPU internally).
        true_locs: (n, 2) locations in [0, 1] plotted as blue dots.
        estimated_locs: (n, 2) locations in [0, 1] plotted with ``color``/
            ``marker``/``alpha``.
        vmin, vmax: intensity limits; for a diverging map vmax defaults to
            the max absolute value and the scale is symmetric.
        add_colorbar: attach a colorbar (requires ``global_fig``, the figure).
        diverging_cmap: use a blue-white-red map instead of grayscale.
    """
    # locations are coordinates in the image, on scale from 0 to 1
    image = image.cpu()
    slen = image.shape[-1]
    if diverging_cmap:
        if vmax is None:
            vmax = image.abs().max()
        # symmetric limits so zero maps to the center of the colormap
        im = fig.matshow(image, vmin = -vmax, vmax = vmax,
                         cmap=plt.get_cmap('bwr'))
    else:
        im = fig.matshow(image, vmin = vmin, vmax = vmax,
                         cmap=plt.cm.gray)
    if not(true_locs is None):
        true_locs = true_locs.cpu()
        assert len(true_locs.shape) == 2
        assert true_locs.shape[1] == 2
        # locs are (row, col) in [0, 1]; scatter wants pixel (x, y)
        fig.scatter(x = true_locs[:, 1] * (slen - 1),
                    y = true_locs[:, 0] * (slen - 1),
                    color = 'b')
    if not(estimated_locs is None):
        estimated_locs = estimated_locs.cpu()
        assert len(estimated_locs.shape) == 2
        assert estimated_locs.shape[1] == 2
        fig.scatter(x = estimated_locs[:, 1] * (slen - 1),
                    y = estimated_locs[:, 0] * (slen - 1),
                    color = color, marker = marker, alpha = alpha)
    if add_colorbar:
        assert global_fig is not None
        global_fig.colorbar(im, ax = fig)
def plot_categorical_probs(log_prob_vec, fig):
    """Stem-plot a categorical distribution given by its log probabilities.

    Draws a vertical blue line from zero up to exp(log_prob) for every
    category, then overlays a blue dot at each probability.
    """
    n_categories = len(log_prob_vec)
    # one vertical stem per category: (k, 0) -> (k, p_k)
    for k in range(n_categories):
        prob_k = torch.exp(log_prob_vec[k])
        fig.plot([k, k], [0, prob_k], color = 'blue')
    # markers on top of the stems
    fig.plot(np.arange(n_categories),
             torch.exp(log_prob_vec).detach().numpy(),
             'o', markersize = 5, color = 'blue')
def plot_subimage(fig, full_image, full_est_locs, full_true_locs,
                  x0, x1, patch_slen,
                  vmin = None, vmax = None,
                  add_colorbar = False,
                  global_fig = None,
                  diverging_cmap = False,
                  color = 'r', marker = 'x', alpha = 1):
    """Plot a ``patch_slen`` square subimage with the locations that fall in it.

    Args:
        fig: matplotlib axes to draw on.
        full_image: 2-d image tensor.
        full_est_locs, full_true_locs: (n, 2) locations on the FULL image,
            scaled to [0, 1]; either may be None.
        x0, x1: top-left pixel (row, column) of the subimage.
        patch_slen: side length of the subimage in pixels.
        Remaining arguments are forwarded to ``plot_image``.

    Returns:
        ``(which_true_locs, which_est_locs)`` -- boolean masks over the input
        catalogs marking the sources inside the patch (None where the
        corresponding input was None).
    """
    assert len(full_image.shape) == 2
    # full_est_locs and full_true_locs are locations in the coordinates of the
    # full image, in pixel units, scaled between 0 and 1
    # trim image to subimage
    image_patch = full_image[x0:(x0 + patch_slen), x1:(x1 + patch_slen)]
    # get locations in the subimage
    if full_est_locs is not None:
        assert torch.all(full_est_locs <= 1)
        assert torch.all(full_est_locs >= 0)
        # back to pixel units on the full image
        _full_est_locs = full_est_locs * (full_image.shape[-1] - 1)
        # keep sources strictly inside the patch
        which_est_locs = (_full_est_locs[:, 0] > x0) & \
                         (_full_est_locs[:, 0] < (x0 + patch_slen - 1)) & \
                         (_full_est_locs[:, 1] > x1) & \
                         (_full_est_locs[:, 1] < (x1 + patch_slen - 1))
        # re-express in [0, 1] coordinates of the patch
        shift = torch.Tensor([[x0, x1]]).to(device)
        est_locs = (_full_est_locs[which_est_locs, :] - shift) / (patch_slen - 1)
    else:
        est_locs = None
        which_est_locs = None
    if full_true_locs is not None:
        assert torch.all(full_true_locs <= 1)
        assert torch.all(full_true_locs >= 0)
        _full_true_locs = full_true_locs * (full_image.shape[-1] - 1)
        which_true_locs = (_full_true_locs[:, 0] > x0) & \
                          (_full_true_locs[:, 0] < (x0 + patch_slen - 1)) & \
                          (_full_true_locs[:, 1] > x1) & \
                          (_full_true_locs[:, 1] < (x1 + patch_slen - 1))
        shift = torch.Tensor([[x0, x1]]).to(device)
        true_locs = (_full_true_locs[which_true_locs, :] - shift) / (patch_slen - 1)
    else:
        true_locs = None
        which_true_locs = None
    plot_image(fig, image_patch,
               true_locs = true_locs,
               estimated_locs = est_locs,
               vmin = vmin, vmax = vmax,
               add_colorbar = add_colorbar,
               global_fig = global_fig,
               diverging_cmap = diverging_cmap,
               color = color, marker = marker, alpha = alpha)
    return which_true_locs, which_est_locs
| 4,500 | 35.593496 | 84 | py |
DeblendingStarfields | DeblendingStarfields-master/deblending_runjingdev/sdss_dataset_lib.py | import pathlib
import os
import pickle
import numpy as np
from scipy.interpolate import RegularGridInterpolator
import scipy.stats as stats
import torch
from torch.utils.data import Dataset
from astropy.io import fits
from astropy.wcs import WCS
import matplotlib.pyplot as plt
from deblending_runjingdev.simulated_datasets_lib import _trim_psf
from deblending_runjingdev.flux_utils import FluxEstimator
import deblending_runjingdev.psf_transform_lib as psf_transform_lib
from deblending_runjingdev.wake_lib import _sample_image, _fit_plane_to_background
from deblending_runjingdev.which_device import device
def _get_mgrid2(slen0, slen1):
offset0 = (slen0 - 1) / 2
offset1 = (slen1 - 1) / 2
x, y = np.mgrid[-offset0:(offset0 + 1), -offset1:(offset1 + 1)]
# return torch.Tensor(np.dstack((x, y))) / offset
return torch.Tensor(np.dstack((y, x))) / torch.Tensor([[[offset1, offset0]]])
class SloanDigitalSkySurvey(Dataset):
    """Torch Dataset that loads one SDSS field (selected bands) from disk.

    Each item is a dict with 'image' and 'background' in electron counts,
    plus 'nelec_per_nmgy', 'gain', and 'calibration' arrays, one entry per
    requested band.
    """
    # this is adapted from
    # https://github.com/jeff-regier/celeste_net/blob/935fbaa96d8da01dd7931600dee059bf6dd11292/datasets.py#L10
    # to run on a specified run, camcol, field, and band
    # returns one 1 x 1489 x 2048 image
    def __init__(self, sdssdir = '../sdss_stage_dir/',
                 run = 3900, camcol = 6, field = 269, bands = [2]):
        """
        Args:
            sdssdir: root of the staged SDSS directory tree.
            run, camcol, field: SDSS frame identifiers.
            bands: indices into "ugriz" of the bands to load.
        """
        super(SloanDigitalSkySurvey, self).__init__()
        self.sdss_path = pathlib.Path(sdssdir)
        self.rcfgs = []
        self.bands = bands
        # meta data for the run + camcol
        pf_file = "photoField-{:06d}-{:d}.fits".format(run, camcol)
        camcol_path = self.sdss_path.joinpath(str(run), str(camcol))
        pf_path = camcol_path.joinpath(pf_file)
        pf_fits = fits.getdata(pf_path)
        fieldnums = pf_fits["FIELD"]
        fieldgains = pf_fits["GAIN"]
        # get desired field: record (run, camcol, field, gain) for matches
        for i in range(len(fieldnums)):
            _field = fieldnums[i]
            gain = fieldgains[i]
            if _field == field:
                self.rcfgs.append((run, camcol, field, gain))
        # lazy per-item cache, filled on first access
        self.items = [None] * len(self.rcfgs)
    def __len__(self):
        return len(self.rcfgs)
    def __getitem__(self, idx):
        # load from disk on first access, then serve from memory
        if not self.items[idx]:
            self.items[idx] = self.get_from_disk(idx)
        return self.items[idx]
    def get_from_disk(self, idx):
        """Read frame FITS files for item ``idx`` and assemble the item dict."""
        run, camcol, field, gain = self.rcfgs[idx]
        camcol_dir = self.sdss_path.joinpath(str(run), str(camcol))
        field_dir = camcol_dir.joinpath(str(field))
        image_list = []
        background_list = []
        nelec_per_nmgy_list = []
        calibration_list = []
        gain_list = []
        cache_path = field_dir.joinpath("cache.pkl")
        # if cache_path.exists():
        #     print('loading cached sdss image from ', cache_path)
        #     return pickle.load(cache_path.open("rb"))
        for b, bl in enumerate("ugriz"):
            if not(b in self.bands):
                continue
            frame_name = "frame-{}-{:06d}-{:d}-{:04d}.fits".format(bl, run, camcol, field)
            frame_path = str(field_dir.joinpath(frame_name))
            print("loading sdss image from", frame_path)
            frame = fits.open(frame_path)
            # per-column calibration: nanomaggies per count
            calibration = frame[1].data
            nelec_per_nmgy = gain[b] / calibration
            # coarse sky grid plus the interpolation coordinates stored in HDU 2
            (sky_small,) = frame[2].data["ALLSKY"]
            (sky_x,) = frame[2].data["XINTERP"]
            (sky_y,) = frame[2].data["YINTERP"]
            small_rows = np.mgrid[0:sky_small.shape[0]]
            small_cols = np.mgrid[0:sky_small.shape[1]]
            sky_interp = RegularGridInterpolator((small_rows, small_cols), sky_small, method="nearest")
            sky_y = sky_y.clip(0, sky_small.shape[0] - 1)
            sky_x = sky_x.clip(0, sky_small.shape[1] - 1)
            large_points = np.stack(np.meshgrid(sky_y, sky_x)).transpose()
            large_sky = sky_interp(large_points)
            large_sky_nelec = large_sky * gain[b]
            # frame pixels are sky-subtracted nanomaggies; convert to electrons
            # and add the sky back in
            pixels_ss_nmgy = frame[0].data
            pixels_ss_nelec = pixels_ss_nmgy * nelec_per_nmgy
            pixels_nelec = pixels_ss_nelec + large_sky_nelec
            image_list.append(pixels_nelec)
            background_list.append(large_sky_nelec)
            gain_list.append(gain[b])
            nelec_per_nmgy_list.append(nelec_per_nmgy)
            calibration_list.append(calibration)
            frame.close()
        ret = {'image': np.stack(image_list),
               'background': np.stack(background_list),
               'nelec_per_nmgy': np.stack(nelec_per_nmgy_list),
               'gain': np.stack(gain_list),
               'calibration': np.stack(calibration_list)}
        # persist to disk for the commented-out cache path above
        pickle.dump(ret, field_dir.joinpath("cache.pkl").open("wb+"))
        return ret
def convert_mag_to_nmgy(mag):
    """Convert SDSS magnitudes to nanomaggies (22.5 mag corresponds to 1 nmgy)."""
    exponent = (22.5 - mag) / 2.5
    return 10 ** exponent
def convert_nmgy_to_mag(nmgy):
    """Convert nanomaggie fluxes (tensor) to SDSS magnitudes; inverse of
    ``convert_mag_to_nmgy``."""
    log_flux = torch.log10(nmgy)
    return 22.5 - 2.5 * log_flux
def load_m2_data(sdss_dir = '../../sdss_stage_dir/',
                 hubble_dir = '../hubble_data/',
                 slen = 100,
                 x0 = 630,
                 x1 = 310,
                 f_min = 1000.):
    """Load the SDSS image of globular cluster M2 and the Hubble catalog.

    Returns a `slen x slen` cutout at (x0, x1) of the r/i-band SDSS field
    (run 2583, camcol 2, field 136), the matching background, the Hubble
    star locations (in [0, 1] cutout coordinates) and r-band fluxes (in
    electrons, duplicated across the two bands), the raw sdss_data object,
    and the r-band WCS.  Only stars brighter than `f_min` electrons are kept.
    """
    # returns the SDSS image of M2 in the r and i bands
    # along with the corresponding Hubble catalog

    #####################
    # Load SDSS data
    #####################
    run = 2583
    camcol = 2
    field = 136

    sdss_data = SloanDigitalSkySurvey(sdss_dir,
                                      run = run,
                                      camcol = camcol,
                                      field = field,
                                      # returns the r and i band
                                      bands = [2, 3])

    # the full SDSS image, ~1500 x 2000 pixels
    sdss_image_full = torch.Tensor(sdss_data[0]['image'])
    sdss_background_full = torch.Tensor(sdss_data[0]['background'])

    #####################
    # load hubble catalog
    #####################
    hubble_cat_file = hubble_dir + \
        'hlsp_acsggct_hst_acs-wfc_ngc7089_r.rdviq.cal.adj.zpt'
    print('loading hubble data from ', hubble_cat_file)
    # skiprows=True is interpreted by numpy as skiprows=1 (one header row)
    HTcat = np.loadtxt(hubble_cat_file, skiprows=True)

    # hubble magnitude
    hubble_rmag_full = HTcat[:,9]
    # right ascension and declination
    hubble_ra_full = HTcat[:,21]
    hubble_dc_full = HTcat[:,22]

    # convert hubble r.a and declination to pixel coordinates
    # (0, 0) is top left of sdss_image_full
    frame_name = "frame-{}-{:06d}-{:d}-{:04d}.fits".format('r', run, camcol, field)
    field_dir = pathlib.Path(sdss_dir).joinpath(str(run), str(camcol), str(field))
    frame_path = str(field_dir.joinpath(frame_name))
    print('getting sdss coordinates from: ', frame_path)
    hdulist = fits.open(str(frame_path))
    wcs = WCS(hdulist['primary'].header)
    # NOTE: pix_coordinates are (column x row), i.e. pix_coord[0] corresponds to a column
    pix_coordinates = \
        wcs.wcs_world2pix(hubble_ra_full, hubble_dc_full, 0, ra_dec_order = True)
    hubble_locs_full_x0 = pix_coordinates[1] # the row of pixel
    hubble_locs_full_x1 = pix_coordinates[0] # the column of pixel

    # convert hubble magnitude to n_electron count
    # only take r band
    nelec_per_nmgy_full = sdss_data[0]['nelec_per_nmgy'][0].squeeze()
    # NOTE(review): nelec_per_nmgy_full appears to be a per-column calibration
    # vector; dividing the column index by its *length* maps nearly every star
    # to column 0 — confirm whether plain column indexing was intended here.
    which_cols = np.floor(hubble_locs_full_x1 / len(nelec_per_nmgy_full)).astype(int)
    hubble_nmgy = convert_mag_to_nmgy(hubble_rmag_full)
    hubble_r_fluxes_full = hubble_nmgy * nelec_per_nmgy_full[which_cols]

    #####################
    # using hubble ground truth locations,
    # align i-band with r-band
    #####################
    frame_name_i = "frame-{}-{:06d}-{:d}-{:04d}.fits".format('i',
                                                             run, camcol, field)
    frame_path_i = str(field_dir.joinpath(frame_name_i))
    print('\n aligning images. \n Getting sdss coordinates from: ', frame_path_i)
    hdu = fits.open(str(frame_path_i))
    wcs_other = WCS(hdu['primary'].header)
    # get pixel coords
    pix_coordinates_other = wcs_other.wcs_world2pix(hubble_ra_full,
                                                    hubble_dc_full, 0,
                                                    ra_dec_order = True)

    # estimate the amount to shift: median per-star offset between bands,
    # rescaled to the [-1, 1] grid_sample coordinate convention (hence * 2)
    shift_x0 = np.median(hubble_locs_full_x0 - pix_coordinates_other[1]) / (sdss_image_full.shape[-2] - 1)
    shift_x1 = np.median(hubble_locs_full_x1 - pix_coordinates_other[0]) / (sdss_image_full.shape[-1] - 1)
    shift = torch.Tensor([[[[shift_x1, shift_x0 ]]]]) * 2

    # align image: resample the i band onto the shifted grid
    grid = _get_mgrid2(sdss_image_full.shape[-2],
                       sdss_image_full.shape[-1]).unsqueeze(0) - shift
    sdss_image_full[1] = \
        torch.nn.functional.grid_sample(sdss_image_full[1].unsqueeze(0).unsqueeze(0),
                                        grid, align_corners=True).squeeze()

    ##################
    # Filter to desired subimage
    ##################
    print('\n returning image at x0 = {}, x1 = {}'.format(x0, x1))
    which_locs = (hubble_locs_full_x0 > x0) & (hubble_locs_full_x0 < (x0 + slen - 1)) & \
        (hubble_locs_full_x1 > x1) & (hubble_locs_full_x1 < (x1 + slen - 1))

    # just a subset
    sdss_image = sdss_image_full[:, x0:(x0 + slen), x1:(x1 + slen)].to(device)
    sdss_background = sdss_background_full[:, x0:(x0 + slen), x1:(x1 + slen)].to(device)

    # catalog locations relative to the cutout, rescaled to [0, 1]
    locs = np.array([hubble_locs_full_x0[which_locs] - x0,
                     hubble_locs_full_x1[which_locs] - x1]).transpose()
    hubble_r_fluxes = torch.Tensor(hubble_r_fluxes_full[which_locs])
    hubble_locs = torch.Tensor(locs) / (slen - 1)
    # the r-band flux is duplicated for the i band
    hubble_fluxes = torch.stack([hubble_r_fluxes,
                                 hubble_r_fluxes]).transpose(0, 1)

    # filter by bright stars only
    which_bright = hubble_fluxes[:, 0] > f_min
    hubble_locs = hubble_locs[which_bright].to(device)
    hubble_fluxes = hubble_fluxes[which_bright].to(device)

    return sdss_image, sdss_background, \
        hubble_locs, hubble_fluxes, \
        sdss_data, wcs
| 10,043 | 37.482759 | 110 | py |
DeblendingStarfields | DeblendingStarfields-master/deblending_runjingdev/psf_transform_lib.py | import torch
import torch.nn as nn
from torch.nn.functional import unfold, softmax, pad
from astropy.io import fits
import deblending_runjingdev.image_utils as image_utils
from deblending_runjingdev.utils import eval_normal_logprob
from deblending_runjingdev.simulated_datasets_lib import _get_mgrid, plot_multiple_stars
from deblending_runjingdev.which_device import device
#######################
# Convolutional PSF transform
########################
class PsfLocalTransform(nn.Module):
    """Learnable per-pixel linear filter applied to a fixed PSF image.

    Each PSF pixel becomes a softmax-weighted combination of its
    kernel_size x kernel_size neighborhood; the logits of those weights are
    the learnable parameters.  The transformed PSF is zero-padded up to
    image_slen and rescaled so each band keeps the input PSF's total flux.
    """

    def __init__(self, psf,
                 image_slen = 101,
                 kernel_size = 3,
                 init_bias = 5):
        """psf: (n_bands, psf_slen, psf_slen), psf_slen odd and < image_slen (odd)."""
        super(PsfLocalTransform, self).__init__()

        assert len(psf.shape) == 3
        self.n_bands = psf.shape[0]

        assert psf.shape[1] == psf.shape[2]
        self.psf_slen = psf.shape[-1]

        # only implemented for this case atm
        assert image_slen > psf.shape[1]
        assert (image_slen % 2) == 1
        assert (psf.shape[1] % 2) == 1

        self.image_slen = image_slen
        self.kernel_size = kernel_size

        self.psf = psf.unsqueeze(0)
        self.tile_psf()

        # per-band total flux, used to renormalize the transformed PSF
        self.normalization = psf.view(self.n_bands, -1).sum(1)

        # initialization: put all initial weight on the center tap so the
        # transform starts close to the identity.
        # BUGFIX: the center index was hard-coded to 4, which is only correct
        # for kernel_size == 3; compute it from kernel_size instead.
        init_weight = torch.zeros(self.psf_slen ** 2, self.n_bands,
                                  kernel_size ** 2)
        init_weight[:, :, (kernel_size ** 2) // 2] = init_bias
        self.weight = nn.Parameter(init_weight)

    def tile_psf(self):
        """Cache every kernel_size^2 neighborhood of every PSF pixel.

        Result shape: (psf_slen^2, n_bands, kernel_size^2)."""
        psf_unfolded = unfold(self.psf,
                              kernel_size = self.kernel_size,
                              padding = (self.kernel_size - 1) // 2).squeeze(0).transpose(0, 1)
        self.psf_tiled = psf_unfolded.view(psf_unfolded.shape[0], self.n_bands,
                                           self.kernel_size ** 2)

    def apply_weights(self, w):
        """Mix each pixel's neighborhood with weights w; return (n_bands, S, S)."""
        tile_psf_transformed = torch.sum(w * self.psf_tiled, dim = 2).transpose(0, 1)
        return tile_psf_transformed.view(self.n_bands, self.psf_slen,
                                         self.psf_slen)

    def forward(self):
        """Return the transformed, flux-matched PSF padded to image_slen."""
        # softmax over the kernel taps keeps each pixel a convex combination
        weights_constrained = torch.nn.functional.softmax(self.weight, dim = 2)
        psf_transformed = self.apply_weights(weights_constrained)

        # pad psf up to the full image size
        l_pad = (self.image_slen - self.psf_slen) // 2
        psf_image = pad(psf_transformed, (l_pad, ) * 4)

        # rescale so each band's total flux matches the input PSF
        psf_image_normalization = psf_image.view(self.n_bands, -1).sum(1)
        return psf_image * (self.normalization / psf_image_normalization).unsqueeze(-1).unsqueeze(-1)
########################
# function for Power law PSF
########################
def get_psf_params(psfield_fit_file, bands):
    """Read power-law PSF parameters for the requested bands from a psField file.

    Returns a (len(bands), 6) tensor holding the *logs* of
    (sigma1^2, sigma2^2, sigmap^2, beta, b, p0) for each band.
    """
    psfield = fits.open(psfield_fit_file)
    table = psfield[6].data

    psf_params = torch.zeros(len(bands), 6)
    for i, band in enumerate(bands):
        raw = [table["psf_sigma1"][0][band] ** 2,
               table["psf_sigma2"][0][band] ** 2,
               table["psf_sigmap"][0][band] ** 2,
               table["psf_beta"][0][band],
               table["psf_b"][0][band],
               table["psf_p0"][0][band]]
        # these parameters are constrained positive, so store their logs
        # (they are exponentiated again when the PSF is rendered)
        psf_params[i] = torch.log(torch.Tensor(raw))

    return psf_params
def psf_fun(r, sigma1, sigma2, sigmap, beta, b, p0):
    """Evaluate the SDSS double-Gaussian + power-law PSF profile at radius r."""
    r2 = r ** 2
    gauss1 = torch.exp(-r2 / (2 * sigma1))
    gauss2 = b * torch.exp(-r2 / (2 * sigma2))
    power = p0 * (1 + r2 / (beta * sigmap)) ** (-beta / 2)
    return (gauss1 + gauss2 + power) / (1 + b + p0)
def get_psf(slen, psf_params, cached_radii_grid = None):
    """Render an slen x slen power-law PSF from log-parameters.

    psf_params holds log(sigma1, sigma2, sigmap, beta, b, p0) and is
    exponentiated here.  slen must be odd so the peak sits on a pixel.
    cached_radii_grid: optional precomputed (slen, slen) radius grid.
    """
    assert (slen % 2) == 1

    if cached_radii_grid is None:
        # BUGFIX: this module imports `_get_mgrid` directly (see the
        # `from ... simulated_datasets_lib import _get_mgrid` line), so the
        # old `simulated_datasets_lib._get_mgrid` reference raised NameError.
        grid = _get_mgrid(slen) * (slen - 1) / 2
        radii_grid = (grid ** 2).sum(2).sqrt()
    else:
        radii_grid = cached_radii_grid

    _psf_params = torch.exp(psf_params)
    return psf_fun(radii_grid, _psf_params[0], _psf_params[1], _psf_params[2],
                   _psf_params[3], _psf_params[4], _psf_params[5])
class PowerLawPSF(nn.Module):
    """nn.Module wrapping the SDSS power-law PSF with learnable log-parameters.

    The rendered PSF is rescaled so each band keeps its initial total flux,
    then zero-padded from psf_slen up to image_slen.
    """

    def __init__(self, init_psf_params,
                 psf_slen = 25,
                 image_slen = 101):
        super(PowerLawPSF, self).__init__()

        assert len(init_psf_params.shape) == 2
        assert image_slen % 2 == 1, 'image_slen must be odd'

        self.n_bands = init_psf_params.shape[0]
        self.init_psf_params = init_psf_params.clone()

        self.psf_slen = psf_slen
        self.image_slen = image_slen

        # cache the radial grid used to evaluate the profile
        grid = _get_mgrid(self.psf_slen) * (self.psf_slen - 1) / 2
        self.cached_radii_grid = (grid ** 2).sum(2).sqrt().to(device)

        # learnable log-parameters, initialized at the fitted values
        self.params = nn.Parameter(init_psf_params.clone())

        # per-band constants that scale the *initial* PSF to unit flux
        self.normalization_constant = torch.zeros(self.n_bands)
        for band in range(self.n_bands):
            init_psf_band = get_psf(self.psf_slen,
                                    self.init_psf_params[band],
                                    self.cached_radii_grid)
            self.normalization_constant[band] = 1 / init_psf_band.sum()

        # snapshot of the initial PSF and its flux (held fixed in training)
        self.init_psf = self.get_psf()
        # TODO: I belive this init_psf_sum is vacuous (should just be one)
        self.init_psf_sum = self.init_psf.sum(-1).sum(-1).detach()

    def get_psf(self):
        """Render the current psf_slen x psf_slen PSF for every band."""
        # TODO make the psf function vectorized ...
        per_band = []
        for band in range(self.n_bands):
            psf_band = get_psf(self.psf_slen, self.params[band],
                               self.cached_radii_grid)
            per_band.append(psf_band * self.normalization_constant[band])
        psf = torch.stack(per_band)

        assert (psf >= 0).all()
        return psf

    def forward(self):
        """Return the current PSF, flux-matched to the initial one and
        zero-padded up to image_slen x image_slen."""
        psf = self.get_psf()
        scale = self.init_psf_sum / psf.sum(-1).sum(-1)
        psf = psf * scale.unsqueeze(-1).unsqueeze(-1)

        l_pad = (self.image_slen - self.psf_slen) // 2
        return pad(psf, (l_pad, ) * 4)
| 6,346 | 32.582011 | 101 | py |
DeblendingStarfields | DeblendingStarfields-master/deblending_runjingdev/simulated_datasets_lib.py | import numpy as np
import scipy.stats as stats
import torch
from torch.utils.data import Dataset, DataLoader
import torch
import torch.nn.functional as F
import deblending_runjingdev.utils as utils
from deblending_runjingdev.which_device import device
def _trim_psf(psf, slen):
# crop the psf to length slen x slen
# centered at the middle
assert len(psf.shape) == 3
n_bands = psf.shape[0]
# dimension of the psf should be odd
psf_slen = psf.shape[2]
assert psf.shape[1] == psf_slen
assert (psf_slen % 2) == 1
assert (slen % 2) == 1
psf_center = (psf_slen - 1) / 2
assert psf_slen >= slen
r = np.floor(slen / 2)
l_indx = int(psf_center - r)
u_indx = int(psf_center + r + 1)
return psf[:, l_indx:u_indx, l_indx:u_indx]
def _expand_psf(psf, slen):
# pad the psf with zeros so that it is size slen
# first dimension of psf is number of bands
assert len(psf.shape) == 3
n_bands = psf.shape[0]
psf_slen = psf.shape[2]
assert psf.shape[1] == psf_slen
# dimension of psf should be odd
assert (psf_slen % 2) == 1
# sim for slen
assert (slen % 2) == 1
assert psf_slen <= slen
psf_expanded = torch.zeros((n_bands, slen, slen))
offset = int((slen - psf_slen) / 2)
psf_expanded[:, offset:(offset+psf_slen), offset:(offset+psf_slen)] = psf
return psf_expanded
def _get_mgrid(slen):
    """Return an (slen, slen, 2) grid of (x, y) coordinates spanning [-1, 1]."""
    offset = (slen - 1) / 2
    row_grid, col_grid = np.mgrid[-offset:(offset + 1), -offset:(offset + 1)]
    # stack as (x, y) pairs (grid_sample convention), rescaled to [-1, 1]
    grid = torch.Tensor(np.dstack((col_grid, row_grid))) / offset
    return grid.to(device)
def plot_one_star(slen, locs, psf, cached_grid = None):
    """Render one PSF per batch element, shifted to the given location.

    locs: (batchsize, 2) locations in [0, 1] (row, col) coordinates.
    psf:  (n_bands, s, s) point-spread function.
    Returns a (batchsize, n_bands, slen, slen) image stack.
    """
    assert len(psf.shape) == 3
    assert locs.shape[1] == 2
    n_bands = psf.shape[0]
    batchsize = locs.shape[0]

    if cached_grid is None:
        grid = _get_mgrid(slen)
    else:
        assert cached_grid.shape[0] == slen
        assert cached_grid.shape[1] == slen
        grid = cached_grid

    # rescale locs from [0, 1] to [-1, 1] (grid_sample convention) and
    # swap (row, col) to (x, y) order
    scaled_locs = (locs - 0.5) * 2
    scaled_locs = scaled_locs.index_select(1, torch.tensor([1, 0], device=device))

    shifted_grid = grid.view(1, slen, slen, 2) - scaled_locs.view(batchsize, 1, 1, 2)
    star = F.grid_sample(psf.expand(batchsize, n_bands, -1, -1), shifted_grid,
                         align_corners = True)
    return star
def plot_multiple_stars(slen, locs, n_stars, fluxes, psf, cached_grid = None):
    """Render and sum up to max_stars PSFs per image.

    locs:    (batchsize, max_stars, 2) locations in [0, 1]
    fluxes:  (batchsize, max_stars, n_bands)
    n_stars: (batchsize,) number of active stars per image
    psf:     (n_bands, s, s)
    Returns (batchsize, n_bands, slen, slen) noiseless images (no background).
    """
    n_bands = psf.shape[0]
    batchsize = locs.shape[0]

    assert locs.shape[2] == 2
    assert fluxes.shape[0] == locs.shape[0]
    assert fluxes.shape[1] == locs.shape[1]
    assert fluxes.shape[2] == n_bands
    assert len(n_stars) == batchsize
    assert len(n_stars.shape) == 1
    assert max(n_stars) <= locs.shape[1]

    if cached_grid is None:
        grid = _get_mgrid(slen)
    else:
        assert cached_grid.shape[0] == slen
        assert cached_grid.shape[1] == slen
        grid = cached_grid

    scene = 0.
    for k in range(max(n_stars)):
        # mask for images that actually contain a (k+1)-th star
        is_on_k = (k < n_stars).float()
        locs_k = locs[:, k, :] * is_on_k.unsqueeze(1)
        fluxes_k = fluxes[:, k, :]
        star_k = plot_one_star(slen, locs_k, psf, cached_grid = grid)
        scene = scene + star_k * (is_on_k.unsqueeze(1) * fluxes_k).view(batchsize, n_bands, 1, 1)

    return scene
def _draw_pareto(f_min, alpha, shape):
    """Sample Pareto(alpha) variates with minimum f_min via inverse-CDF."""
    u = torch.rand(shape, device = device)
    return f_min / (1 - u) ** (1 / alpha)
def _draw_pareto_maxed(f_min, f_max, alpha, shape):
    """Sample Pareto(alpha, f_min) truncated above at f_max.

    Uses rejection sampling: entries above f_max are redrawn until none remain.
    """
    samples = _draw_pareto(f_min, alpha, shape)

    while torch.any(samples > f_max):
        too_big = samples > f_max
        samples[too_big] = _draw_pareto(f_min, alpha, torch.sum(too_big))

    return samples
class StarSimulator:
    """Renders (optionally noisy) star-field images from catalog parameters."""

    def __init__(self, psf, slen, background, transpose_psf):
        assert len(psf.shape) == 3
        assert len(background.shape) == 3
        assert background.shape[0] == psf.shape[0]
        assert background.shape[1] == slen
        assert background.shape[2] == slen
        self.background = background

        self.n_bands = psf.shape[0]
        self.psf_og = psf

        # side length of the image
        self.slen = slen

        # resize the PSF to (roughly) the image size, keeping its side length
        # odd so the peak stays on the center pixel
        _slen = slen + 1 if (slen % 2) == 0 else slen
        if slen >= self.psf_og.shape[-1]:
            self.psf = _expand_psf(self.psf_og, _slen).to(device)
        else:
            self.psf = _trim_psf(self.psf_og, _slen).to(device)

        if transpose_psf:
            self.psf = self.psf.transpose(1, 2)

        self.cached_grid = _get_mgrid(slen)

    def draw_image_from_params(self, locs, fluxes, n_stars,
                               add_noise = True):
        """Render images; if add_noise, add Gaussian noise with variance equal
        to the mean (a Gaussian approximation of Poisson noise)."""
        images_mean = \
            plot_multiple_stars(self.slen, locs, n_stars, fluxes,
                                self.psf, self.cached_grid) + \
            self.background[None, :, :, :]

        if not add_noise:
            return images_mean

        if torch.any(images_mean <= 0):
            print('warning: image mean less than 0')
            images_mean = images_mean.clamp(min = 1.0)

        noise = torch.randn(images_mean.shape, device = device)
        return torch.sqrt(images_mean) * noise + images_mean
class StarsDataset(Dataset):
    """Synthetic star-field dataset.

    Each item is a simulated image together with the latent catalog
    (locations, fluxes, number of stars) that generated it.  The number of
    stars is Poisson(mean_stars) clamped to [min_stars, max_stars]; fluxes
    in the first band follow a Pareto(alpha) truncated to [f_min, f_max],
    and the remaining bands get log-normal colors relative to the first.
    """

    def __init__(self, psf, n_images,
                 slen,
                 max_stars,
                 mean_stars,
                 min_stars,
                 f_min,
                 f_max,
                 background,
                 alpha,
                 draw_poisson = True,
                 transpose_psf = False,
                 add_noise = True):

        self.slen = slen
        self.n_bands = psf.shape[0]

        self.simulator = StarSimulator(psf, slen, background, transpose_psf)
        self.background = background[None, :, :, :]

        # image parameters
        self.max_stars = max_stars
        self.mean_stars = mean_stars
        self.min_stars = min_stars
        self.add_noise = add_noise
        self.draw_poisson = draw_poisson

        # prior parameters
        self.f_min = f_min
        self.f_max = f_max
        self.alpha = alpha

        # dataset parameters
        self.n_images = n_images

        # simulate the initial batch of catalogs and images
        self.set_params_and_images()

    def __len__(self):
        return self.n_images

    def __getitem__(self, idx):
        return {'image': self.images[idx],
                'background': self.background[0],
                'locs': self.locs[idx],
                'fluxes': self.fluxes[idx],
                'n_stars': self.n_stars[idx]}

    def draw_batch_parameters(self, batchsize, return_images = True):
        """Sample a batch of latent catalogs (and optionally render images).

        Returns (locs, fluxes, n_stars) and, if return_images, the images too.
        """
        if self.draw_poisson:
            # draw the number of stars per image
            p = torch.full((1,), self.mean_stars, device=device, dtype = torch.float)
            m = torch.distributions.Poisson(p)
            n_stars = m.sample((batchsize, ))
            n_stars = n_stars.clamp(max = self.max_stars,
                                    min = self.min_stars).long().squeeze(-1)
        else:
            # BUGFIX: this branch was `assert 1 == 2, 'foo'`, which is
            # silently stripped when Python runs with -O; fail explicitly.
            raise NotImplementedError(
                'draw_poisson = False is not implemented')

        is_on_array = utils.get_is_on_from_n_stars(n_stars, self.max_stars)

        # draw locations, zeroed for padded (off) stars
        locs = torch.rand((batchsize, self.max_stars, 2), device = device) * \
            is_on_array.unsqueeze(2).float()

        # draw fluxes in the reference band
        base_fluxes = _draw_pareto_maxed(self.f_min, self.f_max, alpha = self.alpha,
                                         shape = (batchsize, self.max_stars))

        if self.n_bands > 1:
            # TODO: we may need to change the color priors
            colors = torch.randn(batchsize, self.max_stars, self.n_bands - 1,
                                 device = device) * 1.0
            _fluxes = 10**(colors / 2.5) * base_fluxes.unsqueeze(2)
            fluxes = torch.cat((base_fluxes.unsqueeze(2), _fluxes), dim = 2) * \
                is_on_array.unsqueeze(2).float()
        else:
            fluxes = (base_fluxes * is_on_array.float()).unsqueeze(2)

        if return_images:
            images = self.simulator.draw_image_from_params(locs, fluxes, n_stars,
                                                           add_noise = self.add_noise)
            return locs, fluxes, n_stars, images
        else:
            return locs, fluxes, n_stars

    def set_params_and_images(self):
        """Resample the cached catalogs and images in place."""
        self.locs, self.fluxes, self.n_stars, self.images = \
            self.draw_batch_parameters(self.n_images, return_images = True)
def load_dataset_from_params(psf, data_params,
                             n_images,
                             background,
                             draw_poisson = True,
                             transpose_psf = False,
                             add_noise = True):
    """Construct a StarsDataset from a dict of prior/image parameters.

    data_params must contain: slen, f_min, f_max, alpha, max_stars,
    mean_stars, min_stars.
    """
    return StarsDataset(psf,
                        n_images,
                        slen = data_params['slen'],
                        f_min = data_params['f_min'],
                        f_max = data_params['f_max'],
                        max_stars = data_params['max_stars'],
                        mean_stars = data_params['mean_stars'],
                        min_stars = data_params['min_stars'],
                        alpha = data_params['alpha'],
                        background = background,
                        draw_poisson = draw_poisson,
                        transpose_psf = transpose_psf,
                        add_noise = add_noise)
| 10,604 | 30.751497 | 98 | py |
DeblendingStarfields | DeblendingStarfields-master/deblending_runjingdev/image_statistics_lib.py | import torch
import numpy as np
from deblending_runjingdev.sdss_dataset_lib import convert_nmgy_to_mag
from deblending_runjingdev.which_device import device
def filter_params(locs, fluxes, slen, pad = 5):
    """Drop sources within `pad` pixels of the image border.

    locs are in [0, 1] units and are scaled by (slen - 1) to pixels;
    fluxes may be None.  Returns the filtered (locs, fluxes) pair.
    """
    assert len(locs.shape) == 2
    if fluxes is not None:
        assert len(fluxes.shape) == 1
        assert len(fluxes) == len(locs)

    pixel_locs = locs * (slen - 1)
    inside = ((pixel_locs > pad) & (pixel_locs < (slen - pad))).all(1)

    kept_fluxes = fluxes[inside] if fluxes is not None else None
    return locs[inside], kept_fluxes
def get_locs_error(locs, true_locs):
    """Pairwise L-infinity location error matrix: rows = truth, cols = estimates."""
    diff = locs.unsqueeze(0) - true_locs.unsqueeze(1)
    return diff.abs().max(2)[0]
def get_fluxes_error(fluxes, true_fluxes):
    """Pairwise |log10 flux| error matrix: rows = truth, cols = estimates."""
    log_est = torch.log10(fluxes).unsqueeze(0)
    log_true = torch.log10(true_fluxes).unsqueeze(1)
    return (log_est - log_true).abs()
def get_mag_error(mags, true_mags):
    """Pairwise |magnitude| error matrix: rows = truth, cols = estimates."""
    return (mags.unsqueeze(0) - true_mags.unsqueeze(1)).abs()
def get_summary_stats(est_locs, true_locs, slen, est_fluxes, true_fluxes,
                      nelec_per_nmgy,
                      pad = 5, slack = 0.5):
    """TPR and PPV, matching estimates to truth within `slack` in both
    pixels (L-inf) and magnitudes, after removing a `pad`-pixel border.

    Returns (tpr, ppv, tpr_bool, ppv_bool).
    """
    # remove border sources from both catalogs
    est_locs, est_fluxes = filter_params(est_locs, est_fluxes, slen, pad)
    true_locs, true_fluxes = filter_params(true_locs, true_fluxes, slen, pad)

    if (est_fluxes is None) or (true_fluxes is None):
        # no fluxes available: the magnitude criterion is trivially satisfied
        mag_error = 0.
    else:
        est_mags = convert_nmgy_to_mag(est_fluxes / nelec_per_nmgy)
        true_mags = convert_nmgy_to_mag(true_fluxes / nelec_per_nmgy)
        mag_error = get_mag_error(est_mags, true_mags)

    locs_error = get_locs_error(est_locs * (slen - 1), true_locs * (slen - 1))

    matched = (locs_error < slack) * (mag_error < slack)
    tpr_bool = torch.any(matched, dim = 1).float()
    ppv_bool = torch.any(matched, dim = 0).float()

    return tpr_bool.mean(), ppv_bool.mean(), tpr_bool, ppv_bool
def get_tpr_vec(est_locs, true_locs, slen, est_fluxes, true_fluxes,
                nelec_per_nmgy,
                pad = 5, mag_vec = None):
    """True-positive rate binned by *true* magnitude.

    If mag_vec is None, the bin edges are the deciles of the true magnitude
    distribution.  Returns (tpr_vec, mag_vec, counts_vec).
    """
    est_locs, est_fluxes = filter_params(est_locs, est_fluxes, slen, pad)
    true_locs, true_fluxes = filter_params(true_locs, true_fluxes, slen, pad)

    true_mags = convert_nmgy_to_mag(true_fluxes / nelec_per_nmgy)

    if mag_vec is None:
        # decile bin edges of the true magnitudes
        edges = np.percentile(true_mags.cpu(), np.linspace(0, 1, 11) * 100)
        mag_vec = torch.Tensor(edges).to(device)

    n_bins = len(mag_vec) - 1
    tpr_vec = np.zeros(n_bins)
    counts_vec = np.zeros(n_bins)
    for b in range(n_bins):
        in_bin = (true_mags > mag_vec[b]) & (true_mags < mag_vec[b + 1])
        counts_vec[b] = torch.sum(in_bin)
        tpr_vec[b] = \
            get_summary_stats(est_locs, true_locs[in_bin], slen,
                              est_fluxes, true_fluxes[in_bin],
                              nelec_per_nmgy, pad = pad)[0]

    return tpr_vec, mag_vec, counts_vec
def get_ppv_vec(est_locs, true_locs, slen, est_fluxes, true_fluxes,
                nelec_per_nmgy,
                pad = 5, mag_vec = None):
    """Positive predictive value binned by *estimated* magnitude.

    If mag_vec is None, the bin edges are the deciles of the estimated
    magnitude distribution.  Returns (ppv_vec, mag_vec, counts_vec).
    """
    est_locs, est_fluxes = filter_params(est_locs, est_fluxes, slen, pad)
    true_locs, true_fluxes = filter_params(true_locs, true_fluxes, slen, pad)

    est_mags = convert_nmgy_to_mag(est_fluxes / nelec_per_nmgy)

    if mag_vec is None:
        edges = np.percentile(est_mags.cpu(), np.linspace(0, 1, 11) * 100)
        mag_vec = torch.Tensor(edges).to(device)

    n_bins = len(mag_vec) - 1
    ppv_vec = np.zeros(n_bins)
    counts_vec = np.zeros(n_bins)
    for b in range(n_bins):
        in_bin = (est_mags > mag_vec[b]) & (est_mags < mag_vec[b + 1])
        counts_vec[b] = torch.sum(in_bin)
        if torch.sum(in_bin) == 0:
            # empty bin: leave its ppv at 0
            continue
        ppv_vec[b] = \
            get_summary_stats(est_locs[in_bin], true_locs, slen,
                              est_fluxes[in_bin], true_fluxes,
                              nelec_per_nmgy, pad = pad)[1]

    return ppv_vec, mag_vec, counts_vec
def get_l1_error(est_locs, true_locs, slen, est_fluxes, true_fluxes, pad = 5):
    """Location (L-inf) and |log10 flux| errors for matched estimates only.

    An estimate is "matched" when some truth lies within 0.5 pixels and
    0.5 dex of it; for each matched estimate, the closest truth (by
    location) is used.  Returns (locs_error, fluxes_error).
    """
    est_locs, est_fluxes = filter_params(est_locs, est_fluxes, slen, pad)
    true_locs, true_fluxes = filter_params(true_locs, true_fluxes, slen, pad)

    fluxes_error = get_fluxes_error(est_fluxes, true_fluxes)
    locs_error = get_locs_error(est_locs * (slen - 1), true_locs * (slen - 1))

    matched = torch.any((locs_error < 0.5) * (fluxes_error < 0.5), dim = 0).float()
    locs_matched_error = locs_error[:, matched == 1]
    fluxes_matched_error = fluxes_error[:, matched == 1]

    col_idx = torch.arange(fluxes_matched_error.shape[1], dtype = torch.long)
    best_locs_error, best_row = locs_matched_error.min(0)
    return best_locs_error, fluxes_matched_error[best_row, col_idx]
| 5,243 | 35.416667 | 97 | py |
DeblendingStarfields | DeblendingStarfields-master/deblending_runjingdev/sleep_lib.py | import torch
import numpy as np
import math
import time
from torch.distributions import normal
from torch.nn import CrossEntropyLoss
import deblending_runjingdev.utils as utils
import deblending_runjingdev.elbo_lib as elbo_lib
from deblending_runjingdev.which_device import device
from itertools import permutations
def isnan(x):
    """Elementwise NaN test: NaN is the only value not equal to itself.

    Works on tensors (returns a bool tensor) and on plain Python floats.
    """
    return x != x
#############################
# functions to get loss for training the counter
############################
def get_categorical_loss(log_probs, one_hot_encoding):
    """Categorical negative log-likelihood given one-hot targets.

    Both inputs are (batchsize, n_classes); returns a (batchsize,) vector.
    """
    assert torch.all(log_probs <= 0)
    assert log_probs.shape[0] == one_hot_encoding.shape[0]
    assert log_probs.shape[1] == one_hot_encoding.shape[1]

    return (-log_probs * one_hot_encoding).sum(dim = 1)
def _permute_losses_mat(losses_mat, perm):
batchsize = losses_mat.shape[0]
max_stars = losses_mat.shape[1]
assert perm.shape[0] == batchsize
assert perm.shape[1] == max_stars
return torch.gather(losses_mat, 2, perm.unsqueeze(2)).squeeze()
def get_locs_logprob_all_combs(true_locs, loc_mean, loc_log_var):
    """Log-density of every true location under every predicted Gaussian.

    Returns a (batchsize, max_stars, max_detections) tensor.  Padded (exactly
    zero) true locations are shifted to +1e16 so their log-probs become huge
    negatives and can never win a matching.
    """
    batchsize = true_locs.shape[0]
    max_detections = loc_mean.shape[1]

    mean = loc_mean.view(batchsize, 1, max_detections, 2)
    log_var = loc_log_var.view(batchsize, 1, max_detections, 2)
    truth = true_locs.view(batchsize, true_locs.shape[1], 1, 2)

    # this is to return a large error if star is off
    truth = truth + (truth == 0).float() * 1e16

    # (batchsize, max_stars, max_detections): log prob per (truth, detection)
    return utils.eval_normal_logprob(truth, mean, log_var).sum(dim = 3)
def get_fluxes_logprob_all_combs(true_fluxes, log_flux_mean, log_flux_log_var):
    """Log-density of every true flux under every predicted log-normal.

    Returns a (batchsize, max_stars, max_detections) tensor, summed over
    bands.
    """
    batchsize = true_fluxes.shape[0]
    n_bands = true_fluxes.shape[2]
    max_detections = log_flux_mean.shape[1]

    mean = log_flux_mean.view(batchsize, 1, max_detections, n_bands)
    log_var = log_flux_log_var.view(batchsize, 1, max_detections, n_bands)
    truth = true_fluxes.view(batchsize, true_fluxes.shape[1], 1, n_bands)

    # (batchsize, max_stars, max_detections): log prob per (truth, detection)
    return utils.eval_lognormal_logprob(truth, mean, log_var).sum(dim = 3)
def _get_log_probs_all_perms(locs_log_probs_all, flux_log_probs_all, is_on_array):
    """Total matched log-prob under every permutation of the detections.

    Returns two (batchsize, max_detections!) tensors: location and flux
    log-probs, each summed over the stars that are on.
    """
    batchsize = flux_log_probs_all.shape[0]
    max_detections = flux_log_probs_all.shape[-1]
    n_perms = math.factorial(max_detections)

    locs_loss_all_perm = torch.zeros(batchsize, n_perms, device = device)
    fluxes_loss_all_perm = torch.zeros(batchsize, n_perms, device = device)

    for i, perm in enumerate(permutations(range(max_detections))):
        # the diagonal of the permuted matrix pairs star k with detection perm[k]
        locs_loss_all_perm[:, i] = \
            (locs_log_probs_all[:, perm, :].diagonal(dim1 = 1, dim2 = 2) *
             is_on_array).sum(1)
        fluxes_loss_all_perm[:, i] = \
            (flux_log_probs_all[:, perm].diagonal(dim1 = 1, dim2 = 2) *
             is_on_array).sum(1)

    return locs_loss_all_perm, fluxes_loss_all_perm
def get_min_perm_loss(locs_log_probs_all, flux_log_probs_all, is_on_array):
    """Pick the permutation that minimizes the location loss.

    The flux loss is evaluated at that same permutation.  Returns
    (locs_loss, fluxes_loss, best_perm_index).
    """
    locs_perm, fluxes_perm = \
        _get_log_probs_all_perms(locs_log_probs_all, flux_log_probs_all, is_on_array)

    locs_loss, best = torch.min(-locs_perm, dim = 1)
    fluxes_loss = -torch.gather(fluxes_perm, 1, best.unsqueeze(1)).squeeze()

    return locs_loss, fluxes_loss, best
def get_params_loss(loc_mean, loc_log_var,
                    log_flux_mean, log_flux_log_var, log_probs,
                    true_locs, true_fluxes, true_is_on_array):
    """Negative log-likelihood loss over locations, fluxes and star counts.

    Returns (loss, counter_loss, locs_loss, fluxes_loss, perm_indx) where
    loss is the scalar batch mean.
    """
    # log prob of every true star under every predicted component
    locs_log_probs_all = get_locs_logprob_all_combs(true_locs,
                                                    loc_mean,
                                                    loc_log_var)
    flux_log_probs_all = get_fluxes_logprob_all_combs(true_fluxes,
                                                      log_flux_mean,
                                                      log_flux_log_var)

    # best matching between true stars and detections
    locs_loss, fluxes_loss, perm_indx = \
        get_min_perm_loss(locs_log_probs_all, flux_log_probs_all, true_is_on_array)

    # cross entropy on the predicted number of stars
    true_n_stars = true_is_on_array.sum(1)
    cross_entropy = CrossEntropyLoss(reduction="none").requires_grad_(False)
    counter_loss = cross_entropy(log_probs, true_n_stars.long())

    # mask out location losses that blew up (padded stars pushed to 1e16)
    loss_vec = locs_loss * (locs_loss.detach() < 1e6).float() \
        + fluxes_loss + counter_loss
    loss = loss_vec.mean()

    return loss, counter_loss, locs_loss, fluxes_loss, perm_indx
def get_inv_kl_loss(star_encoder,
                    images,
                    true_locs,
                    true_fluxes, use_l2_loss = False):
    """Sleep-phase loss between the true catalog and the encoder output.

    If use_l2_loss is set, the predicted log-variances are replaced by zeros,
    turning the Gaussian terms into squared-error losses.
    """
    # slice the images into padded tiles, with the catalog re-expressed per tile
    image_ptiles, true_tile_locs, true_tile_fluxes, \
        true_tile_n_stars, true_tile_is_on_array = \
        star_encoder.get_image_ptiles(images, true_locs, true_fluxes,
                                      clip_max_stars = True)

    # variational parameters on each tile
    loc_mean, loc_log_var, log_flux_mean, log_flux_log_var, log_probs = \
        star_encoder(image_ptiles, true_tile_n_stars)

    if use_l2_loss:
        loc_log_var = torch.zeros((loc_log_var.shape), device = device)
        log_flux_log_var = torch.zeros((log_flux_log_var.shape), device = device)

    loss, counter_loss, locs_loss, fluxes_loss, perm_indx = \
        get_params_loss(loc_mean, loc_log_var,
                        log_flux_mean, log_flux_log_var, log_probs,
                        true_tile_locs, true_tile_fluxes,
                        true_tile_is_on_array.float())

    return loss, counter_loss, locs_loss, fluxes_loss, perm_indx, log_probs
def eval_sleep(star_encoder, train_loader,
               optimizer = None, train = False):
    """Run one sleep-phase pass (training or evaluation) over the loader.

    When `train` is True the encoder is put in train mode and, if an
    optimizer is given, one gradient step is taken per batch; otherwise the
    pass only accumulates losses with the encoder in eval mode.

    Returns (avg_loss, avg_counter_loss, avg_locs_loss, avg_fluxes_loss),
    averaged over the dataset (and over tiles for the last three).
    """
    avg_loss = 0.0
    avg_counter_loss = 0.0
    avg_locs_loss = 0.0
    avg_fluxes_loss = 0.0

    for _, data in enumerate(train_loader):
        true_fluxes = data['fluxes']
        true_locs = data['locs']
        images = data['image']

        if train:
            star_encoder.train()
            if optimizer is not None:
                optimizer.zero_grad()
        else:
            star_encoder.eval()

        # evaluate log q
        loss, counter_loss, locs_loss, fluxes_loss = \
            get_inv_kl_loss(star_encoder, images,
                            true_locs, true_fluxes)[0:4]

        if train:
            if optimizer is not None:
                loss.backward()
                optimizer.step()

        # running dataset-averaged losses; the per-tile losses are also
        # normalized by the number of tiles per image
        avg_loss += loss.item() * images.shape[0] / len(train_loader.dataset)
        avg_counter_loss += counter_loss.sum().item() / (len(train_loader.dataset) * star_encoder.n_tiles)
        avg_fluxes_loss += fluxes_loss.sum().item() / (len(train_loader.dataset) * star_encoder.n_tiles)
        avg_locs_loss += locs_loss.sum().item() / (len(train_loader.dataset) * star_encoder.n_tiles)

    return avg_loss, avg_counter_loss, avg_locs_loss, avg_fluxes_loss
def run_sleep(star_encoder, loader, optimizer, n_epochs,
              out_filename, print_every = 10,
              full_image = None, mean_stars = None):
    """Train the encoder with sleep-phase updates for n_epochs epochs.

    Fresh simulated data is drawn every epoch.  Losses are appended to
    `<out_filename>-test_losses`; every `print_every` epochs a held-out loss
    is computed, the encoder weights are checkpointed to `out_filename`, and
    (if `full_image` is given) ELBO diagnostics are saved as well.
    """
    test_losses = np.zeros((4, n_epochs))

    # save ELBO as well
    if full_image is not None:
        star_encoder.eval();
        elbo_results_vec = elbo_lib.save_elbo_results(full_image, star_encoder,
                                                      loader.dataset.simulator, mean_stars = mean_stars)

    for epoch in range(n_epochs):
        t0 = time.time()

        # draw fresh data
        loader.dataset.set_params_and_images()
        avg_loss, counter_loss, locs_loss, fluxes_loss = \
            eval_sleep(star_encoder, loader, optimizer, train = True)
        elapsed = time.time() - t0
        print('[{}] loss: {:0.4f}; counter loss: {:0.4f}; locs loss: {:0.4f}; fluxes loss: {:0.4f} \t[{:.1f} seconds]'.format(\
            epoch, avg_loss, counter_loss, locs_loss, fluxes_loss, elapsed))

        test_losses[:, epoch] = np.array([avg_loss, counter_loss, locs_loss, fluxes_loss])
        np.savetxt(out_filename + '-test_losses', test_losses)

        if ((epoch % print_every) == 0) or (epoch == (n_epochs-1)):
            loader.dataset.set_params_and_images()
            # NOTE(review): this extra pass runs with train=True but no
            # optimizer, so no gradient step is taken — presumably it only
            # refreshes train-mode statistics; confirm intent.
            foo = eval_sleep(star_encoder, loader, train = True)[0];
            star_encoder.eval();

            # held-out loss on freshly drawn data
            loader.dataset.set_params_and_images()
            test_loss, test_counter_loss, test_locs_loss, test_fluxes_loss = \
                eval_sleep(star_encoder, loader, train = False)

            print('**** test loss: {:.3f}; counter loss: {:.3f}; locs loss: {:.3f}; fluxes loss: {:.3f} ****'.format(\
                test_loss, test_counter_loss, test_locs_loss, test_fluxes_loss))

            # save ELBO as well
            if (full_image is not None) & (epoch > 0):
                elbo_results = elbo_lib.save_elbo_results(full_image, star_encoder, loader.dataset.simulator,
                                                          mean_stars = mean_stars, pad = star_encoder.edge_padding)
                elbo_results_vec = np.vstack((elbo_results_vec, elbo_results))
                np.savetxt(out_filename + '-elbo_results', elbo_results_vec)

            print("writing the encoder parameters to " + out_filename)
            torch.save(star_encoder.state_dict(), out_filename)
| 9,800 | 37.136187 | 127 | py |
DeblendingStarfields | DeblendingStarfields-master/deblending_runjingdev/which_device.py | import torch
# Global torch device used throughout the package.
# NOTE(review): the GPU index is hard-coded to cuda:6 — confirm this matches
# the intended machine, or make it configurable.
device = torch.device("cuda:6" if torch.cuda.is_available() else "cpu")
# device = 'cpu'
| 102 | 24.75 | 71 | py |
DeblendingStarfields | DeblendingStarfields-master/experiments_sparse_field/sparse_field_lib.py | import numpy as np
import torch
import fitsio
from astropy.io import fits
from astropy.wcs import WCS
import deblending_runjingdev.sdss_dataset_lib as sdss_dataset_lib
from deblending_runjingdev.sdss_dataset_lib import _get_mgrid2
def load_data(catalog_file = '../coadd_field_catalog_runjing_liu.fit',
              sdss_dir = '../sdss_stage_dir/',
              run = 94, camcol = 1, field = 12, bands = (2, 3),
              align_bands = True):
    """Load an SDSS field, its coadd catalog, and per-band WCS objects.

    If align_bands is True, every band after the first is resampled
    (nearest-neighbor) so the catalog stars land on the same pixels as in
    the first requested band.

    Returns (image, fits_file, wcs_list, sdss_data).

    BUGFIX: the `bands` default was the mutable list [2, 3]; it is only
    read here (len / indexing / iteration), so a tuple is a safe drop-in.
    """
    n_bands = len(bands)
    band_letters = ['ugriz'[bands[i]] for i in range(n_bands)]

    ##################
    # load sdss data
    ##################
    sdss_data = sdss_dataset_lib.SloanDigitalSkySurvey(sdssdir = sdss_dir,
                                                       run = run, camcol = camcol,
                                                       field = field, bands = bands)
    image = torch.Tensor(sdss_data[0]['image'])
    slen0 = image.shape[-2]
    slen1 = image.shape[-1]

    ##################
    # load coordinate files
    ##################
    frame_names = ["frame-{}-{:06d}-{:d}-{:04d}.fits".format(band_letters[i],
                                                             run, camcol, field) for i in range(n_bands)]
    wcs_list = []
    for i in range(n_bands):
        hdulist = fits.open(sdss_dir + str(run) + '/' + str(camcol) + '/' + str(field) + \
                            '/' + frame_names[i])
        wcs_list += [WCS(hdulist['primary'].header)]

    # sky coordinates of two opposite image corners (first band)
    min_coords = wcs_list[0].wcs_pix2world(np.array([[0, 0]]), 0)
    max_coords = wcs_list[0].wcs_pix2world(np.array([[slen1, slen0]]), 0)

    ##################
    # load catalog
    ##################
    fits_file = fitsio.FITS(catalog_file)[1]
    true_ra = fits_file['ra'][:]
    true_decl = fits_file['dec'][:]

    # make sure our catalog covers the whole image
    assert true_ra.min() < min_coords[0, 0]
    assert true_ra.max() > max_coords[0, 0]
    assert true_decl.min() < min_coords[0, 1]
    assert true_decl.max() > max_coords[0, 1]

    ##################
    # align image
    ##################
    if align_bands:
        pix_coords_list = [wcs_list[i].wcs_world2pix(true_ra, true_decl, 0, \
                                                     ra_dec_order = True) \
                           for i in range(n_bands)]
        for i in range(1, n_bands):
            # median per-star offset between this band and the first band,
            # rescaled to the [-1, 1] grid_sample coordinate convention
            shift_x0 = np.median(pix_coords_list[0][1] - pix_coords_list[i][1])
            shift_x1 = np.median(pix_coords_list[0][0] - pix_coords_list[i][0])
            grid = _get_mgrid2(slen0, slen1).unsqueeze(0) - \
                torch.Tensor([[[[shift_x1 / (slen1 - 1),
                                 shift_x0 / (slen0 - 1)]]]]) * 2
            image_i = image[i].unsqueeze(0).unsqueeze(0)
            band_aligned = torch.nn.functional.grid_sample(image_i, grid,
                                                           mode = 'nearest', align_corners=True).squeeze()
            image[i] = band_aligned

    return image, fits_file, wcs_list, sdss_data
| 2,936 | 33.151163 | 90 | py |
DeblendingStarfields | DeblendingStarfields-master/experiments_sparse_field/train_sleep-sparse_field.py | import numpy as np
import torch
import torch.optim as optim
from deblending_runjingdev import simulated_datasets_lib
from deblending_runjingdev import starnet_lib
from deblending_runjingdev import sleep_lib
from deblending_runjingdev import psf_transform_lib
from deblending_runjingdev import wake_lib
import json
import time
from deblending_runjingdev.which_device import device
# Sleep-phase training of StarNet on a simulated sparse star field:
# single SDSS band, 500 x 500 pixel images with ~50 stars on average.
print('device: ', device)
print('torch version: ', torch.__version__)
###############
# set seed
###############
# Fixed seeds plus deterministic cuDNN so repeated runs reproduce the same
# simulated dataset and training trajectory.
np.random.seed(65765)
_ = torch.manual_seed(3453453)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
###############
# data parameters
###############
with open('../model_params/default_star_parameters.json', 'r') as fp:
    data_params = json.load(fp)
# override defaults: a sparse field -- 50 stars on average in a 500 x 500 image
data_params['mean_stars'] = 50
data_params['slen'] = 500
print(data_params)
###############
# load psf
###############
# band index 2 is the SDSS r band; PSF parameters come from the psField file
# and are passed through the parametric power-law PSF model.
bands = [2]
psfield_file = '../sdss_stage_dir/94/1/12/psField-000094-1-0012.fit'
init_psf_params = psf_transform_lib.get_psf_params(
                                    psfield_file,
                                    bands = bands)
power_law_psf = psf_transform_lib.PowerLawPSF(init_psf_params.to(device))
psf_og = power_law_psf.forward().detach()
###############
# sky intensity: for the r band
###############
# Planar background model; only the constant term is set (862 counts),
# the two slope terms are left at zero.
init_background_params = torch.zeros(len(bands), 3).to(device)
init_background_params[0, 0] = 862.
planar_background = wake_lib.PlanarBackground(image_slen = data_params['slen'],
                            init_background_params = init_background_params.to(device))
background = planar_background.forward().detach()
###############
# draw data
###############
print('generating data: ')
n_images = 200
t0 = time.time()
star_dataset = \
    simulated_datasets_lib.load_dataset_from_params(psf_og,
                            data_params,
                            background = background,
                            n_images = n_images,
                            transpose_psf = False,
                            add_noise = True)
print('data generation time: {:.3f}secs'.format(time.time() - t0))
# get loader
# batch size of 1: each image is a full 500 x 500 field, so images are
# processed one at a time
batchsize = 1
loader = torch.utils.data.DataLoader(
                 dataset=star_dataset,
                 batch_size=batchsize,
                 shuffle=True)
###############
# define VAE
###############
# 50-pixel tiles with step 50 and no edge padding partition the image into
# non-overlapping tiles; at most 3 detections per tile.
star_encoder = starnet_lib.StarEncoder(slen = data_params['slen'],
                           ptile_slen = 50,
                           step = 50,
                           edge_padding = 0,
                           n_bands = psf_og.shape[0],
                           max_detections = 3,
                           track_running_stats = False)
star_encoder.to(device)
###############
# define optimizer
###############
learning_rate = 1e-3
weight_decay = 1e-5
optimizer = optim.Adam([
                    {'params': star_encoder.parameters(),
                    'lr': learning_rate}],
                    weight_decay = weight_decay)
###############
# Train!
###############
n_epochs = 141
print_every = 10
print('training')
out_filename = './starnet_sparsefield'
sleep_lib.run_sleep(star_encoder,
                    loader,
                    optimizer,
                    n_epochs,
                    out_filename = out_filename,
                    print_every = print_every)
# (kept for reference) one-off evaluation on a freshly simulated image:
# star_dataset2 = \
#     simulated_datasets_lib.load_dataset_from_params(psf_og,
#                             data_params,
#                             background = background,
#                             n_images = 1,
#                             transpose_psf = False,
#                             add_noise = True)
# sim_image = star_dataset2[0]['image'].unsqueeze(0)
# true_locs = star_dataset2[0]['locs'][0:star_dataset[0]['n_stars']].unsqueeze(0)
# true_fluxes = star_dataset2[0]['fluxes'][0:star_dataset[0]['n_stars']].unsqueeze(0)
# np.savez('./fits/results_2020-05-10/starnet_ri_sparse_field',
#          sim_image = sim_image.cpu().numpy(),
#          true_locs = true_locs.cpu().numpy(),
#          true_fluxes = true_fluxes.cpu().numpy())
# # check loss
# loss, counter_loss, locs_loss, fluxes_loss, perm_indx = \
#     sleep_lib.get_inv_kl_loss(star_encoder, sim_image,
#                                 true_locs, true_fluxes)[0:5]
# print(loss)
# print(counter_loss.mean())
# print(locs_loss.mean())
# print(fluxes_loss.mean())
| 4,469 | 29.827586 | 87 | py |
DeblendingStarfields | DeblendingStarfields-master/experiments_m2/train_wake_sleep.py | import numpy as np
import torch
import torch.optim as optim
import deblending_runjingdev.sdss_dataset_lib as sdss_dataset_lib
import deblending_runjingdev.simulated_datasets_lib as simulated_datasets_lib
import deblending_runjingdev.starnet_lib as starnet_lib
import deblending_runjingdev.sleep_lib as sleep_lib
from deblending_runjingdev.sleep_lib import run_sleep
import deblending_runjingdev.wake_lib as wake_lib
import deblending_runjingdev.psf_transform_lib as psf_transform_lib
import time
import json
import os
from deblending_runjingdev.which_device import device
# Wake-sleep training on the real SDSS M2 image: alternates wake phases
# (fitting PSF/background model parameters to the real image) with sleep
# phases (retraining the encoder on data simulated from the updated model).
print('device: ', device)
print('torch version: ', torch.__version__)
import argparse
parser = argparse.ArgumentParser()
# (x0, x1): pixel offset of the M2 subimage within the SDSS frame
parser.add_argument('--x0', type=int, default=630)
parser.add_argument('--x1', type=int, default=310)
# path to a sleep-trained encoder checkpoint used to initialize wake-sleep
parser.add_argument('--init_encoder', type=str, default='./fits/results_2020-05-15/starnet_ri')
parser.add_argument('--outfolder', type=str, default='./fits/results_2020-05-15/')
parser.add_argument('--outfilename', type=str, default='starnet_ri_wake-sleep')
parser.add_argument('--n_iter', type=int, default=2)
# prior on the number of stars (mean) and the flux power law (alpha)
parser.add_argument('--prior_mu', type=int, default=1500)
parser.add_argument('--prior_alpha', type=float, default=0.5)
args = parser.parse_args()
assert os.path.isfile(args.init_encoder)
assert os.path.isdir(args.outfolder)
#######################
# set seed
########################
np.random.seed(32090275)
_ = torch.manual_seed(120457)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
#######################
# get sdss data
#######################
sdss_image = sdss_dataset_lib.load_m2_data(sdss_dir = './../sdss_stage_dir/',
                            hubble_dir = './hubble_data/',
                            x0 = args.x0,
                            x1 = args.x1)[0]
sdss_image = sdss_image.unsqueeze(0).to(device)
#######################
# simulated data parameters
#######################
with open('../model_params/default_star_parameters.json', 'r') as fp:
    data_params = json.load(fp)
data_params['alpha'] = args.prior_alpha
data_params['mean_stars'] = args.prior_mu
print(data_params)
###############
# load model parameters
###############
#### the psf
psfield_file = '../sdss_stage_dir/2583/2/136/psField-002583-2-0136.fit'
init_psf_params = psf_transform_lib.get_psf_params(
                                    psfield_file,
                                    bands = [2, 3]).to(device)
model_params = wake_lib.ModelParams(sdss_image,
                            init_psf_params = init_psf_params,
                            init_background_params = None)
psf_og = model_params.get_psf().detach()
background_og = model_params.get_background().detach().squeeze(0)
###############
# draw data
###############
print('generating data: ')
n_images = 200
t0 = time.time()
star_dataset = \
    simulated_datasets_lib.load_dataset_from_params(psf_og,
                            data_params,
                            n_images = n_images,
                            background = background_og,
                            transpose_psf = False,
                            add_noise = True)
print('data generation time: {:.3f}secs'.format(time.time() - t0))
# get loader
batchsize = 20
loader = torch.utils.data.DataLoader(
                 dataset=star_dataset,
                 batch_size=batchsize,
                 shuffle=True)
###############
# define VAE
###############
star_encoder = starnet_lib.StarEncoder(slen = data_params['slen'],
                           ptile_slen = 8,
                           step = 2,
                           edge_padding = 3,
                           n_bands = 2,
                           max_detections = 2)
# warm-start the encoder from a sleep-trained checkpoint
init_encoder = args.init_encoder
star_encoder.load_state_dict(torch.load(init_encoder,
                               map_location=lambda storage, loc: storage))
star_encoder.to(device)
star_encoder.eval();
####################
# optimizer
#####################
encoder_lr = 1e-5
sleep_optimizer = optim.Adam([
                    {'params': star_encoder.parameters(),
                    'lr': encoder_lr}],
                    weight_decay = 1e-5)
# initial loss:
sleep_loss, sleep_counter_loss, sleep_locs_loss, sleep_fluxes_loss = \
    sleep_lib.eval_sleep(star_encoder, loader, train = False)
print('**** INIT SLEEP LOSS: {:.3f}; counter loss: {:.3f}; locs loss: {:.3f}; fluxes loss: {:.3f} ****'.format(\
        sleep_loss, sleep_counter_loss, sleep_locs_loss, sleep_fluxes_loss))
wake_loss = wake_lib.get_wake_loss(sdss_image, star_encoder, model_params,
                                    n_samples = 1, run_map = True).detach()
print('**** INIT WAKE LOSS: {:.3f}'.format(wake_loss))
# file header to save results
outfolder = args.outfolder # './fits/results_2020-03-04/'
outfile_base = outfolder + args.outfilename
print(outfile_base)
############################
# Run wake-sleep
############################
t0 = time.time()
n_iter = args.n_iter
map_losses = torch.zeros(n_iter)
for iteration in range(0, n_iter):
    #######################
    # wake phase training
    #######################
    print('RUNNING WAKE PHASE. ITER = ' + str(iteration))
    # Iteration 0 starts from the initial PSF parameters and encoder; later
    # iterations reload the model parameters saved by the previous wake phase
    # and the encoder saved by the previous sleep phase (suffix
    # '-encoder-iter{i}' is written at the end of iteration i-1).
    if iteration == 0:
        powerlaw_psf_params = init_psf_params
        planar_background_params = None
        encoder_file = init_encoder
    else:
        powerlaw_psf_params = \
            torch.Tensor(np.load(outfile_base + '-iter' + str(iteration -1) + \
                            '-powerlaw_psf_params.npy')).to(device)
        planar_background_params = \
            torch.Tensor(np.load(outfile_base + '-iter' + str(iteration -1) + \
                            '-planarback_params.npy')).to(device)
        encoder_file = outfile_base + '-encoder-iter' + str(iteration)
    print('loading encoder from: ', encoder_file)
    star_encoder.load_state_dict(torch.load(encoder_file,
                                   map_location=lambda storage, loc: storage))
    star_encoder.to(device);
    star_encoder.eval();
    model_params, map_losses[iteration] = \
        wake_lib.run_wake(sdss_image, star_encoder, powerlaw_psf_params,
                            planar_background_params,
                            n_samples = 25,
                            out_filename = outfile_base + '-iter' + str(iteration),
                            lr = 1e-3,
                            n_epochs = 100,
                            run_map = False,
                            print_every = 10)
    print(list(model_params.planar_background.parameters())[0])
    print(list(model_params.power_law_psf.parameters())[0])
    print(map_losses[iteration])
    np.save(outfolder + 'map_losses', map_losses.cpu().detach())
    ########################
    # sleep phase training
    ########################
    print('RUNNING SLEEP PHASE. ITER = ' + str(iteration + 1))
    # update psf and background
    # mutate the simulator in place so freshly drawn sleep-phase data uses
    # the PSF/background fitted during the wake phase
    loader.dataset.simulator.psf = model_params.get_psf().detach()
    loader.dataset.simulator.background = model_params.get_background().squeeze(0).detach()
    run_sleep(star_encoder,
              loader,
              sleep_optimizer,
              n_epochs = 11,
              out_filename = outfile_base + '-encoder-iter' + str(iteration + 1))
print('DONE. Elapsed: {}secs'.format(time.time() - t0))
| 7,431 | 33.567442 | 112 | py |
DeblendingStarfields | DeblendingStarfields-master/experiments_m2/train_sleep.py | import numpy as np
import torch
import torch.optim as optim
import deblending_runjingdev.simulated_datasets_lib as simulated_datasets_lib
import deblending_runjingdev.sdss_dataset_lib as sdss_dataset_lib
import deblending_runjingdev.starnet_lib as starnet_lib
import deblending_runjingdev.sleep_lib as sleep_lib
import deblending_runjingdev.psf_transform_lib as psf_transform_lib
import deblending_runjingdev.wake_lib as wake_lib
import json
import time
from deblending_runjingdev.which_device import device
# Sleep-phase training of StarNet for the M2 globular cluster experiment:
# simulates star fields with the SDSS PSF and the real M2 background, then
# trains the encoder on the simulated data.
print('device: ', device)
print('torch version: ', torch.__version__)
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--outfolder', type=str, default='./fits/results_2020-05-15/')
parser.add_argument('--outfilename', type=str, default='starnet_ri')
# prior on the number of stars (mean) and the flux power law (alpha)
parser.add_argument('--prior_mu', type=int, default=1500)
parser.add_argument('--prior_alpha', type=float, default=0.5)
args = parser.parse_args()
import os
assert os.path.isdir(args.outfolder)
###############
# set seed
###############
np.random.seed(65765)
_ = torch.manual_seed(3453453)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
###############
# data parameters
###############
with open('../model_params/default_star_parameters.json', 'r') as fp:
    data_params = json.load(fp)
data_params['mean_stars'] = args.prior_mu
data_params['alpha'] = args.prior_alpha
print(data_params)
###############
# load psf and background
###############
# bands 2 and 3 are the SDSS r and i bands
psfield_file = '../sdss_stage_dir/2583/2/136/psField-002583-2-0136.fit'
init_psf_params = psf_transform_lib.get_psf_params(
                                    psfield_file,
                                    bands = [2, 3])
# init_psf_params = torch.Tensor(np.load('./data/fitted_powerlaw_psf_params.npy'))
power_law_psf = psf_transform_lib.PowerLawPSF(init_psf_params.to(device))
psf_og = power_law_psf.forward().detach()
# load background
# index [1] of load_m2_data is the background estimate for the M2 field
sdss_background = \
    sdss_dataset_lib.load_m2_data(sdss_dir = './../sdss_stage_dir/',
                                  hubble_dir = './hubble_data/')[1]
sdss_background = sdss_background.to(device)
###############
# draw data
###############
print('generating data: ')
n_images = 200
t0 = time.time()
star_dataset = \
    simulated_datasets_lib.load_dataset_from_params(psf_og,
                            data_params,
                            background = sdss_background,
                            n_images = n_images,
                            transpose_psf = False,
                            add_noise = True)
print('data generation time: {:.3f}secs'.format(time.time() - t0))
# get loader
batchsize = 20
loader = torch.utils.data.DataLoader(
                 dataset=star_dataset,
                 batch_size=batchsize,
                 shuffle=True)
###############
# define VAE
###############
# small 8-pixel tiles with step 2 and 3-pixel padding: overlapping tiles
# suited to the very crowded M2 field
star_encoder = starnet_lib.StarEncoder(slen = data_params['slen'],
                           ptile_slen = 8,
                           step = 2,
                           edge_padding = 3,
                           n_bands = psf_og.shape[0],
                           max_detections = 2)
star_encoder.to(device)
###############
# define optimizer
###############
learning_rate = 1e-3
weight_decay = 1e-5
optimizer = optim.Adam([
                    {'params': star_encoder.parameters(),
                    'lr': learning_rate}],
                    weight_decay = weight_decay)
###############
# Train!
###############
n_epochs = 201
print_every = 5
print('training')
t0 = time.time()
out_filename = args.outfolder + args.outfilename # './fits/results_2020-05-15/starnet_ri'
sleep_lib.run_sleep(star_encoder, loader, optimizer, n_epochs,
                    out_filename = out_filename,
                    print_every = print_every,
                    full_image = None)
print('DONE. Elapsed: {}secs'.format(time.time() - t0))
| 3,978 | 28.474074 | 89 | py |
DeblendingStarfields | DeblendingStarfields-master/experiments_deblending/train_encoder.py | import numpy as np
import torch
import torch.optim as optim
import deblending_runjingdev.simulated_datasets_lib as simulated_datasets_lib
import deblending_runjingdev.starnet_lib as starnet_lib
import deblending_runjingdev.sleep_lib as sleep_lib
import deblending_runjingdev.psf_transform_lib as psf_transform_lib
import json
import time
from deblending_runjingdev.which_device import device
# Trains a StarNet encoder for the two-star deblending experiment: tiny
# 7 x 7 two-band images containing one or two stars, so the encoder learns
# to separate overlapping sources.
print('device: ', device)
print('torch version: ', torch.__version__)
###############
# set seed
###############
np.random.seed(5751)
_ = torch.manual_seed(1151)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
###############
# data parameters
###############
with open('../model_params/default_star_parameters.json', 'r') as fp:
    data_params = json.load(fp)
# between 1 and 2 stars per image
data_params['min_stars'] = 1
# mean set so that P(n_stars <= 1) \approx 0.5
data_params['mean_stars'] = 1.65
data_params['max_stars'] = 2
data_params['slen'] = 7
data_params['f_max'] = 10000
print(data_params)
###############
# load psf
###############
# bands 2 and 3 are the SDSS r and i bands
bands = [2, 3]
psfield_file = '../sdss_stage_dir/2583/2/136/psField-002583-2-0136.fit'
init_psf_params = psf_transform_lib.get_psf_params(
                                    psfield_file,
                                    bands = bands)
power_law_psf = psf_transform_lib.PowerLawPSF(init_psf_params.to(device))
psf = power_law_psf.forward().detach()
###############
# set background
###############
# constant per-band sky levels (counts)
background = torch.zeros(len(bands), data_params['slen'], data_params['slen']).to(device)
background[0] = 686.
background[1] = 1123.
###############
# draw data
###############
print('generating data: ')
n_images = 60000
t0 = time.time()
star_dataset = \
    simulated_datasets_lib.load_dataset_from_params(psf,
                            data_params,
                            background = background,
                            n_images = n_images,
                            transpose_psf = False,
                            add_noise = True)
print('data generation time: {:.3f}secs'.format(time.time() - t0))
# get data loader
batchsize = 2000
loader = torch.utils.data.DataLoader(
                 dataset=star_dataset,
                 batch_size=batchsize,
                 shuffle=True)
###############
# define VAE
###############
# the tile is the whole 7 x 7 image: one tile per image, no padding
star_encoder = starnet_lib.StarEncoder(slen = data_params['slen'],
                           ptile_slen = data_params['slen'],
                           step = data_params['slen'],
                           edge_padding = 0,
                           n_bands = len(bands),
                           max_detections = 2)
star_encoder.to(device)
###############
# define optimizer
###############
learning_rate = 1e-3
weight_decay = 1e-3
optimizer = optim.Adam([
                    {'params': star_encoder.parameters(),
                    'lr': learning_rate}],
                    weight_decay = weight_decay)
###############
# Train!
###############
n_epochs = 30
print_every = 10
print('training')
out_filename = './starnet'
sleep_lib.run_sleep(star_encoder,
                    loader,
                    optimizer,
                    n_epochs,
                    out_filename = out_filename,
                    print_every = print_every)
| 3,330 | 25.862903 | 89 | py |
DeblendingStarfields | DeblendingStarfields-master/experiments_elbo_vs_sleep/train_elbo.py | import numpy as np
import torch
import torch.optim as optim
import deblending_runjingdev.elbo_lib as elbo_lib
import deblending_runjingdev.simulated_datasets_lib as simulated_datasets_lib
import deblending_runjingdev.starnet_lib as starnet_lib
import deblending_runjingdev.psf_transform_lib as psf_transform_lib
import time
from deblending_runjingdev.which_device import device
# Trains the StarNet encoder by directly maximizing the ELBO on one fixed
# test image (the "klqp" direction), using either the REINFORCE or the
# reparameterization gradient estimator. This is the comparison baseline
# for sleep-phase (klpq) training.
print('device: ', device)
print('torch version: ', torch.__version__)
###############
# set seed
###############
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--seed", type = int, default = 0)
parser.add_argument("--test_image",
                    type = str,
                    default = 'small')
parser.add_argument("--grad_estimator",
                    type = str,
                    default = 'reinforce')
args = parser.parse_args()
print(args.seed)
# seed offsets make restarts with different --seed values independent
np.random.seed(8910 + args.seed * 17)
_ = torch.manual_seed(8910 + args.seed * 13)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
###############
# load psf
###############
# bands 2 and 3 are the SDSS r and i bands
bands = [2, 3]
psfield_file = '../sdss_stage_dir/2583/2/136/psField-002583-2-0136.fit'
init_psf_params = psf_transform_lib.get_psf_params(
                                    psfield_file,
                                    bands = bands)
power_law_psf = psf_transform_lib.PowerLawPSF(init_psf_params.to(device))
psf_og = power_law_psf.forward().detach()
###############
# Get image
###############
# choose the fixed test image and matching encoder/prior configuration
if args.test_image == 'small':
    # test image file
    test_image_file = './test_image_20x20.npz'
    # parameters for encoder
    ptile_slen = 10
    step = 10
    edge_padding = 0
    # prior parameters
    mean_stars = 4
elif args.test_image == 'large':
    # test image file
    test_image_file = './test_image_100x100.npz'
    # parameters for encoder
    ptile_slen = 20
    step = 10
    edge_padding = 5
    # prior parameters
    mean_stars = 50
else:
    print('Specify whether to use the large (100 x 100) test image',
            'or the small (20 x 20) test image')
    raise NotImplementedError()
full_image_np = np.load(test_image_file)['image']
full_image = torch.Tensor(full_image_np).unsqueeze(0).to(device)
slen = full_image.shape[-1]
# minimum detectable flux for the encoder's flux distribution
fmin = 1000.
###############
# background
###############
# constant per-band sky levels (counts)
background = torch.zeros(len(bands), slen, slen).to(device)
background[0] = 686.
background[1] = 1123.
###############
# Get simulator
###############
simulator = simulated_datasets_lib.StarSimulator(psf_og,
                                                slen,
                                                background,
                                                transpose_psf = False)
###############
# define VAE
###############
star_encoder = starnet_lib.StarEncoder(slen = slen,
                           ptile_slen = ptile_slen,
                           step = step,
                           edge_padding = edge_padding,
                           n_bands = psf_og.shape[0],
                           max_detections = 2,
                           fmin = fmin,
                           constrain_logflux_mean = True,
                           track_running_stats = False)
star_encoder.eval();
star_encoder.to(device);
###############
# define optimizer
###############
learning_rate = 1e-3
weight_decay = 1e-3
optimizer = optim.Adam([
                    {'params': star_encoder.parameters(),
                    'lr': learning_rate}],
                    weight_decay = weight_decay)
###############
# Train!
###############
# Output checkpoint name encodes the gradient estimator, the restart seed,
# and the test-image size.
out_filename = './fits/starnet_elbo_' + args.grad_estimator + '-restart' + str(args.seed)
if args.test_image == 'small':
    # training hyper-parameters for the 20 x 20 test image
    n_epochs = 2500
    print_every = 100
    n_samples = 2000
    out_filename = out_filename + '_20x20'
else:
    # The 100 x 100 image is accepted by the argument parser (encoder and
    # prior parameters are configured above), but ELBO training for it has
    # not been implemented. Bug fix: the filename-suffix assignment that
    # previously followed this raise was unreachable dead code and was
    # removed.
    raise NotImplementedError(
        'ELBO training is only implemented for the small (20 x 20) test image')
print('training')
# record the ELBO at initialization; subsequent rows are appended each time
# results are logged, giving an ELBO-versus-epoch trace
elbo_results_vec = elbo_lib.save_elbo_results(full_image, star_encoder,
                            simulator, mean_stars,
                            n_samples = n_samples,
                            pad = star_encoder.edge_padding)
t0 = time.time()
for epoch in range(1, n_epochs + 1):
    optimizer.zero_grad()
    # get pseudo loss
    # The "pseudo loss" is a surrogate whose gradient equals the chosen
    # estimator of the negative-ELBO gradient.
    if args.grad_estimator == 'reinforce':
        ps_loss = elbo_lib.get_pseudo_loss(full_image, star_encoder,
                                simulator, mean_stars,
                                n_samples = n_samples,
                                pad = star_encoder.edge_padding)
    elif args.grad_estimator == 'reparam':
        ps_loss = elbo_lib.get_pseudo_loss_all_sum(full_image, star_encoder,
                                simulator, mean_stars, n_samples)
    else:
        print(args.grad_estimator, 'not implemented. Specify either reinforce or reparam')
        raise NotImplementedError()
    ps_loss.backward()
    optimizer.step()
    if ((epoch % print_every) == 0) or (epoch == n_epochs):
        print('epoch = {}; elapsed = {:.1f}sec'.format(epoch, time.time() - t0))
        elbo_results = elbo_lib.save_elbo_results(full_image, star_encoder,
                                    simulator, mean_stars,
                                    n_samples = n_samples,
                                    pad = star_encoder.edge_padding)
        elbo_results_vec = np.vstack((elbo_results_vec, elbo_results))
        np.savetxt(out_filename + '-elbo_results', elbo_results_vec)
        print("writing the encoder parameters to " + out_filename)
        torch.save(star_encoder.state_dict(), out_filename)
        # timer is reset here, so 'elapsed' measures time per logging interval
        t0 = time.time()
| 5,869 | 30.55914 | 90 | py |
DeblendingStarfields | DeblendingStarfields-master/experiments_elbo_vs_sleep/train_elbo-Copy1.py | import numpy as np
import torch
import torch.optim as optim
import deblending_runjingdev.elbo_lib as elbo_lib
import deblending_runjingdev.simulated_datasets_lib as simulated_datasets_lib
import deblending_runjingdev.starnet_lib as starnet_lib
import deblending_runjingdev.psf_transform_lib as psf_transform_lib
import time
from deblending_runjingdev.which_device import device
# Scratch copy of train_elbo.py hard-wired to the 20 x 20 test image and the
# reparameterization ("all sum") estimator; commented-out alternatives are
# kept for experimentation. Note the throwaway output path './foo'.
print('device: ', device)
print('torch version: ', torch.__version__)
###############
# set seed
###############
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--seed", type = int, default = 0)
args = parser.parse_args()
print(args.seed)
# seed offsets make restarts with different --seed values independent
np.random.seed(575 + args.seed * 17)
_ = torch.manual_seed(1512 + args.seed * 13)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
###############
# load psf
###############
# bands 2 and 3 are the SDSS r and i bands
bands = [2, 3]
psfield_file = '../sdss_stage_dir/2583/2/136/psField-002583-2-0136.fit'
init_psf_params = psf_transform_lib.get_psf_params(
                                    psfield_file,
                                    bands = bands)
# init_psf_params = torch.Tensor(np.load('./data/fitted_powerlaw_psf_params.npy'))
power_law_psf = psf_transform_lib.PowerLawPSF(init_psf_params.to(device))
psf_og = power_law_psf.forward().detach()
###############
# Get image
###############
test_image_file = './test_image_20x20.npz'
full_image_np = np.load(test_image_file)['image']
full_image = torch.Tensor(full_image_np).unsqueeze(0).to(device)
slen = full_image.shape[-1]
fmin = 1000.
mean_stars = 4
# load true locations and fluxes
# (only used by the commented-out loss_on_true_nstars experiment below)
true_locs = torch.Tensor(np.load(test_image_file)['locs']).to(device)
true_fluxes = torch.Tensor(np.load(test_image_file)['fluxes']).to(device)
###############
# background
###############
background = torch.zeros(len(bands), slen, slen).to(device)
background[0] = 686.
background[1] = 1123.
###############
# Get simulator
###############
simulator = simulated_datasets_lib.StarSimulator(psf_og,
                                                slen,
                                                background,
                                                transpose_psf = False)
###############
# define VAE
###############
star_encoder = starnet_lib.StarEncoder(slen = slen,
                           ptile_slen = 10,
                           step = 10,
                           edge_padding = 0,
                           n_bands = psf_og.shape[0],
                           max_detections = 2,
                           fmin = fmin,
                           constrain_logflux_mean = True,
                           track_running_stats = False)
# star_encoder = elbo_lib.MFVBEncoder(slen = slen,
#                            patch_slen = 10,
#                            step = 10,
#                            edge_padding = 0,
#                            n_bands = psf_og.shape[0],
#                            max_detections = 2,
#                            fmin = 1000.)
#
# star_encoder.load_state_dict(torch.load('./fits/results_2020-04-29/starnet_klpq',
#                                map_location=lambda storage, loc: storage))
star_encoder.eval();
star_encoder.to(device);
###############
# define optimizer
###############
learning_rate = 1e-3
weight_decay = 1e-3
optimizer = optim.Adam([
                    {'params': star_encoder.parameters(),
                    'lr': learning_rate}],
                    weight_decay = weight_decay)
###############
# Train!
###############
out_filename = './foo' # './fits/results_2020-05-06/starnet_encoder_allsum-restart' + str(args.seed)
n_epochs = 2500
print_every = 100
n_samples = 2000
print('training')
elbo_results_vec = elbo_lib.save_elbo_results(full_image, star_encoder,
                            simulator, mean_stars, n_samples)
t0 = time.time()
for epoch in range(1, n_epochs + 1):
    optimizer.zero_grad()
    # get pseudo loss
    # ps_loss = elbo_lib.get_pseudo_loss(full_image, star_encoder,
    #                         simulator,mean_stars, n_samples)
    ps_loss = elbo_lib.get_pseudo_loss_all_sum(full_image, star_encoder,
                            simulator, mean_stars, n_samples)
    # ps_loss = elbo_lib.loss_on_true_nstars(full_image, star_encoder, simulator,
    #                         mean_stars, n_samples,
    #                         true_locs, true_fluxes)
    ps_loss.backward()
    optimizer.step()
    if ((epoch % print_every) == 0) or (epoch == n_epochs):
        print('epoch = {}; elapsed = {:.1f}sec'.format(epoch, time.time() - t0))
        elbo_results = elbo_lib.save_elbo_results(full_image, star_encoder,
                                    simulator, mean_stars, n_samples)
        elbo_results_vec = np.vstack((elbo_results_vec, elbo_results))
        np.savetxt(out_filename + '-elbo_results', elbo_results_vec)
        print("writing the encoder parameters to " + out_filename)
        torch.save(star_encoder.state_dict(), out_filename)
        # torch.save(star_encoder.params, out_filename)
        t0 = time.time()
| 5,279 | 33.509804 | 100 | py |
DeblendingStarfields | DeblendingStarfields-master/experiments_elbo_vs_sleep/simulate_test_images.py | import numpy as np
import torch
import json
import matplotlib.pyplot as plt
import deblending_runjingdev.simulated_datasets_lib as simulated_datasets_lib
import deblending_runjingdev.psf_transform_lib as psf_transform_lib
from deblending_runjingdev.which_device import device
# Generates the two fixed test images used by the ELBO-vs-sleep experiments:
# a 20 x 20 image with four hand-placed stars and a larger image with 50
# randomly placed stars. Each is saved as an .npz with image, locs, fluxes.
np.random.seed(65765)
_ = torch.manual_seed(3453453)
# get the SDSS point spread function
# bands 2 and 3 are the SDSS r and i bands
bands = [2, 3]
psfield_file = './../sdss_stage_dir/2583/2/136/psField-002583-2-0136.fit'
init_psf_params = psf_transform_lib.get_psf_params(
                                    psfield_file,
                                    bands = bands)
power_law_psf = psf_transform_lib.PowerLawPSF(init_psf_params.to(device))
psf_og = power_law_psf.forward().detach().to(device)
##############################
# Simulate the 20 x 20 image
##############################
slen = 20
# get background
background = torch.zeros(len(bands), slen, slen)
background[0] = 686.
background[1] = 1123.
background = background.to(device)
# the simulator
simulator = simulated_datasets_lib.StarSimulator(psf_og, slen, background, transpose_psf = False)
# set locations and fluxes
# pixel coordinates, divided by slen to get locations in [0, 1]
true_locs = torch.Tensor([[2, 3],
                         [5.5, 7.5],
                         [12.5, 6.5],
                         [8.5, 14.5]]).unsqueeze(0) / slen
true_locs = true_locs.to(device)
# all four stars get the same flux (4000 counts) in both bands
true_fluxes = torch.zeros(true_locs.shape[0], true_locs.shape[1], len(bands),
                          device = device) + 4000.
# simulate image
full_image = simulator.draw_image_from_params(locs = true_locs,
                        fluxes = true_fluxes,
                        n_stars= torch.Tensor([4]).to(device).long(),
                        add_noise = True)
# save
fname = './test_image_20x20.npz'
print('saving 20 x 20 test image into: ', fname)
np.savez(fname,
        image = full_image.cpu().squeeze(0),
        locs = true_locs.cpu().squeeze(0),
        fluxes = true_fluxes.cpu().squeeze(0))
##############################
# Simulate 100 x 100 image
##############################
np.random.seed(652)
_ = torch.manual_seed(3143)
# data parameters
with open('./../model_params/default_star_parameters.json', 'r') as fp:
    data_params = json.load(fp)
# exactly 50 stars per image
data_params['min_stars'] = 50
data_params['max_stars'] = 50
data_params['mean_stars'] = 50
# NOTE(review): slen is 110 although the file is named 100x100 --
# presumably the extra pixels act as a border; confirm against the
# consumers of this file.
data_params['slen'] = 110
# background
background = torch.zeros(len(bands), data_params['slen'], data_params['slen'])
background[0] = 686.
background[1] = 1123.
background = background.to(device)
# simulate image
n_images = 1
star_dataset = \
    simulated_datasets_lib.load_dataset_from_params(psf_og,
                            data_params,
                            background = background,
                            n_images = n_images,
                            transpose_psf = False,
                            add_noise = True)
fname = './test_image_100x100.npz'
print('saving 100 x 100 test image into: ', fname)
full_image = star_dataset[0]['image'].unsqueeze(0)
true_locs = star_dataset[0]['locs']
true_fluxes = star_dataset[0]['fluxes']
np.savez(fname,
        image = full_image.cpu().squeeze(0),
        locs = true_locs.cpu().squeeze(0),
        fluxes = true_fluxes.cpu().squeeze(0))
| 3,256 | 29.439252 | 97 | py |
DeblendingStarfields | DeblendingStarfields-master/experiments_elbo_vs_sleep/train_sleep.py | import numpy as np
import torch
import torch.optim as optim
import deblending_runjingdev.simulated_datasets_lib as simulated_datasets_lib
import deblending_runjingdev.starnet_lib as starnet_lib
import deblending_runjingdev.sleep_lib as sleep_lib
import deblending_runjingdev.psf_transform_lib as psf_transform_lib
import deblending_runjingdev.elbo_lib as elbo_lib
import json
import time
from deblending_runjingdev.which_device import device
# Sleep-phase (klpq) training counterpart to train_elbo.py: trains the
# encoder on simulated data and monitors progress on a fixed test image.
print('device: ', device)
print('torch version: ', torch.__version__)
###############
# set seed
###############
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--seed", type = int, default = 0)
parser.add_argument("--test_image",
                    type = str,
                    default = 'small')
args = parser.parse_args()
print(args.seed)
# seed offsets make restarts with different --seed values independent
np.random.seed(5751 + args.seed * 17)
_ = torch.manual_seed(11512 + args.seed * 13)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
####################
# Get test image
#####################
# Choose the fixed test image and the matching encoder/prior configuration.
if args.test_image == 'small':
    # test image file
    test_image_file = './test_image_20x20.npz'
    # parameters for encoder
    ptile_slen = 10
    step = 10
    edge_padding = 0
    # prior parameters
    mean_stars = 4
    max_stars = 6
elif args.test_image == 'large':
    # test image file
    test_image_file = './test_image_100x100.npz'
    # parameters for encoder
    ptile_slen = 20
    step = 10
    edge_padding = 5
    # prior parameters
    mean_stars = 50
    max_stars = 100
else:
    # Bug fix: the small test image is 20 x 20 (test_image_20x20.npz), but
    # this message previously said "30 x 30"; now consistent with the
    # sibling train_elbo.py.
    print('Specify whether to use the large (100 x 100) test image',
            'or the small (20 x 20) test image')
    raise NotImplementedError()
# the fixed test image is only used to monitor progress during training
full_image_np = np.load(test_image_file)['image']
full_image = torch.Tensor(full_image_np).unsqueeze(0).to(device)
###############
# data parameters
###############
with open('../model_params/default_star_parameters.json', 'r') as fp:
    data_params = json.load(fp)
data_params['mean_stars'] = mean_stars
data_params['min_stars'] = 0
data_params['max_stars'] = max_stars
data_params['slen'] = full_image.shape[-1]
print(data_params)
###############
# load psf
###############
# bands 2 and 3 are the SDSS r and i bands
bands = [2, 3]
psfield_file = '../sdss_stage_dir/2583/2/136/psField-002583-2-0136.fit'
init_psf_params = psf_transform_lib.get_psf_params(
                                    psfield_file,
                                    bands = bands)
power_law_psf = psf_transform_lib.PowerLawPSF(init_psf_params.to(device))
psf_og = power_law_psf.forward().detach()
###############
# set background
###############
# constant per-band sky levels (counts)
background = torch.zeros(len(bands), data_params['slen'], data_params['slen']).to(device)
background[0] = 686.
background[1] = 1123.
###############
# draw data
###############
print('generating data: ')
n_images = 20000
t0 = time.time()
star_dataset = \
    simulated_datasets_lib.load_dataset_from_params(psf_og,
                            data_params,
                            background = background,
                            n_images = n_images,
                            transpose_psf = False,
                            add_noise = True)
print('data generation time: {:.3f}secs'.format(time.time() - t0))
# get loader
batchsize = 64
loader = torch.utils.data.DataLoader(
                 dataset=star_dataset,
                 batch_size=batchsize,
                 shuffle=True)
###############
# define VAE
###############
star_encoder = starnet_lib.StarEncoder(slen = data_params['slen'],
                           ptile_slen = ptile_slen,
                           step = step,
                           edge_padding = edge_padding,
                           n_bands = psf_og.shape[0],
                           max_detections = 2)
star_encoder.to(device)
###############
# define optimizer
###############
learning_rate = 1e-3
weight_decay = 1e-5
optimizer = optim.Adam([
                    {'params': star_encoder.parameters(),
                    'lr': learning_rate}],
                    weight_decay = weight_decay)
###############
# Train!
###############
# checkpoint name encodes the restart seed and the test-image size
out_filename = './fits/starnet_klpq-restart' + str(args.seed)
if args.test_image == 'small':
    n_epochs = 31
    print_every = 1
    out_filename = out_filename + '_20x20'
else:
    n_epochs = 500
    print_every = 10
    out_filename = out_filename + '_100x100'
print('training')
sleep_lib.run_sleep(star_encoder, loader, optimizer, n_epochs,
                    out_filename = out_filename,
                    print_every = print_every,
                    full_image = full_image,
                    mean_stars = data_params['mean_stars'])
| 4,752 | 26.316092 | 89 | py |
lightkurve | lightkurve-main/docs/source/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Sphinx configuration for building the Lightkurve documentation.

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
# NOTE(review): os.path.dirname(__name__) takes the dirname of the module
# *name* string, not of this file's path -- __file__ was probably intended.
sys.path.append(os.path.join(os.path.dirname(__name__), '..'))

import lightkurve

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.mathjax',
    'sphinx.ext.intersphinx',
    'sphinx.ext.viewcode',
    'nbsphinx',
    'numpydoc',
    'sphinxcontrib.rawfiles']

autosummary_generate = True

# Disable RequireJS because it creates a conflict with bootstrap.js.
# This conflict breaks the navigation toggle button.
# The exact consequence of disabling RequireJS is not understood
# -- likely it means that notebook widgets may not work?
nbsphinx_requirejs_path = ""

numpydoc_show_class_members = False

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# Exclude build directory and Jupyter backup files:
# NOTE(review): this value is overwritten by the second exclude_patterns
# assignment further below, so '_build' is not actually excluded here.
exclude_patterns = ['_build', '**.ipynb_checkpoints']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ".".join(lightkurve.__version__.split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = lightkurve.__version__

# General information about the project.
project = f'Lightkurve v{version}'
copyright = 'Lightkurve developers'
author = 'Lightkurve developers'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["**/.ipynb_checkpoints"]

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True

# Execute notebooks? Possible values: 'always', 'never', 'auto' (default)
nbsphinx_execute = "auto"

# Some notebook cells take longer than 60 seconds to execute
nbsphinx_timeout = 500

# PUT PROLOG HERE
# Banner injected at the top of every rendered notebook: download link plus an
# "open in TIKE" JupyterHub link for the same notebook.
nbsphinx_prolog = r"""
{% set docname = env.doc2path(env.docname, base=None) %}

.. only:: html

    .. raw:: html

        <div style="float:right; margin-bottom:1em;">
            <a href="https://github.com/lightkurve/lightkurve/raw/main/docs/source/{{ docname }}"><img src="https://img.shields.io/badge/Notebook-Download-130654?logo=Jupyter&labelColor=fafafa"></a>
            <a href="https://timeseries.science.stsci.edu/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Flightkurve%2Flightkurve&urlpath=lab%2Ftree%2Flightkurve%2Fdocs%2Fsource%2F{{ docname }}&branch=main"><img src="https://img.shields.io/badge/Notebook-Open%20in%20TIKE-130654?logo=Jupyter&labelColor=fafafa"></a>
        </div>
        <br style="clear:both;">
"""

# -- Options for HTML output ----------------------------------------------

html_theme = 'pydata_sphinx_theme'
html_theme_options = {
    "external_links": [],
    "github_url": "https://github.com/lightkurve/lightkurve",
    "google_analytics_id": "UA-69171-9",
}
html_title = "Lightkurve "
html_static_path = ['_static']
html_css_files = [
    'css/custom.css',
]
html_sidebars = {
    "tutorials/*": [],
    "tutorials/*/*": [],
    "tutorials/*/*/*": [],
}

# Raw files we want to copy using the sphinxcontrib-rawfiles extension:
# - CNAME tells GitHub the domain name to use for hosting the docs
# - .nojekyll prevents GitHub from hiding the `_static` dir
rawfiles = ['CNAME', '.nojekyll']

# Make sure text marked up `like this` will be interpreted as Python objects
default_role = 'py:obj'

# intersphinx enables links to classes/functions in the packages defined here:
intersphinx_mapping = {'python': ('https://docs.python.org/3/', None),
                       'numpy': ('https://docs.scipy.org/doc/numpy/', None),
                       'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
                       'matplotlib': ('https://matplotlib.org', None),
                       'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
                       'astropy': ('https://docs.astropy.org/en/latest/', None)}
miccai2022-roigan | miccai2022-roigan-main/main.py | import os
import argparse
import yaml
import collections
import itertools
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import datasets
from sklearn.model_selection import train_test_split
from src import models, utils
def main(args, config):
    """Build and adversarially train the region-guided CycleGAN.

    args   : parsed CLI namespace; ``args.job_number`` names the output
             directories under images/ and saved_models/.
    config : namedtuple built from the YAML config; fields used here:
             nb_residuals, nb_epochs, steps_per_epoch, nb_batch, nb_rois,
             lambda_roi, lambda_cyc, lambda_id, hes_dir, hes_library,
             ihc_dir, ihc_library.
    """
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    utils.write_flush(device)

    # Create sample and checkpoint directories
    os.makedirs('images/%s' % args.job_number, exist_ok=True)
    os.makedirs('saved_models/%s' % args.job_number, exist_ok=True)

    # Losses: MSE adversarial loss (least-squares style), L1 cycle/identity.
    criterion_GAN = torch.nn.MSELoss()
    criterion_cycle = torch.nn.L1Loss()
    criterion_identity = torch.nn.L1Loss()

    cuda = torch.cuda.is_available()  # NOTE(review): unused; `device` above already covers this

    input_shape = (3, 256, 256)

    # Initialize generators, whole-image discriminators and ROI discriminators
    # for both translation directions (A <-> B).
    G_AB = models.GeneratorResNet(input_shape, config.nb_residuals).to(device)
    G_BA = models.GeneratorResNet(input_shape, config.nb_residuals).to(device)
    D_A = models.Discriminator(input_shape).to(device)
    D_B = models.Discriminator(input_shape).to(device)
    D_ROI_A = models.DiscriminatorROI().to(device)
    D_ROI_B = models.DiscriminatorROI().to(device)

    # Optimizers: both generators share one optimizer; each discriminator has its own.
    optimizer_G = torch.optim.Adam(
        itertools.chain(G_AB.parameters(), G_BA.parameters()), lr=2e-4, betas=(0.5, 0.999)
    )
    optimizer_D_A = torch.optim.Adam(D_A.parameters(), lr=2e-4, betas=(0.5, 0.999))
    optimizer_D_B = torch.optim.Adam(D_B.parameters(), lr=2e-4, betas=(0.5, 0.999))
    optimizer_D_A_ROI = torch.optim.Adam(D_ROI_A.parameters(), lr=2e-4, betas=(0.5, 0.999))
    optimizer_D_B_ROI = torch.optim.Adam(D_ROI_B.parameters(), lr=2e-4, betas=(0.5, 0.999))

    # Learning rate update schedulers: linear decay starting at epoch 12.
    lr_scheduler_G = torch.optim.lr_scheduler.LambdaLR(
        optimizer_G, lr_lambda=utils.LambdaLR(config.nb_epochs, 0, 12).step
    )
    lr_scheduler_D_A = torch.optim.lr_scheduler.LambdaLR(
        optimizer_D_A, lr_lambda=utils.LambdaLR(config.nb_epochs, 0, 12).step
    )
    lr_scheduler_D_B = torch.optim.lr_scheduler.LambdaLR(
        optimizer_D_B, lr_lambda=utils.LambdaLR(config.nb_epochs, 0, 12).step
    )
    lr_scheduler_D_A_ROI = torch.optim.lr_scheduler.LambdaLR(
        optimizer_D_A_ROI, lr_lambda=utils.LambdaLR(config.nb_epochs, 0, 12).step
    )
    lr_scheduler_D_B_ROI = torch.optim.lr_scheduler.LambdaLR(
        optimizer_D_B_ROI, lr_lambda=utils.LambdaLR(config.nb_epochs, 0, 12).step
    )

    # Buffers of previously generated samples (stabilises discriminator training)
    fake_A_buffer = utils.ReplayBuffer()
    fake_B_buffer = utils.ReplayBuffer()

    hes_images, hes_dfs_list = utils.load_data(config.hes_dir, config.hes_library)
    ihc_images, ihc_dfs_list = utils.load_data(config.ihc_dir, config.ihc_library)

    # Data generators; held-out 10% split is used only for periodic sampling.
    hes_images_tr, hes_images_te, hes_bboxes_tr, hes_bboxes_te = train_test_split(
        hes_images, hes_dfs_list, test_size=0.1, random_state=42)
    ihc_images_tr, ihc_images_te, ihc_bboxes_tr, ihc_bboxes_te = train_test_split(
        ihc_images, ihc_dfs_list, test_size=0.1, random_state=42)

    # ----------
    # Training
    # ----------
    gen_A = utils.data_generator(hes_images_tr, hes_bboxes_tr, nb_batch=config.nb_batch, nb_rois=config.nb_rois)
    gen_B = utils.data_generator(ihc_images_tr, ihc_bboxes_tr, nb_batch=config.nb_batch, nb_rois=config.nb_rois)

    for epoch in range(config.nb_epochs):
        for i in range(config.steps_per_epoch):
            data = next(gen_A)
            real_A, condition_A, bboxes_A = (data[0].to(device),
                                             data[1].to(device),
                                             data[2].to(device))
            data = next(gen_B)
            real_B, condition_B, bboxes_B = (data[0].to(device),
                                             data[1].to(device),
                                             data[2].to(device))

            # Adversarial targets for whole-image patches and for ROIs.
            fake = torch.zeros((config.nb_batch, *D_A.output_shape)).to(device)
            valid = torch.ones((config.nb_batch, *D_A.output_shape)).to(device)
            fake_roi = torch.zeros((config.nb_rois,)).to(device)
            valid_roi = torch.ones((config.nb_rois,)).to(device)

            # ------------------
            # Train Generators
            # ------------------
            G_AB.train()
            G_BA.train()
            optimizer_G.zero_grad()
            # Identity loss: each generator should leave its target domain untouched.
            loss_id_A = criterion_identity(G_BA(real_A), real_A)
            loss_id_B = criterion_identity(G_AB(real_B), real_B)
            loss_identity = (loss_id_A + loss_id_B) / 2
            # GAN loss
            fake_B = G_AB(real_A)
            loss_GAN_AB = criterion_GAN(D_B(fake_B), valid)
            fake_A = G_BA(real_B)
            loss_GAN_BA = criterion_GAN(D_A(fake_A), valid)
            loss_GAN = (loss_GAN_AB + loss_GAN_BA) / 2
            # ROI loss: fake_A was translated from real_B, so it is scored
            # against B's ROI layout (and vice versa).
            validity_ROI_A = D_ROI_A(fake_A, condition_B, bboxes_B)
            validity_ROI_B = D_ROI_B(fake_B, condition_A, bboxes_A)
            loss_ROI_A = criterion_GAN(validity_ROI_A, valid_roi)
            loss_ROI_B = criterion_GAN(validity_ROI_B, valid_roi)
            loss_ROI = (loss_ROI_A + loss_ROI_B) / 2
            # Cycle loss
            recov_A = G_BA(fake_B)
            loss_cycle_A = criterion_cycle(recov_A, real_A)
            recov_B = G_AB(fake_A)
            loss_cycle_B = criterion_cycle(recov_B, real_B)
            loss_cycle = (loss_cycle_A + loss_cycle_B) / 2
            # Total loss
            loss_G = loss_GAN + config.lambda_roi * loss_ROI + config.lambda_cyc * loss_cycle + config.lambda_id * loss_identity
            loss_G.backward()
            optimizer_G.step()

            # -----------------------
            # Train Discriminator A
            # -----------------------
            optimizer_D_A.zero_grad()
            # Real loss
            loss_real = criterion_GAN(D_A(real_A), valid)
            # Fake loss (on batch of previously generated samples)
            fake_A_ = fake_A_buffer.push_and_pop(fake_A)
            loss_fake = criterion_GAN(D_A(fake_A_.detach()), fake)
            # Total loss
            loss_D_A = (loss_real + loss_fake) / 2
            loss_D_A.backward()
            optimizer_D_A.step()

            # -----------------------
            # Train Discriminator B
            # -----------------------
            optimizer_D_B.zero_grad()
            # Real loss
            loss_real = criterion_GAN(D_B(real_B), valid)
            # Fake loss (on batch of previously generated samples)
            fake_B_ = fake_B_buffer.push_and_pop(fake_B)
            loss_fake = criterion_GAN(D_B(fake_B_.detach()), fake)
            # Total loss
            loss_D_B = (loss_real + loss_fake) / 2
            loss_D_B.backward()
            optimizer_D_B.step()

            loss_D = (loss_D_A + loss_D_B) / 2

            # --------------------------
            # Train Discriminator ROI A
            # --------------------------
            optimizer_D_A_ROI.zero_grad()
            roi_outputs = D_ROI_A(real_A, condition_A, bboxes_A)
            real_loss = criterion_GAN(roi_outputs, valid_roi)
            roi_outputs = D_ROI_A(fake_A.detach(), condition_B, bboxes_B)
            fake_loss = criterion_GAN(roi_outputs, fake_roi)
            d_ROI_A_loss = (real_loss + fake_loss) / 2
            d_ROI_A_loss.backward()
            optimizer_D_A_ROI.step()

            # --------------------------
            # Train Discriminator ROI B
            # --------------------------
            optimizer_D_B_ROI.zero_grad()
            roi_outputs = D_ROI_B(real_B, condition_B, bboxes_B)
            real_loss = criterion_GAN(roi_outputs, valid_roi)
            roi_outputs = D_ROI_B(fake_B.detach(), condition_A, bboxes_A)
            fake_loss = criterion_GAN(roi_outputs, fake_roi)
            d_ROI_B_loss = (real_loss + fake_loss) / 2
            d_ROI_B_loss.backward()
            optimizer_D_B_ROI.step()

            # --------------
            # Log Progress
            # --------------
            # Print log
            utils.write_flush(
                '\r[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [D_ROI_A loss: %f] [D_ROI_B loss: %f] [G loss: %f, adv: %f, cycle: %f, identity: %f]'
                % (epoch, config.nb_epochs, i, config.steps_per_epoch, loss_D.item(), d_ROI_A_loss.item(), d_ROI_B_loss.item(), loss_G.item(), loss_GAN.item(), loss_cycle.item(), loss_identity.item()))

            batches_done = epoch * config.steps_per_epoch + i
            if batches_done % 100 == 0:
                # Periodically dump a qualitative sample grid from the test split.
                utils.sample_images(args.job_number, batches_done, G_AB, G_BA, hes_images_te, hes_bboxes_te, ihc_images_te, ihc_bboxes_te, device)

        # Update learning rates (once per epoch)
        lr_scheduler_G.step()
        lr_scheduler_D_A.step()
        lr_scheduler_D_B.step()
        lr_scheduler_D_A_ROI.step()
        lr_scheduler_D_B_ROI.step()

        if epoch % 5 == 0:
            # Save model checkpoints
            torch.save(G_AB.state_dict(), 'saved_models/%s/G_AB_%d.pth' % (args.job_number, epoch))
            torch.save(G_BA.state_dict(), 'saved_models/%s/G_BA_%d.pth' % (args.job_number, epoch))
            torch.save(D_A.state_dict(), 'saved_models/%s/D_A_%d.pth' % (args.job_number, epoch))
            torch.save(D_B.state_dict(), 'saved_models/%s/D_B_%d.pth' % (args.job_number, epoch))
            torch.save(D_ROI_A.state_dict(), 'saved_models/%s/D_A_ROI_%d.pth' % (args.job_number, epoch))
            torch.save(D_ROI_B.state_dict(), 'saved_models/%s/D_B_ROI_%d.pth' % (args.job_number, epoch))
if __name__ == '__main__':
    # CLI entry point: `python main.py <job_number> <config.yaml>`
    parser = argparse.ArgumentParser(description='Region-guided CycleGAN for stain transfer on whole slide images')
    parser.add_argument('job_number', type=int)
    parser.add_argument('config', type=str)
    args = parser.parse_args()
    utils.write_flush(str(args))
    with open(args.config, 'r') as fp:
        cfg = yaml.safe_load(fp)
    # Freeze the YAML mapping into a namedtuple so fields read as config.xxx.
    config = collections.namedtuple('Config', cfg.keys())(*cfg.values())
    main(args, config)
| 10,158 | 36.487085 | 201 | py |
miccai2022-roigan | miccai2022-roigan-main/src/utils.py | import os
import sys
import h5py
import random
import numpy as np
import pandas as pd
import torch
from torch.autograd import Variable
from torchvision.utils import save_image, make_grid
from torch.utils.data import DataLoader
def write_flush(*text_args, stream=sys.stdout):
    """Write the given values to *stream* as one comma-separated line and flush.

    Each positional argument is converted with str(); the line always ends
    with a newline, and the stream is flushed immediately so that progress
    output appears in real time (e.g. in batch-job logs).
    """
    line = ", ".join(str(item) for item in text_args)
    stream.write(line + "\n")
    stream.flush()
def load_data(data_dir, library_dir):
    """Load image tiles and their per-tile bounding-box annotations.

    data_dir    : directory containing a 'patches/' subfolder of HDF5 files,
                  each holding an array under key 'x' of shape (n_tiles, H, W, C).
    library_dir : directory of CSV files (one per HDF5 file, same stem) with
                  one bounding box per row and a 'tile' column indexing tiles.

    Returns (imgs, dfs_list) where imgs stacks all tiles of all files and
    dfs_list[i] is the DataFrame of boxes belonging to imgs[i].
    """
    patches_dir = os.path.join(data_dir, 'patches/')
    # Sort once so the stacking order is deterministic across filesystems
    # (the original iterated the unsorted os.listdir() order).
    patches_files = sorted(os.listdir(patches_dir))

    images = {}
    for file_name in patches_files:
        # Context manager: the original never closed its HDF5 handles.
        with h5py.File(os.path.join(patches_dir, file_name), 'r') as h5:
            images[file_name] = h5['x'][()]

    dfs = {
        file_name: pd.read_csv(
            os.path.join(library_dir, file_name.split('.')[0] + '.csv'),
            index_col=0)
        for file_name in patches_files
    }

    imgs = np.vstack([images[key] for key in patches_files])
    dfs_list = []
    for key in patches_files:
        nb_tiles = images[key].shape[0]
        df = dfs[key]
        # One (possibly empty) DataFrame per tile, aligned with imgs rows.
        dfs_list.extend([df[df.tile == tile] for tile in range(nb_tiles)])
    assert imgs.shape[0] == len(dfs_list)
    return imgs, dfs_list
def draw_conditions(bboxes, dim):
    """Rasterise bounding boxes into per-class condition and noise maps.

    bboxes : iterable of (xmin, ymin, xmax, ymax, cls) with cls in {0, 1}.
    dim    : spatial size of the square output maps.

    Returns (condition, noise), both of shape (1, 2, dim, dim): `condition`
    is 1 inside each (shrunken) box on its class channel, `noise` holds
    standard-normal draws over the same regions.
    """
    condition = torch.zeros((1, 2, dim, dim))
    noise = torch.zeros((1, 2, dim, dim))
    margin = 5  # shrink each box so the painted region stays off the border
    for bbox in bboxes:
        xmin, ymin, xmax, ymax, cls = map(int, bbox)
        ymin = max(0, ymin + margin)
        xmin = max(0, xmin + margin)
        ymax = min(dim, ymax - margin)
        xmax = min(dim, xmax - margin)
        if ymax <= ymin or xmax <= xmin:
            # Boxes smaller than twice the margin collapse to an empty region;
            # the original code crashed here (torch.randn with negative sizes).
            continue
        condition[0, cls, ymin:ymax, xmin:xmax] = 1
        z = torch.randn(1, 1, ymax - ymin, xmax - xmin)
        noise[0, cls, ymin:ymax, xmin:xmax] = z
    return condition, noise
def sample_bboxes(df_bboxes, nb_samples):
    """Sample ROI boxes from one tile, balancing classes across the batch.

    With q = nb_samples // 4 the scheme is:
      * purely negative tile -> 3q negatives,
      * purely positive tile -> 5q positives,
      * mixed tile           -> nb_samples positives followed by q negatives.
    Because positive and negative tiles are balanced upstream, this yields a
    fixed total ROI count per batch with roughly balanced classes.

    Returns a numpy array of rows [xmin, ymin, xmax, ymax, class].
    """
    negatives = df_bboxes[df_bboxes['class'] == 0]
    positives = df_bboxes[df_bboxes['class'] == 1]
    quarter = nb_samples // 4

    if positives.empty:  # purely negative tile
        chosen = negatives.sample(3 * quarter, replace=True)
    elif negatives.empty:  # purely positive tile
        chosen = positives.sample(5 * quarter, replace=True)
    else:  # mixed tile
        chosen = pd.concat([positives.sample(nb_samples, replace=True),
                            negatives.sample(quarter, replace=True)])
    return chosen[['xmin', 'ymin', 'xmax', 'ymax', 'class']].values
def data_augmentation(x_batch, bbox_batch, img_dim):
    """Randomly mirror a batch horizontally and/or vertically, in sync
    with its bounding boxes.

    x_batch    : (B, C, H, W) tensor; bbox_batch columns are
                 [batch_idx, xmin, ymin, xmax, ymax, class] (mutated in place).
    Note: each coin flip uses ``np.random.randn() > 0.5`` (~31% odds),
    not a fair Bernoulli.
    """
    flip_horizontal = np.random.randn() > 0.5
    if flip_horizontal:
        x_batch = x_batch.flip(dims=(3,))
        old_left = bbox_batch[:, 1].copy()
        old_right = bbox_batch[:, 3].copy()
        bbox_batch[:, 1] = img_dim - old_right
        bbox_batch[:, 3] = img_dim - old_left

    flip_vertical = np.random.randn() > 0.5
    if flip_vertical:
        x_batch = x_batch.flip(dims=(2,))
        old_top = bbox_batch[:, 2].copy()
        old_bottom = bbox_batch[:, 4].copy()
        bbox_batch[:, 2] = img_dim - old_bottom
        bbox_batch[:, 4] = img_dim - old_top

    return x_batch, bbox_batch
def data_generator(imgs, bboxes, nb_batch, nb_rois=64):
    """Endless generator of balanced (images, condition maps, ROI boxes) batches.

    imgs   : (N, H, W, C) array of tiles; rescaled below to [-1, 1] and
             transposed to channels-first.
    bboxes : list of per-tile DataFrames (xmin/ymin/xmax/ymax/class).
    Yields (x_batch (B, C, H, W), condition_batch (B, 2, H, W),
    bbox_batch rows [batch_idx, xmin, ymin, xmax, ymax, class]).
    """
    # Half the batch is drawn from tiles containing class-1 boxes, half from
    # tiles containing only class-0 boxes; empty tiles are never sampled.
    idx_non_empty = [idx for idx, df in enumerate(bboxes) if not df.empty]
    idx_pos = [idx for idx in idx_non_empty if 1 in bboxes[idx]['class'].values]
    idx_neg = [idx for idx in idx_non_empty if not 1 in bboxes[idx]['class'].values]
    img_dim = imgs.shape[2]
    while True:
        idx_pos_batch = np.random.choice(idx_pos, size=nb_batch // 2)
        idx_neg_batch = np.random.choice(idx_neg, size=nb_batch // 2)
        x_batch = np.vstack([imgs[idx_pos_batch], imgs[idx_neg_batch]])
        # To channels-first and [-1, 1] range.
        x_batch = torch.Tensor(np.moveaxis(x_batch, 3, 1) / 127.5 - 1)
        nb_samples = nb_rois // nb_batch
        df_bbox_batch = [bboxes[i] for i in list(idx_pos_batch) + list(idx_neg_batch)]
        bbox_data = [sample_bboxes(df_bbox, nb_samples) for df_bbox in df_bbox_batch]
        # Prepend the within-batch tile index to every ROI row. (The inner
        # `bboxes` deliberately shadows the parameter; comprehension scope
        # keeps the outer name intact.)
        bbox_batch = np.vstack([np.hstack([i * np.ones((bboxes.shape[0], 1)), bboxes])
                                for i, bboxes in enumerate(bbox_data)])
        x_batch, bbox_batch = data_augmentation(x_batch, bbox_batch, img_dim)
        condition_batch = []
        for i in range(nb_batch):
            rois = bbox_batch[bbox_batch[:, 0]==i]
            # Only the condition map is kept; the noise map is discarded here.
            condition, noise = draw_conditions(rois[:, 1:], img_dim)
            condition_batch.append(condition)
        condition_batch = torch.cat(condition_batch, axis=0)
        yield torch.Tensor(x_batch), condition_batch, torch.Tensor(bbox_batch)
"""
N.B. There is generally on a few hundred samples in the val/test data.
Hence, drawing 25 samples leads to duplicates with high probability
(see the birthday paradox). Furthermore, the duplicates will always
be consecutive in the batch, as the indices are sorted in the data_generator
function.
"""
def sample_images(output_dir, batches_done, G_AB, G_BA, hes_images_te, hes_bboxes_te, ihc_images_te, ihc_bboxes_te, device):
    """Save a qualitative sample grid from the test split.

    The grid stacks four rows of 25 images each: real A, fake B (= G_AB(A)),
    real B, fake A (= G_BA(B)). Written to images/<output_dir>/<batches_done>.png.
    """
    stream_a = data_generator(hes_images_te, hes_bboxes_te, nb_batch=25)
    stream_b = data_generator(ihc_images_te, ihc_bboxes_te, nb_batch=25)
    batch_a = next(stream_a)[0].to(device)
    batch_b = next(stream_b)[0].to(device)

    G_AB.eval()
    G_BA.eval()
    translated_b = G_AB(batch_a)
    translated_a = G_BA(batch_b)

    # One 5x5 grid per row, then stack the rows vertically.
    rows = [make_grid(images, nrow=5, normalize=True)
            for images in (batch_a, translated_b, batch_b, translated_a)]
    image_grid = torch.cat(tuple(rows), 1)
    save_image(image_grid, "images/%s/%s.png" % (output_dir, batches_done), normalize=False)
class ReplayBuffer:
    """Image pool of previously generated samples (CycleGAN-style).

    Each pushed sample is kept until the pool is full; afterwards, with 50%
    probability a pushed sample swaps places with a random stored one, so the
    discriminator sees a mix of current and historical fakes.
    """

    def __init__(self, max_size=50):
        assert max_size > 0, "Empty buffer or trying to create a black hole. Be careful."
        self.max_size = max_size
        self.data = []

    def push_and_pop(self, data):
        """Insert a batch of samples and return a same-sized batch drawn
        from the (possibly historical) pool."""
        to_return = []
        for element in data.data:
            element = torch.unsqueeze(element, 0)
            if len(self.data) < self.max_size:
                self.data.append(element)
                to_return.append(element)
            elif random.uniform(0, 1) > 0.5:
                # Swap: return a stored sample, keep the new one in its place.
                i = random.randint(0, self.max_size - 1)
                to_return.append(self.data[i].clone())
                self.data[i] = element
            else:
                to_return.append(element)
        # torch.autograd.Variable is a no-op since PyTorch 0.4 and deprecated;
        # return the concatenated tensor directly.
        return torch.cat(to_return)
class LambdaLR:
    """Multiplicative LR schedule: constant 1.0, then linear decay to 0.

    The factor stays at 1.0 until ``decay_start_epoch`` (shifted by
    ``offset``), then decreases linearly so it reaches 0 at ``n_epochs``.
    Intended for use with torch.optim.lr_scheduler.LambdaLR via ``.step``.
    """

    def __init__(self, n_epochs, offset, decay_start_epoch):
        assert (n_epochs - decay_start_epoch) > 0, "Decay must start before the training session ends!"
        self.n_epochs = n_epochs
        self.offset = offset
        self.decay_start_epoch = decay_start_epoch

    def step(self, epoch):
        """Return the LR multiplier for the given epoch."""
        elapsed_past_start = max(0, epoch + self.offset - self.decay_start_epoch)
        decay_span = self.n_epochs - self.decay_start_epoch
        return 1.0 - elapsed_past_start / decay_span
| 7,345 | 32.543379 | 124 | py |
miccai2022-roigan | miccai2022-roigan-main/src/models.py | import torch
import torch.nn as nn
from torchvision.ops import RoIAlign
def weights_init_normal(m):
    """Module initialiser meant to be used with ``model.apply(...)``.

    Conv-type layers get N(0, 0.02) weights and zero bias (if present);
    BatchNorm2d layers get N(1, 0.02) scale and zero bias. Other modules
    are left untouched.
    """
    name = type(m).__name__
    if 'Conv' in name:
        torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
        if getattr(m, 'bias', None) is not None:
            torch.nn.init.constant_(m.bias.data, 0.0)
    elif 'BatchNorm2d' in name:
        torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
        torch.nn.init.constant_(m.bias.data, 0.0)
class DiscriminatorROI(nn.Module):
    """Discriminator that scores individual ROIs of an image.

    The image is concatenated with a 2-channel condition map (5 input
    channels total), passed through four stride-2 conv stages, then each
    ROI is RoIAlign-pooled to 3x3 and classified by a 3x3 conv, giving one
    real/fake score per ROI.
    """

    def __init__(self, base_filters=64):
        super(DiscriminatorROI, self).__init__()

        def downsample(cin, cout, normalise=True):
            # Conv (stride 2, no bias) -> optional BatchNorm -> LeakyReLU.
            stage = [nn.Conv2d(cin, cout, kernel_size=4, stride=2, padding=1, bias=False)]
            if normalise:
                stage.append(nn.BatchNorm2d(cout, momentum=0.8))
            stage.append(nn.LeakyReLU(0.2, inplace=True))
            return stage

        self.conv_layers = nn.Sequential(
            *downsample(5, base_filters, normalise=False),
            *downsample(base_filters, 2 * base_filters),
            *downsample(2 * base_filters, 4 * base_filters),
            *downsample(4 * base_filters, 8 * base_filters))
        # Four stride-2 convs -> feature map at 1/16 input resolution.
        self.roi_pool = RoIAlign(output_size=(3, 3), spatial_scale=0.0625, sampling_ratio=-1)
        self.classifier = nn.Sequential(
            nn.Conv2d(8 * base_filters, 1, kernel_size=3, padding=0, bias=False))
        self.apply(weights_init_normal)

    def forward(self, inputs, condition, bboxes):
        rois = bboxes[:, :-1]  # drop the trailing class column
        features = self.conv_layers(torch.cat([inputs, condition], axis=1))
        pooled = self.roi_pool(features, rois)
        return self.classifier(pooled).squeeze()
class ResidualBlock(nn.Module):
def __init__(self, in_features):
super(ResidualBlock, self).__init__()
self.block = nn.Sequential(
nn.ReflectionPad2d(1),
nn.Conv2d(in_features, in_features, 3),
nn.InstanceNorm2d(in_features),
nn.ReLU(inplace=True),
nn.ReflectionPad2d(1),
nn.Conv2d(in_features, in_features, 3),
nn.InstanceNorm2d(in_features),
)
def forward(self, x):
return x + self.block(x)
class GeneratorResNet(nn.Module):
    """ResNet-style CycleGAN generator.

    Architecture: 7x7 reflection-padded input conv to 64 channels, two
    stride-2 downsampling convs (64 -> 128 -> 256), ``num_residual_blocks``
    residual blocks at 256 channels, two nearest-neighbour upsampling stages
    (256 -> 128 -> 64), and a 7x7 output conv back to the input channel
    count followed by tanh.
    """

    def __init__(self, input_shape, num_residual_blocks):
        super(GeneratorResNet, self).__init__()
        channels = input_shape[0]
        width = 64

        # Initial c7s1 block.
        layers = [
            nn.ReflectionPad2d(channels),
            nn.Conv2d(channels, width, 7),
            nn.InstanceNorm2d(width),
            nn.ReLU(inplace=True),
        ]

        # Two stride-2 downsampling stages.
        for _ in range(2):
            layers += [
                nn.Conv2d(width, width * 2, 3, stride=2, padding=1),
                nn.InstanceNorm2d(width * 2),
                nn.ReLU(inplace=True),
            ]
            width *= 2

        # Residual bottleneck.
        layers += [ResidualBlock(width) for _ in range(num_residual_blocks)]

        # Two upsampling stages (nearest-neighbour upsample + conv).
        for _ in range(2):
            layers += [
                nn.Upsample(scale_factor=2),
                nn.Conv2d(width, width // 2, 3, stride=1, padding=1),
                nn.InstanceNorm2d(width // 2),
                nn.ReLU(inplace=True),
            ]
            width //= 2

        # Output projection back to image space.
        layers += [nn.ReflectionPad2d(channels), nn.Conv2d(width, channels, 7), nn.Tanh()]

        self.model = nn.Sequential(*layers)
        self.apply(weights_init_normal)

    def forward(self, x):
        return self.model(x)
class Discriminator(nn.Module):
    """PatchGAN discriminator with five stride-2 downsampling stages.

    Produces a 1-channel score map whose spatial size is the input size
    divided by 2**5 (exposed via ``output_shape``); each output cell judges
    one receptive-field patch of the input.
    """

    def __init__(self, input_shape):
        super(Discriminator, self).__init__()
        channels, height, width = input_shape
        # Five stride-2 convolutions shrink each spatial dim by 2**5.
        self.output_shape = (1, height // 2 ** 5, width // 2 ** 5)

        def stage(cin, cout, normalize=True):
            """One downsampling stage: conv, optional instance norm, LeakyReLU."""
            block = [nn.Conv2d(cin, cout, 4, stride=2, padding=1)]
            if normalize:
                block.append(nn.InstanceNorm2d(cout))
            block.append(nn.LeakyReLU(0.2, inplace=True))
            return block

        self.model = nn.Sequential(
            *stage(channels, 64, normalize=False),
            *stage(64, 128),
            *stage(128, 256),
            *stage(256, 512),
            *stage(512, 512),
            nn.ZeroPad2d((1, 0, 1, 0)),
            nn.Conv2d(512, 1, 4, padding=1)
        )
        self.apply(weights_init_normal)

    def forward(self, img):
        return self.model(img)
| 5,076 | 31.132911 | 99 | py |
hgp | hgp-main/hgp/core/kernels.py | # MIT License
# Copyright (c) 2021 Pashupati Hegde.
# Copyright (c) 2023 Magnus Ross.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import functorch
import numpy as np
import torch
from torch import nn
from torch.distributions import Normal
from torch.nn import init
from hgp.misc.constraint_utils import invsoftplus, softplus
from ..misc.ham_utils import build_J
prior_weights = Normal(0.0, 1.0)
def sample_normal(shape, seed=None):
    """Draw a float32 tensor of standard-normal samples.

    Uses a dedicated numpy RandomState so that passing the same ``seed``
    reproduces the same draws without touching the global RNG.
    """
    if seed is None:
        rng = np.random.RandomState()
    else:
        rng = np.random.RandomState(seed)
    draws = rng.normal(size=shape).astype(np.float32)
    return torch.tensor(draws)
class RBF(torch.nn.Module):
    """
    Squared-exponential (RBF) kernel with kernel computation and frequency
    sampling for random Fourier features. With ``dimwise=True`` every output
    dimension gets its own lengthscales and variance.
    """

    def __init__(self, D_in, D_out=None, dimwise=False, init_ls=2.0, init_var=0.5):
        """
        @param D_in: Number of input dimensions
        @param D_out: Number of output dimensions (defaults to D_in)
        @param dimwise: If True, different kernel parameters are given to output dimensions
        @param init_ls: initial (constrained) lengthscale value
        @param init_var: initial (constrained) signal variance value
        """
        super(RBF, self).__init__()
        self.D_in = D_in
        self.D_out = D_in if D_out is None else D_out
        self.dimwise = dimwise
        lengthscales_shape = (self.D_out, self.D_in) if dimwise else (self.D_in,)
        variance_shape = (self.D_out,) if dimwise else (1,)
        # Raw (unconstrained) parameters; positivity is enforced via softplus
        # in the `lengthscales`/`variance` properties below.
        self.unconstrained_lengthscales = nn.Parameter(
            torch.ones(size=lengthscales_shape), requires_grad=True
        )
        self.unconstrained_variance = nn.Parameter(
            torch.ones(size=variance_shape), requires_grad=True
        )
        self._initialize(init_ls, init_var)

    def _initialize(self, init_ls, init_var):
        # invsoftplus maps the desired constrained values back to raw space.
        init.constant_(
            self.unconstrained_lengthscales, invsoftplus(torch.tensor(init_ls)).item()
        )
        init.constant_(
            self.unconstrained_variance, invsoftplus(torch.tensor(init_var)).item()
        )

    @property
    def lengthscales(self):
        # Positive-constrained view of the raw parameter.
        return softplus(self.unconstrained_lengthscales)

    @lengthscales.setter
    def lengthscales(self, value):
        self.unconstrained_lengthscales = nn.Parameter(
            invsoftplus(value), requires_grad=True
        )

    @property
    def variance(self):
        # Positive-constrained view of the raw parameter.
        return softplus(self.unconstrained_variance)

    @variance.setter
    def variance(self, value):
        self.unconstrained_variance = nn.Parameter(
            invsoftplus(value), requires_grad=True
        )

    def square_dist_dimwise(self, X, X2=None):
        """
        Computes squared euclidean distance (scaled) for dimwise kernel setting
        @param X: Input 1 (N,D_in)
        @param X2: Input 2 (M,D_in)
        @return: Tensor (D_out,N,M)
        """
        X = X.unsqueeze(0) / self.lengthscales.unsqueeze(1)  # (D_out,N,D_in)
        Xs = torch.sum(torch.pow(X, 2), dim=2)  # (D_out,N)
        if X2 is None:
            return (
                -2 * torch.einsum("dnk, dmk -> dnm", X, X)
                + Xs.unsqueeze(-1)
                + Xs.unsqueeze(1)
            )  # (D_out,N,N)
        else:
            X2 = X2.unsqueeze(0) / self.lengthscales.unsqueeze(1)  # (D_out,M,D_in)
            X2s = torch.sum(torch.pow(X2, 2), dim=2)  # (D_out,M)
            return (
                -2 * torch.einsum("dnk, dmk -> dnm", X, X2)
                + Xs.unsqueeze(-1)
                + X2s.unsqueeze(1)
            )  # (D_out,N,M)

    def square_dist(self, X, X2=None):
        """
        Computes squared euclidean distance (scaled) for non dimwise kernel setting
        @param X: Input 1 (N,D_in)
        @param X2: Input 2 (M,D_in)
        @return: Tensor (N,M)
        """
        X = X / self.lengthscales  # (N,D_in)
        Xs = torch.sum(torch.pow(X, 2), dim=1)  # (N,)
        if X2 is None:
            return (
                -2 * torch.matmul(X, X.t())
                + torch.reshape(Xs, (-1, 1))
                + torch.reshape(Xs, (1, -1))
            )  # (N,N)
        else:
            X2 = X2 / self.lengthscales  # (M,D_in)
            X2s = torch.sum(torch.pow(X2, 2), dim=1)  # (M,)
            return (
                -2 * torch.matmul(X, X2.t())
                + torch.reshape(Xs, (-1, 1))
                + torch.reshape(X2s, (1, -1))
            )  # (N,M)

    def K(self, X, X2=None):
        """
        Computes the kernel matrix K(X, X2)
        @param X: Input 1 (N,D_in)
        @param X2: Input 2 (M,D_in)
        @return: Tensor (D,N,M) if dimwise else (N,M)
        """
        if self.dimwise:
            sq_dist = torch.exp(-0.5 * self.square_dist_dimwise(X, X2))  # (D_out,N,M)
            return self.variance[:, None, None] * sq_dist  # (D_out,N,M)
        else:
            # NOTE(review): this branch divides the squared distance by an
            # extra factor of 2 (effective lengthscale sqrt(2)*ls). That
            # matches DerivativeRBF.single_k below but NOT the dimwise branch
            # above -- confirm which convention is intended before unifying,
            # since trained models may depend on the current behaviour.
            sq_dist = torch.exp(-0.5 * self.square_dist(X, X2) / 2)  # (N,M)
            return self.variance * sq_dist  # (N,M)

    def sample_freq(self, S, seed=None):
        """
        Computes random samples from the spectral density of the squared
        exponential kernel (for random Fourier features).
        @param S: Number of features
        @param seed: random seed
        @return: Tensor of spectral frequencies (D_in, S, D_out) if dimwise else (D_in, S)
        """
        omega_shape = (self.D_in, S, self.D_out) if self.dimwise else (self.D_in, S)
        omega = sample_normal(omega_shape, seed)  # (D_in, S, D_out) or (D_in, S)
        lengthscales = (
            self.lengthscales.T.unsqueeze(1)
            if self.dimwise
            else self.lengthscales.unsqueeze(1)
        )  # (D_in,1,D_out) or (D_in,1)
        return omega / lengthscales  # (D_in, S, D_out) or (D_in, S)
class DerivativeRBF(RBF):
    """
    Squared-exponential kernel (single output, non-dimwise) extended with
    gradient and Hessian evaluations via functorch, for Hamiltonian GP
    modelling. The optional matrix J (from build_J) maps kernel gradients;
    presumably the symplectic structure matrix -- confirm in ham_utils.
    """

    def __init__(self, D_in, init_ls=2.0, init_var=0.5):
        # Even input dimension: state is split into two halves (see build_J).
        assert D_in % 2 == 0, "D_in must be even."
        super(DerivativeRBF, self).__init__(
            D_in, D_out=1, dimwise=False, init_ls=init_ls, init_var=init_var
        )
        self.J = build_J(D_in)

    def single_k(self, xi, yi):
        """Kernel evaluated at a single pair of points (D_in,) -> scalar.

        Matches the non-dimwise RBF.K convention (extra /2 on the squared
        distance)."""
        xi = xi / self.lengthscales
        yi = yi / self.lengthscales
        return self.variance[0] * torch.exp(-0.5 * torch.sum((xi - yi) ** 2 / 2))

    def grad_single_k(self, xi, yi, use_J=False):
        """Gradient of the kernel w.r.t. its first argument, optionally
        premultiplied by J."""
        J = self.J if use_J else torch.eye(self.D_in)
        return J @ functorch.grad(self.single_k, argnums=0)(xi, yi)

    def grad_K(self, X, X2=None, use_J=False):
        """Gradient of the kernel over sets of points.

        @param X: (N1, D) inputs differentiated against
        @param X2: optional (N2, D) second inputs (defaults to X)
        @return: Tensor (N1 * D, N2)
        """
        N1D = X.shape[0] * X.shape[1]
        # Bug fix: the original assigned N2 = X.shape (a torch.Size) when X2
        # was None, which made the final reshape fail; the row count of the
        # second input set is what is needed.
        N2 = X.shape[0] if X2 is None else X2.shape[0]
        if X2 is None:
            X2 = X
        return (
            functorch.vmap(
                lambda ti: functorch.vmap(
                    lambda tpi: self.grad_single_k(tpi, ti, use_J=use_J)
                )(X)
            )(X2)
            .permute(2, 1, 0)
            .reshape(N1D, N2)
        )

    def hess_single_k(self, x, xp, use_J=False):
        """Hessian of the kernel at a single pair of points, optionally
        conjugated by J (-J H J^T)."""
        J = self.J if use_J else torch.eye(self.D_in)
        return -J @ functorch.hessian(self.single_k)(x, xp) @ J.T

    def hess_K(self, X, X2=None, use_J=False):
        """Hessian of the kernel over a set of points.

        @param X: (N, D) inputs
        @return: Tensor (N * D, N * D); X2 is not supported.
        """
        if X2 is not None:
            raise NotImplementedError
        ND = X.shape[0] * X.shape[1]
        return (
            functorch.vmap(
                lambda ti: functorch.vmap(
                    lambda tpi: self.hess_single_k(ti, tpi, use_J=use_J)
                )(X)
            )(X)
            .permute(2, 0, 3, 1)
            .reshape(ND, ND)
        )
| 8,840 | 34.939024 | 121 | py |
hgp | hgp-main/hgp/core/flow.py | # MIT License
# Copyright (c) 2021 Pashupati Hegde.
# Copyright (c) 2023 Magnus Ross.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import torch
import torch.nn as nn
from torchdiffeq import odeint as odeint_nonadjoint
from torchdiffeq import odeint_adjoint
class ODEfunc(nn.Module):
    """Adapter that makes a differential-equation layer usable by an ODE
    solver.

    Keeps a buffer counting right-hand-side evaluations and, before each
    solve, lets the wrapped layer rebuild its cache of random draws via
    ``diffeq.build_cache()``.
    Modified from https://github.com/rtqichen/ffjord/
    """

    def __init__(self, diffeq):
        super(ODEfunc, self).__init__()
        self.diffeq = diffeq
        # Buffer (not a Parameter): moves with the module across devices
        # but is excluded from optimisation.
        self.register_buffer("_num_evals", torch.tensor(0.0))

    def before_odeint(self, rebuild_cache):
        """Reset the evaluation counter; optionally refresh the layer cache."""
        self._num_evals.fill_(0)
        if rebuild_cache:
            self.diffeq.build_cache()

    def num_evals(self):
        """Number of RHS evaluations since the last ``before_odeint`` call."""
        return self._num_evals.item()

    def forward(self, t, states):
        self._num_evals += 1
        return self.diffeq(t, states)
class Flow(nn.Module):
    """
    Defines an ODE flow: wraps a diffeq layer in an ODEfunc and numerically
    integrates it forward in time.
    See https://github.com/rtqichen/torchdiffeq for more information on numerical ODE solvers.
    """

    def __init__(
        self, diffeq, solver="dopri5", atol=1e-6, rtol=1e-6, use_adjoint=False
    ):
        """
        @param diffeq: Layer of GPODE/npODE/neuralODE
        @param solver: Solver to be used for ODE numerical integration
        @param atol: Absolute tolerance for the solver
        @param rtol: Relative tolerance for the solver
        @param use_adjoint: Use adjoint method for computing loss gradients, calls odeint_adjoint from torchdiffeq
        """
        super(Flow, self).__init__()
        self.odefunc = ODEfunc(diffeq)
        self.solver = solver
        self.atol = atol
        self.rtol = rtol
        self.use_adjoint = use_adjoint

    def forward(self, x0, ts, return_energy=False):
        """
        Numerical solution of an IVP, optionally also evaluating the diffeq
        layer's Hamiltonian along the solved trajectory.
        @param x0: Initial state (N,D) tensor x(t_0).
        @param ts: Time sequence of length T, first value is considered as t_0
        @param return_energy: if True, also return the Hamiltonian evaluated at
            every solution point (requires the layer to implement hamiltonian(ts, x))
        @return: xs (N,T,D) tensor; if return_energy, also energy (N,T,1) tensor
        """
        odeint = odeint_adjoint if self.use_adjoint else odeint_nonadjoint
        self.odefunc.before_odeint(rebuild_cache=True)
        # Solve once; the solution is shared by both return modes (the solve
        # was previously duplicated in each branch).
        xs = odeint(
            self.odefunc, x0, ts, atol=self.atol, rtol=self.rtol, method=self.solver
        )  # (T,N,D)
        if return_energy:
            energy = self.odefunc.diffeq.hamiltonian(
                ts, xs.reshape(-1, xs.shape[-1])
            ).reshape(xs.shape[0], xs.shape[1], 1)  # (T,N,1)
            return xs.permute(1, 0, 2), energy.permute(1, 0, 2)  # (N,T,D), (N,T,1)
        return xs.permute(1, 0, 2)  # (N,T,D)

    def num_evals(self):
        """Number of vector-field evaluations used by the last solve."""
        return self.odefunc.num_evals()

    def kl(self):
        """
        Calls KL() computation from the diffeq layer
        """
        return self.odefunc.diffeq.kl().sum()

    def log_prior(self):
        """
        Calls log_prior() computation from the diffeq layer
        """
        return self.odefunc.diffeq.log_prior().sum()
| 4,501 | 36.831933 | 115 | py |
hgp | hgp-main/hgp/core/constraint_likelihoods.py | import torch
import torch.nn as nn
from torch import distributions
from torch.nn import init
from hgp.misc.constraint_utils import invsoftplus, softplus
class Gaussian(nn.Module):
    """
    Gaussian likelihood; the scale is stored unconstrained and mapped through
    softplus, and can optionally be trained.
    """

    def __init__(
        self, d: int = 1, scale: float = 1.0, requires_grad: bool = True
    ) -> None:
        """
        @param d: number of independent dimensions
        @param scale: initial standard deviation
        @param requires_grad: whether the scale is trainable
        """
        super().__init__()
        self.unconstrained_scale = torch.nn.Parameter(
            torch.ones(d), requires_grad=requires_grad
        )
        self._initialize(scale)

    def _initialize(self, x: float) -> None:
        # Store the softplus-inverse so that `scale` starts exactly at x.
        target = invsoftplus(torch.tensor(x)).item()
        init.constant_(self.unconstrained_scale, target)

    @property
    def scale(self):
        return softplus(self.unconstrained_scale)

    def distribution(self, loc: torch.Tensor) -> torch.distributions.Distribution:
        return distributions.Normal(loc=loc, scale=self.scale)

    @property
    def variance(self):
        zero_loc = torch.zeros_like(self.scale)
        return self.distribution(loc=zero_loc).variance

    def log_prob(self, f: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        """Elementwise log-density of y under N(f, scale)."""
        lp = self.distribution(loc=f).log_prob(y)
        assert lp.shape == f.shape
        return lp
class Laplace(nn.Module):
    """
    Laplace likelihood; the scale is stored unconstrained and mapped through
    softplus, and can optionally be trained.
    """

    def __init__(
        self, d: int = 1, scale: float = 1.0, requires_grad: bool = True
    ) -> None:
        """
        @param d: number of independent dimensions
        @param scale: initial scale parameter
        @param requires_grad: whether the scale is trainable
        """
        super().__init__()
        self.unconstrained_scale = torch.nn.Parameter(
            torch.ones(d), requires_grad=requires_grad
        )
        self._initialize(scale)

    def _initialize(self, x: float) -> None:
        # Store the softplus-inverse so that `scale` starts exactly at x.
        target = invsoftplus(torch.tensor(x)).item()
        init.constant_(self.unconstrained_scale, target)

    @property
    def scale(self):
        return softplus(self.unconstrained_scale)

    def distribution(self, loc):
        return distributions.Laplace(loc=loc, scale=self.scale)

    @property
    def variance(self):
        zero_loc = torch.zeros_like(self.scale)
        return self.distribution(loc=zero_loc).variance

    def log_prob(self, f: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        """Elementwise log-density of y under Laplace(f, scale)."""
        lp = self.distribution(loc=f).log_prob(y)
        assert lp.shape == f.shape
        return lp
| 2,282 | 29.44 | 85 | py |
hgp | hgp-main/hgp/core/dsvgp.py | # MIT License
# Copyright (c) 2021 Pashupati Hegde.
# Copyright (c) 2023 Magnus Ross.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import functorch
import numpy as np
import torch
from hgp.core.kernels import RBF, DerivativeRBF
from hgp.misc import transforms
from hgp.misc.ham_utils import build_J
from hgp.misc.param import Param
from hgp.misc.settings import settings
torch.use_deterministic_algorithms(False)
jitter = 1e-5
def sample_normal(shape, seed=None):
    """Draw a float32 standard-normal tensor of ``shape`` on the configured device."""
    if seed is None:
        rng = np.random.RandomState()
    else:
        rng = np.random.RandomState(seed)
    draws = rng.normal(size=shape).astype(np.float32)
    return torch.tensor(draws).to(settings.device)
def sample_uniform(shape, seed=None):
    """Draw a float32 U[0, 1) tensor of ``shape`` on the configured device."""
    if seed is None:
        rng = np.random.RandomState()
    else:
        rng = np.random.RandomState(seed)
    draws = rng.uniform(low=0.0, high=1.0, size=shape).astype(np.float32)
    return torch.tensor(draws).to(settings.device)
def compute_divergence(dx, y):
    """Trace of the Jacobian d(dx)/dy per batch row, via one autograd pass per
    state dimension. Adapted from FFJORD:
    https://github.com/rtqichen/ffjord/blob/master/lib/layers/odefunc.py
    """
    divergence = 0.0
    dim = y.shape[1]
    for idx in range(dim):
        # Gradient of the idx-th output summed over the batch; its idx-th
        # column is the diagonal Jacobian entry for every batch element.
        grad_i = torch.autograd.grad(dx[:, idx].sum(), y, create_graph=True)[0]
        divergence = divergence + grad_i.contiguous()[:, idx].contiguous()
    return divergence.contiguous()
class DSVGP_Layer(torch.nn.Module):
    """
    A layer class implementing decoupled sampling of the SVGP posterior:
    a pathwise sample f = f_prior (random Fourier features) + pathwise update
    from a sample of the inducing posterior.

    @InProceedings{pmlr-v119-wilson20a,
        title = {Efficiently sampling functions from {G}aussian process posteriors},
        author = {Wilson, James and Borovitskiy, Viacheslav and Terenin, Alexander and Mostowsky, Peter and Deisenroth, Marc},
        booktitle = {Proceedings of the 37th International Conference on Machine Learning},
        pages = {10292--10302},
        year = {2020},
        publisher = {PMLR},
        pdf = {http://proceedings.mlr.press/v119/wilson20a/wilson20a.pdf}
    }
    """
    def __init__(self, D_in, D_out, M, S, q_diag=False, dimwise=True):
        """
        @param D_in: Number of input dimensions
        @param D_out: Number of output dimensions
        @param M: Number of inducing points
        @param S: Number of features to consider for Fourier feature maps
        @param q_diag: Diagonal approximation for inducing posterior
        @param dimwise: If True, different kernel parameters are given to output dimensions
        """
        super(DSVGP_Layer, self).__init__()
        self.kern = RBF(D_in, D_out)
        self.q_diag = q_diag
        self.dimwise = dimwise
        self.D_out = D_out
        self.D_in = D_in
        self.M = M
        self.S = S
        self.inducing_loc = Param(
            np.random.normal(size=(M, D_in)), name="Inducing locations"
        )  # (M,D_in)
        self.Um = Param(
            # np.random.normal(size=(M, D_out)) * 1e-1,
            np.zeros((M, D_out)),  # mean initialized at zero (whitened form)
            name="Inducing distirbution (mean)",
        )  # (M,D_out)
        if self.q_diag:
            # Diagonal posterior scale: one value per inducing point and output.
            self.Us_sqrt = Param(
                np.ones(shape=(M, D_out)) * 1e-1,  # (M,D_out)
                transform=transforms.SoftPlus(),
                name="Inducing distirbution (scale)",
            )
        else:
            # Full lower-triangular Cholesky factor per output dimension.
            self.Us_sqrt = Param(
                np.stack([np.eye(M)] * D_out) * 1e-1,  # (D_out,M,M)
                transform=transforms.LowerTriangular(M, D_out),
                name="Inducing distirbution (scale)",
            )
    def sample_inducing(self):
        """
        Generate a sample from the inducing posterior q(u) ~ N(m, S)
        via the reparameterization u = m + L @ eps.
        @return: inducing sample (M,D_out) tensor
        """
        epsilon = sample_normal(shape=(self.M, self.D_out), seed=None)  # (M, D_out)
        if self.q_diag:
            ZS = self.Us_sqrt() * epsilon  # (M, D_out)
        else:
            ZS = torch.einsum("dnm, md->nd", self.Us_sqrt(), epsilon)  # (M, D_out)
        u_sample = ZS + self.Um()  # (M, D_out)
        return u_sample  # (M, D_out)
    def build_cache(self):
        """
        Builds a cache of computations that uniquely define a sample from the posterior process:
        1. Generate and fix parameters of Fourier features
        2. Generate and fix an inducing posterior sample
        3. Intermediate computations based on the inducing sample for the pathwise update
        """
        # generate parameters required for the Fourier feature maps
        self.rff_weights = sample_normal((self.S, self.D_out))  # (S,D_out)
        self.rff_omega = self.kern.sample_freq(self.S)  # (D_in,S) or (D_in,S,D_out)
        phase_shape = (1, self.S, self.D_out) if self.dimwise else (1, self.S)
        self.rff_phase = sample_uniform(phase_shape) * 2 * np.pi  # (S,D_out)
        # generate sample from the inducing posterior
        inducing_val = self.sample_inducing()  # (M,D)
        # compute the term nu = k(Z,Z)^{-1}(u-f(Z)) in whitened form of inducing variables
        # equation (13) from http://proceedings.mlr.press/v119/wilson20a/wilson20a.pdf
        Ku = self.kern.K(self.inducing_loc())  # (M,M) or (D,M,M)
        Lu = torch.linalg.cholesky(Ku + torch.eye(self.M) * jitter)  # (M,M) or (D,M,M)
        u_prior = self.rff_forward(self.inducing_loc())  # (M,D)
        if not self.dimwise:
            # two triangular solves implement Ku^{-1}(u - f(Z)) via the Cholesky factor
            nu = torch.linalg.solve_triangular(Lu, u_prior, upper=False)  # (M,D)
            nu = torch.linalg.solve_triangular(
                Lu.T, (inducing_val - nu), upper=True
            )  # (M,D)
        else:
            nu = torch.linalg.solve_triangular(
                Lu, u_prior.T.unsqueeze(2), upper=False
            )  # (D,M,1)
            nu = torch.linalg.solve_triangular(
                Lu.permute(0, 2, 1), (inducing_val.T.unsqueeze(2) - nu), upper=True
            )  # (D,M,1)
        self.nu = nu  # (M,D) or (D,M,1) depending on dimwise
    def rff_forward(self, x):
        """
        Calculates samples from the GP prior with random Fourier Features:
        f(x) = sqrt(var/S) * cos(x @ omega + phase) @ w
        @param x: input tensor (N,D)
        @return: function values (N,D_out)
        """
        # compute feature map
        xo = torch.einsum(
            "nd,dfk->nfk" if self.dimwise else "nd,df->nf", x, self.rff_omega
        )  # (N,S) or (N,S,D_in)
        phi_ = torch.cos(xo + self.rff_phase)  # (N,S) or (N,S,D_in)
        phi = phi_ * torch.sqrt(self.kern.variance / self.S)  # (N,S) or (N,S,D_in)
        # compute function values
        f = torch.einsum(
            "nfk,fk->nk" if self.dimwise else "nf,fd->nd", phi, self.rff_weights
        )  # (N,D_out)
        return f  # (N,D_out)
    def build_conditional(self, x, full_cov=False):
        """
        Calculates conditional distribution q(f(x)) = N(m(x), Sigma(x))
        where m(x) = k(x,Z)k(Z,Z)^{-1}u,
              Sigma(x) = k(x,x) + k(x,Z)k(Z,Z)^{-1}(S-K(Z,Z))k(Z,Z)^{-1}k(Z,x)
        @param x: input tensor (N,D)
        @param full_cov: if True, returns full Sigma(x) else returns only diagonal
        @return: m(x), Sigma(x)
        """
        Ku = self.kern.K(self.inducing_loc())  # (M,M) or (D,M,M)
        Lu = torch.linalg.cholesky(Ku + torch.eye(self.M) * jitter)  # (M,M) or (D,M,M)
        Kuf = self.kern.K(self.inducing_loc(), x)  # (M,N) or (D,M,N)
        A = torch.linalg.solve_triangular(
            Lu, Kuf, upper=False
        )  # (M,M)@(M,N) --> (M,N) or (D,M,M)@(D,M,N) --> (D,M,N)
        Us_sqrt = (
            self.Us_sqrt().T[:, :, None] if self.q_diag else self.Us_sqrt()
        )  # (D,M,1) or (D,M,M)
        # S - I in whitened coordinates (identity is the whitened prior covariance)
        SK = (Us_sqrt @ Us_sqrt.permute(0, 2, 1)) - torch.eye(Ku.shape[1]).unsqueeze(
            0
        )  # (D,M,M)
        B = torch.einsum(
            "dme, den->dmn" if self.dimwise else "dmi, in->dmn", SK, A
        )  # (D,M,N)
        if full_cov:
            delta_cov = torch.einsum(
                "dme, dmn->den" if self.dimwise else "me, dmn->den", A, B
            )  # (D,M,N)
            Kff = (
                self.kern.K(x) if self.dimwise else self.kern.K(x).unsqueeze(0)
            )  # (1,N,N) or (D,N,N)
        else:
            delta_cov = ((A if self.dimwise else A.unsqueeze(0)) * B).sum(1)  # (D,N)
            if self.dimwise:
                Kff = torch.diagonal(self.kern.K(x), dim1=1, dim2=2)  # (N,)
            else:
                Kff = torch.diagonal(self.kern.K(x), dim1=0, dim2=1)  # (D,N)
        var = Kff + delta_cov
        mean = torch.einsum(
            "dmn, md->nd" if self.dimwise else "mn, md->nd", A, self.Um()
        )
        return mean, var.T  # (N,D) , (N,D) or (N,N,D)
    def forward(self, t, x):
        """
        Compute a sample from the SVGP posterior using the decoupled sampling approach.
        Involves two steps:
            1. Generate sample from the prior :: rff_forward
            2. Compute pathwise updates using samples from inducing posterior :: build_cache
        @param t: time value, usually None as we define time-invariant ODEs
        @param x: input tensor in (N,D)
        @return: f(x) where f is a sample from GP posterior
        """
        # generate a prior sample using rff
        f_prior = self.rff_forward(x)  # (N,D)
        # compute pathwise updates
        if not self.dimwise:
            Kuf = self.kern.K(self.inducing_loc(), x)  # (M,N)
            f_update = torch.einsum("md, mn -> nd", self.nu, Kuf)  # (N,D)
        else:
            Kuf = self.kern.K(self.inducing_loc(), x)  # (D,M,N)
            f_update = torch.einsum("dm, dmn -> nd", self.nu.squeeze(2), Kuf)  # (N,D)
        # sample from the GP posterior
        dx = f_prior + f_update  # (N,D)
        return dx  # (N,D)
    def kl(self):
        """
        Computes KL divergence for inducing variables in whitened form.
        Calculated as KL between multivariate Gaussians q(u) ~ N(m,S) and p(u) ~ N(0,I)
        @return: KL divergence value tensor
        """
        alpha = self.Um()  # (M,D)
        if self.q_diag:
            Lq = Lq_diag = self.Us_sqrt()  # (M,D)
        else:
            Lq = torch.tril(self.Us_sqrt())  # (D,M,M)
            Lq_diag = torch.diagonal(Lq, dim1=1, dim2=2).t()  # (M,D)
        # compute Mahalanobis term m^T m (prior covariance is identity)
        mahalanobis = torch.pow(alpha, 2).sum(dim=0, keepdim=True)  # (1,D)
        # log-determinant of the covariance of q(u)
        logdet_qcov = torch.log(torch.pow(Lq_diag, 2)).sum(dim=0, keepdim=True)  # (1,D)
        # trace term tr(S)
        if self.q_diag:
            trace = torch.pow(Lq, 2).sum(dim=0, keepdim=True)  # (M,D) --> (1,D)
        else:
            trace = torch.pow(Lq, 2).sum(dim=(1, 2)).unsqueeze(0)  # (D,M,M) --> (1,D)
        logdet_pcov = 0.0  # whitened prior has unit covariance
        constant = -torch.tensor(self.M)
        twoKL = logdet_pcov - logdet_qcov + mahalanobis + trace + constant
        kl = 0.5 * twoKL.sum()
        return kl
class Hamiltonian_DSVGP_Layer(DSVGP_Layer):
    """
    DSVGP layer constrained to Hamiltonian dynamics: the GP models a scalar
    Hamiltonian H(x), and the vector field is its symplectic gradient J dH/dx.
    """
    def __init__(self, D_in, M, S, q_diag=False):
        """
        @param D_in: Number of input dimensions
        @param M: Number of inducing points
        @param S: Number of features to consider for Fourier feature maps
        @param q_diag: Diagonal approximation for inducing posterior
        """
        # D_out=1: the GP models the scalar Hamiltonian, not the field directly.
        super(Hamiltonian_DSVGP_Layer, self).__init__(
            D_in, 1, M, S, q_diag=q_diag, dimwise=False
        )
        self.kern = DerivativeRBF(D_in)
        self.J = build_J(D_in)
    def hamiltonian(self, t, x):
        """Scalar Hamiltonian H(x) sampled from the GP posterior; returns (N,)."""
        H = super(Hamiltonian_DSVGP_Layer, self).forward(t, x)
        return H[:, 0]
    def forward(self, t, x):
        """Hamiltonian vector field J @ dH/dx evaluated at x; returns (N,D_in)."""
        dHdx = functorch.grad(lambda xi: self.hamiltonian(t, xi).sum())(x)
        return dHdx @ self.J.T
| 12,813 | 39.169279 | 138 | py |
hgp | hgp-main/hgp/core/nn.py | import functorch
import torch
from torch import nn
from hgp.misc.ham_utils import build_J
def Linear(chin, chout, zero_bias=False, orthogonal_init=False):
    """Create an ``nn.Linear(chin, chout)``, optionally zeroing the bias and/or
    applying orthogonal weight initialization with gain 0.5."""
    layer = nn.Linear(chin, chout)
    if zero_bias:
        nn.init.zeros_(layer.bias)
    if orthogonal_init:
        nn.init.orthogonal_(layer.weight, gain=0.5)
    return layer
def FCtanh(chin, chout, zero_bias=False, orthogonal_init=False):
    """Fully connected layer followed by a Tanh activation."""
    fc = Linear(chin, chout, zero_bias, orthogonal_init)
    return nn.Sequential(fc, nn.Tanh())
class NNModel(nn.Module):
    """Plain MLP vector field f: R^{D_in} -> R^{D_out} with Tanh hidden layers."""

    def __init__(self, D_in, D_out, N_nodes, N_layers):
        """
        @param D_in: input dimensionality
        @param D_out: output dimensionality
        @param N_nodes: hidden-layer width
        @param N_layers: number of hidden (FC + Tanh) layers
        """
        super().__init__()
        self.D_out = D_out
        self.D_in = D_in
        widths = [D_in] + [N_nodes] * N_layers
        hidden = [
            FCtanh(w_in, w_out, zero_bias=False, orthogonal_init=False)
            for w_in, w_out in zip(widths[:-1], widths[1:])
        ]
        head = Linear(widths[-1], D_out, zero_bias=False, orthogonal_init=False)
        self.net = nn.Sequential(*hidden, head)

    def forward(self, t, x):
        """Evaluate the vector field; ``t`` is ignored (autonomous dynamics)."""
        return self.net(x)

    def build_cache(self):
        """No-op; present for interface compatibility with sampled-GP flows."""
        pass
class HamiltonianNNModel(NNModel):
    """
    Implements a NN model with Hamiltonian restriction: the MLP outputs a
    scalar Hamiltonian H(x), and the vector field is its symplectic gradient
    J @ dH/dx.
    """

    def __init__(self, D_in, N_nodes, N_layers):
        """
        @param D_in: state dimensionality
        @param N_nodes: hidden-layer width
        @param N_layers: number of hidden layers
        """
        # D_out=1: the network models the scalar Hamiltonian.
        super(HamiltonianNNModel, self).__init__(D_in, 1, N_nodes, N_layers)
        self.J = build_J(D_in)

    def hamiltonian(self, t, x):
        """Scalar Hamiltonian H(x); returns shape (N,)."""
        H = super(HamiltonianNNModel, self).forward(t, x)
        return H[:, 0]

    def forward(self, t, x):
        """Hamiltonian vector field J @ dH/dx evaluated at x; returns (N,D_in)."""
        dHdx = functorch.grad(lambda xi: self.hamiltonian(t, xi).sum())(x)
        f = dHdx @ self.J.T
        return f
| 1,637 | 25.852459 | 84 | py |
hgp | hgp-main/hgp/core/states.py | # MIT License
# Copyright (c) 2021 Pashupati Hegde.
# Copyright (c) 2023 Magnus Ross.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
import torch
from torch import nn
from torch.distributions import MultivariateNormal
from hgp.misc import settings, transforms
from hgp.misc.param import Param
initial_state_scale = 1e-2
jitter = 1e-5
def sample_normal(shape, seed=None):
    """Draw a float32 standard-normal tensor of ``shape`` (optionally seeded)."""
    if seed is None:
        rng = np.random.RandomState()
    else:
        rng = np.random.RandomState(seed)
    draws = rng.normal(size=shape).astype(np.float32)
    return torch.tensor(draws)
class StateInitialDistribution(nn.Module):
    """
    Interface for variational posteriors over the initial state q(x_0).
    Concrete subclasses hold the parameters and implement initialization,
    sampling, log-density and KL computation.
    """

    def __init__(self, dim_n, dim_d):
        """
        @param dim_n: N number of sequences
        @param dim_d: D state dimensionality
        """
        super().__init__()
        self.dim_n = dim_n
        self.dim_d = dim_d

    def _initialize(self, x0, xs):
        raise NotImplementedError

    def sample(self, num_samples, seed=None):
        raise NotImplementedError

    def log_prob(self, x):
        raise NotImplementedError

    def kl(self):
        raise NotImplementedError
class DeltaInitialDistrubution(StateInitialDistribution):
    """Point-mass (delta) posterior over the initial state: q(x_0) = delta(m)."""

    def __init__(self, dim_n, dim_d):
        super().__init__(dim_n, dim_d)
        init_mean = np.random.normal(size=(dim_n, dim_d)) * initial_state_scale
        self.param_mean = Param(
            init_mean,
            name="Initiali state distirbution (mean)",
        )

    def _initialize(self, x, xs):
        """Set the mean directly from data; ``xs`` is unused for a delta posterior."""
        self.param_mean().data = x

    def mean(self):
        return self.param_mean()

    def sample(self, num_samples=1, seed=None):
        """Return the mean tiled ``num_samples`` times; shape (S,N,D)."""
        mean = self.param_mean().unsqueeze(0)
        return mean.repeat(num_samples, 1, 1)
class StateInitialVariationalGaussian(StateInitialDistribution):
    """
    Full rank multivariate Gaussian approximation for the Initial state posterior q(x_0) = N(m, S)
    where x is (N,D), m is (N,D), S is (N,D,D),
    N being the number of sequences, D being the number of state dimensions.
    """
    def __init__(self, dim_n, dim_d):
        """
        @param dim_n: N number of sequences
        @param dim_d: D state dimensionality
        """
        super(StateInitialVariationalGaussian, self).__init__(dim_n, dim_d)
        self.param_mean = Param(
            np.random.normal(size=(dim_n, dim_d)) * initial_state_scale,
            name="Initiali state distirbution (mean)",
        )
        # Lower-triangular Cholesky factor of the per-sequence covariance.
        self.param_lchol = Param(
            np.stack([np.eye(dim_d)] * dim_n) * initial_state_scale,  # NxDxD
            transform=transforms.LowerTriangular(dim_d, dim_n),
            name="Initial state distirbution (scale)",
        )
    def _initialize(self, x, xs):
        # Set the variational mean from data; the scale keeps its initialization.
        self.param_mean().data = x
    def mean(self):
        """Variational mean m, shape (N,D)."""
        return self.param_mean()
    def lchol(self):
        """Cholesky factor L of the covariance, shape (N,D,D)."""
        return self.param_lchol()
    def distirbution(self):
        """Build the MultivariateNormal q(x_0) = N(m, L L^T + jitter*I).

        NOTE: the method name keeps the original (misspelled) public spelling.
        """
        x0_mean = self.mean()
        x0_lchol = self.lchol()
        x0_qcov = torch.einsum("nij, nkj -> nik", x0_lchol, x0_lchol)
        dist = MultivariateNormal(
            loc=x0_mean,
            covariance_matrix=x0_qcov
            + torch.eye(x0_qcov.shape[-1]).unsqueeze(0) * jitter,
        )
        return dist
    def sample_numpy(self, num_samples=1, seed=None):
        """Reparameterized sample m + L @ eps with a (numpy-seeded) eps; returns (S,N,D)."""
        x0_mean = self.mean().unsqueeze(0)
        x0_lchol = self.lchol()
        epsilon = sample_normal(
            shape=(num_samples, self.dim_n, self.dim_d), seed=seed
        )  # (S,N,D)
        zs = torch.einsum("nij, snj -> sni", x0_lchol, epsilon)
        return zs + x0_mean  # (S,N,D)
    def sample(self, num_samples=1, seed=None):
        """Reparameterized sample through torch distributions; ``seed`` is ignored here."""
        s = self.distirbution().rsample((num_samples,))
        return s
    def log_prob(self, x):
        """Log-density of x under q(x_0)."""
        return self.distirbution().log_prob(x)
    def kl(self):
        """KL( q(x_0) || N(0, I) ), summed over sequences."""
        alpha = self.mean()  # NxD
        Lq = torch.tril(self.lchol())  # force lower triangle # NxDxD
        Lq_diag = torch.diagonal(Lq, dim1=1, dim2=2)  # NxD
        # Mahalanobis term: μqᵀ Σp⁻¹ μq
        mahalanobis = torch.pow(alpha, 2).sum(dim=1, keepdim=True)  # Nx1
        # Log-determinant of the covariance of q(x):
        logdet_qcov = torch.log(torch.pow(Lq_diag, 2)).sum(dim=1, keepdim=True)  # Nx1
        # Trace term: tr(Σp⁻¹ Σq)
        trace = torch.pow(Lq, 2).sum(dim=(1, 2)).unsqueeze(1)  # NxDxD --> Nx1
        logdet_pcov = 0.0  # standard-normal prior
        constant = -torch.tensor(self.dim_d)
        twoKL = logdet_pcov - logdet_qcov + mahalanobis + trace + constant
        kl = 0.5 * twoKL.sum()
        return kl  # scalar (sum over sequences)
class StateSequenceVariationalDistribution(nn.Module):
    """
    Base class defining the variational posterior over a state sequence.
    Subclasses hold the parameters and implement initialization, sampling,
    density evaluation and entropy.
    """

    def __init__(self, dim_n, dim_t, dim_d):
        """
        @param dim_n: N number of sequences
        @param dim_t: T sequence length
        @param dim_d: D state dimensionality
        """
        super(StateSequenceVariationalDistribution, self).__init__()
        self.dim_n = dim_n
        self.dim_t = dim_t
        self.dim_d = dim_d

    def _add_initial_state(self):
        # Attach a posterior over the initial state x_0.
        # (Renamed from the misspelled `_add_intial_state` so the stub matches
        # the method the subclasses actually define and call.)
        raise NotImplementedError

    def _initialize(self, x0, xs):
        raise NotImplementedError

    def sample(self, num_samples, **kwargs):
        raise NotImplementedError

    def log_prob(self, x):
        raise NotImplementedError

    def entropy(self):
        raise NotImplementedError
class DeltaStateSequenceDistribution(StateSequenceVariationalDistribution):
    """
    Point-mass (delta) posterior over the state sequence: one free mean per
    sequence/time/dimension, with a delta posterior over the initial state.
    """

    def __init__(self, dim_n, dim_t, dim_d):
        """
        @param dim_n: N number of sequences
        @param dim_t: T sequence length
        @param dim_d: D state dimensionality
        """
        super().__init__(dim_n, dim_t, dim_d)
        init_mean = (
            np.random.normal(size=(self.dim_n, self.dim_t, self.dim_d))
            * initial_state_scale
        )
        self.param_mean = Param(
            init_mean,
            name="State distribution (mean)",
        )  # (N,T,D)
        self._add_initial_state()

    def _add_initial_state(self):
        # Delta posterior over x_0, held separately from the sequence mean.
        self.x0 = DeltaInitialDistrubution(self.dim_n, self.dim_d)

    def _initialize(self, x0, xs):
        """Set the initial-state and sequence means from data."""
        self.x0._initialize(x0, None)
        self.param_mean().data = xs

    def mean(self):
        return self.param_mean()

    def sample(self, num_samples=1, seed=None):
        """Return the deterministic mean trajectory tiled S times; shape (S,N,T+1,D)."""
        x0_part = self.x0.sample(num_samples).unsqueeze(2)
        seq_part = self.param_mean().unsqueeze(0).repeat(num_samples, 1, 1, 1)
        return torch.cat([x0_part, seq_part], 2)
class StateSequenceVariationalFactorizedGaussian(StateSequenceVariationalDistribution):
    """
    Full rank multivariate Gaussian approximation for the state sequence posterior q(x_s) = N(m, S)
    where x_s is (N,T,D), m is (N,T,D), S is (N,T,D,D).
    N is the number of sequences, T is the sequence length, D being the number of state dimensions.
    Factorizes over time; a separate full-rank Gaussian over the initial state
    is attached as ``self.x0``.
    """

    def __init__(self, dim_n, dim_t, dim_d):
        """
        @param dim_n: N number of sequences
        @param dim_t: T sequence length
        @param dim_d: D state dimensionality
        """
        super(StateSequenceVariationalFactorizedGaussian, self).__init__(
            dim_n, dim_t, dim_d
        )
        self.param_mean = Param(
            np.random.normal(size=(self.dim_n, self.dim_t, self.dim_d))
            * initial_state_scale,
            name="State distribution (mean)",
        )  # (N,T,D)
        # Per-timepoint lower-triangular Cholesky factors of the covariance.
        self.param_lchol = Param(
            np.stack([np.stack([np.eye(self.dim_d)] * self.dim_t)] * self.dim_n)
            * initial_state_scale,
            # (N,T,D,D)
            transform=transforms.StackedLowerTriangular(
                self.dim_d, self.dim_n, self.dim_t
            ),
            name="State distribution (scale)",
        )
        self._add_initial_state()

    def _add_initial_state(self):
        # Full-rank Gaussian posterior over x_0.
        self.x0 = StateInitialVariationalGaussian(self.dim_n, self.dim_d)

    def _initialize(self, x0, xs, xs_std=None):
        """
        Initialize the variational parameters from data.
        @param x0: initial state means (N,D)
        @param xs: state sequence means (N,T,D)
        @param xs_std: optional per-dimension standard deviations (N,T,D) used to
            seed the Cholesky scale as a diagonal matrix
        """
        self.x0._initialize(x0, None)
        self.param_mean().data = xs
        if xs_std is not None:
            # BUGFIX: this previously referenced the non-existent attribute
            # `self.param_scale`; the scale parameter is `self.param_lchol`.
            self.param_lchol.optvar.data = self.param_lchol.transform.backward_tensor(
                torch.diag_embed(xs_std)
            ).data

    def mean(self):
        """Variational mean, shape (N,T,D)."""
        return self.param_mean()

    def lchol(self):
        """Cholesky factors of the covariance, shape (N,T,D,D)."""
        return self.param_lchol()

    def distribution(self):
        """MultivariateNormal q(x_s) = N(m, L L^T + jitter*I), batched over (N,T)."""
        xs_mean = self.mean()
        xs_lchol = self.lchol()
        xs_qcov = torch.einsum("ntij, ntkj -> ntik", xs_lchol, xs_lchol)
        xs_qcov = (
            xs_qcov + torch.eye(xs_qcov.shape[-1]).unsqueeze(0).unsqueeze(0) * jitter
        )
        dist = MultivariateNormal(loc=xs_mean, covariance_matrix=xs_qcov)
        return dist

    def sample_numpy(self, num_samples=1, seed=None):
        """Reparameterized sample m + L @ eps with numpy-seeded eps, with the
        initial-state sample prepended; returns (S, N, T+1, D)."""
        epsilon = sample_normal(
            shape=(num_samples, self.dim_n, self.dim_t, self.dim_d), seed=seed
        )  # (S,N,T,D)
        zs = torch.einsum("ntij, sntj->snti", self.lchol(), epsilon)
        return torch.cat(
            [
                self.x0.sample(num_samples, seed).unsqueeze(2),
                zs + self.mean().unsqueeze(0),
            ],
            2,
        )  # (S, N, T+1, D)

    def sample(self, num_samples=1, seed=None):
        """Sample through torch distributions, initial state prepended; (S, N, T+1, D)."""
        return torch.cat(
            [
                self.x0.sample(num_samples).unsqueeze(2),
                self.distribution().rsample((num_samples,)),
            ],
            2,
        )  # (S, N, T+1, D)

    def entropy(self):
        """Entropy of q(x_s), batched over (N,T); excludes the initial state."""
        return self.distribution().entropy()

    def log_prob(self, x):
        """Log-density of x under q(x_s); excludes the initial state."""
        return self.distribution().log_prob(x)
| 10,358 | 31.990446 | 99 | py |
hgp | hgp-main/hgp/core/observation_likelihoods.py | # MIT License
# Copyright (c) 2021 Pashupati Hegde.
# Copyright (c) 2023 Magnus Ross.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
import torch
import torch.nn as nn
from torch.nn import init
from hgp.misc.constraint_utils import invsoftplus, softplus
class Gaussian(nn.Module):
    """
    Gaussian observation likelihood with a softplus-constrained, trainable
    noise variance (one variance per output dimension).
    """

    def __init__(self, ndim=1, init_val=0.01):
        """
        @param ndim: number of output dimensions
        @param init_val: initial noise variance
        """
        super().__init__()
        self.unconstrained_variance = torch.nn.Parameter(
            torch.ones(ndim), requires_grad=True
        )
        self._initialize(init_val)

    def _initialize(self, x):
        # Set the raw parameter so that `variance` starts exactly at x.
        target = invsoftplus(torch.tensor(x)).item()
        init.constant_(self.unconstrained_variance, target)

    @property
    def variance(self):
        return softplus(self.unconstrained_variance)

    @variance.setter
    def variance(self, value):
        raw = invsoftplus(value)
        self.unconstrained_variance = nn.Parameter(raw, requires_grad=True)

    def log_prob(self, F, Y):
        """Elementwise Gaussian log-density of observations Y around means F."""
        var = self.variance
        residual_sq = torch.pow(F - Y, 2)
        return -0.5 * (np.log(2.0 * np.pi) + torch.log(var) + residual_sq / var)
| 2,157 | 33.253968 | 88 | py |
hgp | hgp-main/hgp/models/sequence.py | # MIT License
# Copyright (c) 2021 Pashupati Hegde.
# Copyright (c) 2023 Magnus Ross.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from hgp.misc.torch_utils import insert_zero_t0, compute_ts_dense
from torch import nn
import torch
def stack_segments(unstacked):
    """Flatten all leading dimensions into one: (..., D) -> (prod(...), D)."""
    last_dim = unstacked.shape[-1]
    return unstacked.reshape(-1, last_dim)
def unstack_segments(stacked, unstacked_shape):
    """Inverse of stack_segments: reshape back to the given ``unstacked_shape``."""
    return torch.reshape(stacked, unstacked_shape)
class BaseSequenceModel(nn.Module):
    """
    Base class for sequence models that learn an unknown Hamiltonian system.
    Setups for non-uniform observation grids or time mini-batching derive from
    this class.

    Provides:
        build_flow: forward ODE integration from an initial state over a time grid
        build_lowerbound_terms: (abstract) lower-bound terms for training
        build_objective: (abstract) scalar training objective
        build_inducing_kl: KL between inducing prior and posterior, scaled by N
        forward: alias of build_flow
    """

    def __init__(
        self,
        flow,
        num_observations,
        state_distribution,
        observation_likelihood,
        constraint_likelihood,
        ts_dense_scale=2,
    ):
        super().__init__()
        self.flow = flow
        self.num_observations = num_observations
        self.state_distribution = state_distribution
        self.observation_likelihood = observation_likelihood
        self.constraint_likelihood = constraint_likelihood
        self.ts_dense_scale = ts_dense_scale

    def build_flow(self, x0, ts):
        """
        Integrate the flow forward from x0 over ts. The time grid is optionally
        densified by self.ts_dense_scale for the solver, then the solution is
        subsampled back onto the original grid.
        @param x0: initial state tensor (N,D)
        @param ts: time sequence tensor (T,)
        @return: forward solution tensor (N,T,D)
        """
        dense_ts = compute_ts_dense(ts, self.ts_dense_scale)
        solution = self.flow(x0, dense_ts, return_energy=False)
        return solution[:, :: self.ts_dense_scale - 1, :]

    def build_lowerbound_terms(self, ys, ts, **kwargs):
        raise NotImplementedError

    def build_objective(self, ys, ts):
        raise NotImplementedError

    def build_inducing_kl(self):
        """
        KL divergence between inducing prior and posterior.
        @return: inducing KL scaled by the number of observations
        """
        return self.flow.kl() / self.num_observations

    def forward(self, x0, ts):
        """
        Alias for build_flow.
        @param x0: initial state tensor (N,D)
        @param ts: time sequence tensor (T,)
        @return: forward solution tensor (N,T,D)
        """
        return self.build_flow(x0, ts)
class NNSequenceModel(BaseSequenceModel):
    """
    Sequence model whose derivative function is a plain neural network; trains
    by rolling the flow out from the first observed state and scoring the
    predictions against the full observed sequence.
    """

    def build_objective(self, ys, ts):
        raise NotImplementedError

    def build_inducing_kl(self):
        raise NotImplementedError

    def build_lowerbound_terms(self, ys_batched, ts, num_samples=1):
        """
        Roll out from the first observation of each sequence and evaluate the
        observation likelihood of the resulting trajectories.
        @param ys_batched: observed sequence tensor (N,T,D)
        @param ts: observed time sequence (T,)
        @return: observation loss
        """
        assert num_samples == 1, ">1 sample not implemented for standard model."
        predictions = self.build_flow(ys_batched[:, 0, :], ts)
        return self.observation_likelihood(predictions, ys_batched)
class NNUniformShootingModel(BaseSequenceModel):
    """
    Neural network model, with shooting: the trajectory is split into uniform
    segments; each segment is integrated from a free "shooting" state, and a
    constraint term ties each shooting state to the previous segment's endpoint.
    """
    def __init__(
        self,
        flow,
        num_observations,
        state_distribution,
        observation_likelihood,
        constraint_likelihood,
        shooting_time_factor=None,
        ts_dense_scale=2,
        alpha=100,
    ):
        """
        @param shooting_time_factor: number of observation steps per shooting segment
        @param alpha: weight of the shooting continuity penalty
        (remaining parameters as in BaseSequenceModel)
        """
        super(NNUniformShootingModel, self).__init__(
            flow=flow,
            num_observations=num_observations,
            state_distribution=state_distribution,
            observation_likelihood=observation_likelihood,
            constraint_likelihood=constraint_likelihood,
            ts_dense_scale=ts_dense_scale,
        )
        self.shooting_time_factor = shooting_time_factor
        self.alpha = alpha
    def build_objective(self, ys, ts):
        """Total training objective: observation loss plus weighted shooting penalty."""
        loss, shooting_loss = self.build_lowerbound_terms(ys, ts)
        return loss + shooting_loss
    def build_inducing_kl(self):
        raise NotImplementedError
    def build_lowerbound_terms(self, ys, ts, num_samples=1):
        """
        Given observed states and time, builds the individual terms for the lowerbound computation
        @param ys: observed sequence tensor (N,T,D)
        @param ts: observed time sequence (T,); assumed uniformly spaced
        @return: observation loss, weighted shooting (continuity) loss
        """
        assert num_samples == 1, ">1 sample not implemented for nn model."
        ss_samples = self.state_distribution.sample(
            num_samples=num_samples
        )  # (S,N,(T-1)/shooting_time_factor + 1, D)
        (S, N, N_state, D) = ss_samples.shape
        N_shooting = N_state - 1
        T = ts.shape[0]
        # Segment length implied by the sampled state count must match config.
        shooting_time_factor = (T - 1) // N_shooting
        assert (
            shooting_time_factor == self.shooting_time_factor
        ), f"{shooting_time_factor}, {T}, {N_shooting}"
        # Integrate every shooting state (except the first) for one segment;
        # uniform spacing lets a single short ts slice serve all segments.
        predicted_xs = self.flow(
            x0=stack_segments(ss_samples[:, :, 1:, :]),
            ts=ts[: shooting_time_factor + 1],
        )  # (SxNxN_state, shooting_time_factor+1, D)
        predicted_xs = unstack_segments(
            predicted_xs[:, 1:], (S, N, T - 1, D)
        )  # (S, N, T-1, D)
        # The initial shooting state is integrated one step to predict t_1.
        predicted_x0 = self.flow(
            x0=stack_segments(ss_samples[:, :, 0, :]),
            ts=ts[:2],
        )
        predicted_x0 = unstack_segments(predicted_x0[:, -1], (S, N, 1, D))
        predicted_xs = torch.cat([predicted_x0, predicted_xs], axis=2)
        loss = self.observation_likelihood(predicted_xs, ys.unsqueeze(0))
        # Continuity constraint: each shooting state should match the endpoint
        # predicted from the preceding segment.
        shooting_loss = self.constraint_likelihood(
            ss_samples[:, :, 1:, :],
            predicted_xs[:, :, 0:-shooting_time_factor:shooting_time_factor, :],
        )
        return loss, self.alpha * shooting_loss
class SequenceModel(BaseSequenceModel):
    """
    Standard ODE model, with no shooting; works for irregular timepoints.
    """

    def __init__(
        self,
        flow,
        num_observations,
        state_distribution,
        observation_likelihood,
        constraint_likelihood,
        ts_dense_scale=2,
    ):
        """
        @param flow: ODE flow over the learned vector field
        @param num_observations: total number of observed scalars (N*T*D)
        @param state_distribution: variational distribution over initial states
        @param observation_likelihood: likelihood of observations given states
        @param constraint_likelihood: unused here (no shooting constraints)
        @param ts_dense_scale: time-densification factor for the solver
        """
        super(SequenceModel, self).__init__(
            flow=flow,
            num_observations=num_observations,
            state_distribution=state_distribution,
            observation_likelihood=observation_likelihood,
            constraint_likelihood=constraint_likelihood,
            ts_dense_scale=ts_dense_scale,
        )

    def build_objective(self, ys, ts):
        """
        Compute the negative evidence lowerbound used as the training loss.

        @param ys: true observation sequence
        @param ts: observation times
        @return: scalar loss (negative ELBO)
        """
        observ_loglik, init_state_kl = self.build_lowerbound_terms(ys, ts)
        kl = self.build_inducing_kl()
        loss = -(observ_loglik - init_state_kl - kl)
        return loss

    def build_lowerbound_terms(self, ys, ts, num_samples=1):
        """
        Given observed states and time, builds the individual terms for the
        lowerbound computation.

        @param ys: observed sequence tensor (N,T,D)
        @param ts: observed time sequence (T,)
        @param num_samples: must be 1 (sampling is not implemented here)
        @return: (mean observation log-likelihood,
                  initial-state KL scaled by the observation count)
        """
        assert num_samples == 1, ">1 sample not implemented for standard model."
        # Prepend t=0 so the sampled initial state can be integrated to ts[0].
        ts = insert_zero_t0(ts)
        x0_samples = self.state_distribution.sample(num_samples=1)[0]
        x0_kl = self.state_distribution.kl()
        # Drop the first solver point, which is the sampled x0 itself.
        xs = self.build_flow(x0_samples, ts)[:, 1:]
        loglik = self.observation_likelihood.log_prob(xs, ys)
        return loglik.mean(), x0_kl.mean() / self.num_observations
class SubSequenceModel(BaseSequenceModel):
    """
    Batched data model, with timepoints on a regular grid.

    Each batch element is integrated from its own first observation; there is
    no initial-state distribution and no shooting.
    """

    def __init__(
        self,
        flow,
        num_observations,
        state_distribution,
        observation_likelihood,
        constraint_likelihood,
        ts_dense_scale=2,
    ):
        """
        @param flow: ODE flow over the learned vector field
        @param num_observations: total number of observed scalars (N*T*D)
        @param state_distribution: unused here (initial states taken from data)
        @param observation_likelihood: likelihood of observations given states
        @param constraint_likelihood: unused here (no shooting constraints)
        @param ts_dense_scale: time-densification factor for the solver
        """
        super(SubSequenceModel, self).__init__(
            flow=flow,
            num_observations=num_observations,
            state_distribution=state_distribution,
            observation_likelihood=observation_likelihood,
            constraint_likelihood=constraint_likelihood,
            ts_dense_scale=ts_dense_scale,
        )

    def build_objective(self, ys, ts):
        """
        Compute the training objective (negative lowerbound).

        @param ys: true observation sequence
        @param ts: observation times
        @return: scalar loss
        """
        observ_loglik = self.build_lowerbound_terms(ys, ts)
        kl = self.build_inducing_kl()
        loss = -(observ_loglik - kl)
        return loss

    def build_lowerbound_terms(self, ys_batched, ts, num_samples=1):
        """
        Given observed states and time, builds the individual terms for the
        lowerbound computation.

        @param ys_batched: observed sequence tensor (N_batch,T,D)
        @param ts: observed time sequence (T,)
        @param num_samples: must be 1 (sampling is not implemented here)
        @return: mean observation log-likelihood
        """
        assert num_samples == 1, ">1 sample not implemented for standard model."
        # Each sub-sequence starts from its first observed state.
        xs = self.build_flow(ys_batched[:, 0, :], ts)
        loglik = self.observation_likelihood.log_prob(xs, ys_batched)
        return loglik.mean()
class UniformShootingModel(BaseSequenceModel):
    """
    Implements a shooting model for data observed on a uniform time grid.

    The trajectory is split into segments of shooting_time_factor observed
    steps; each segment is integrated from its own variational shooting state,
    and consecutive segments are stitched together by a soft constraint
    likelihood.
    """

    def __init__(
        self,
        flow,
        num_observations,
        state_distribution,
        observation_likelihood,
        constraint_likelihood,
        shooting_time_factor=None,
        energy_likelihood=None,
        ts_dense_scale=2,
    ):
        """
        @param flow: ODE flow over the learned vector field
        @param num_observations: total number of observed scalars (N*T*D)
        @param state_distribution: variational distribution over shooting states
        @param observation_likelihood: likelihood of observations given states
        @param constraint_likelihood: likelihood tying segment endpoints together
        @param shooting_time_factor: observed time steps spanned by one segment
        @param energy_likelihood: optional energy constraint (used by subclass)
        @param ts_dense_scale: time-densification factor for the solver
        """
        super(UniformShootingModel, self).__init__(
            flow=flow,
            num_observations=num_observations,
            state_distribution=state_distribution,
            observation_likelihood=observation_likelihood,
            constraint_likelihood=constraint_likelihood,
            ts_dense_scale=ts_dense_scale,
        )
        self.energy_likelihood = energy_likelihood
        # Energy conservation is only enforced by ConsUniformShootingModel.
        self.constrain_energy = False
        self.shooting_time_factor = shooting_time_factor

    def compute_segments(self, ts, num_samples=1, constrain_energy=False):
        """
        Sample shooting states and integrate every segment in parallel.

        @param ts: observed time sequence (T,)
        @param num_samples: number of reparametrized shooting-state samples S
        @param constrain_energy: if True, also return the flow energy at each
               segment's initial point
        @return: (ss_samples (S,N,N_state,D), predicted_xs (S,N,T,D))
                 and additionally ss_energy (S,N,N_state,1) when
                 constrain_energy is True
        """
        ss_samples = self.state_distribution.sample(
            num_samples=num_samples
        )  # (S,N,(T-1)/shooting_time_factor + 1, D)
        (S, N, N_state, D) = ss_samples.shape
        N_shooting = N_state - 1
        T = ts.shape[0]
        shooting_time_factor = (T - 1) // N_shooting
        assert (
            shooting_time_factor == self.shooting_time_factor
        ), f"{shooting_time_factor}, {T}, {N_shooting}"
        if constrain_energy:
            predicted_xs, predicted_energy = self.flow(
                x0=stack_segments(ss_samples),
                ts=ts[: shooting_time_factor + 1],
                return_energy=True,
            )  # (SxNxN_state, shooting_time_factor+1, D)
            # get the energy of the initial point of each segement
            # include initial x0 in ss energy as it also needs to be penalised
            ss_energy = unstack_segments(predicted_energy[:, 0], (S, N, N_state, 1))
        else:
            predicted_xs = self.flow(
                x0=stack_segments(ss_samples),
                ts=ts[: shooting_time_factor + 1],
                return_energy=False,
            )  # (SxNxN_state, shooting_time_factor+1, D)
        # Every shooting state (including the last) was integrated a full
        # segment, so we have shooting_time_factor extra trailing points.
        predicted_xs = unstack_segments(
            predicted_xs[:, 1:], (S, N, T + shooting_time_factor - 1, D)
        )
        # Keep the one-step prediction from x0 and drop the rest of its segment,
        # which overlaps with the first shooting state's segment.
        predicted_xs = torch.cat(
            [
                predicted_xs[:, :, 0:1, :],
                predicted_xs[:, :, shooting_time_factor:, :],
            ],
            axis=2,
        )
        if constrain_energy:
            return ss_samples, predicted_xs, ss_energy
        else:
            return ss_samples, predicted_xs

    def build_lowerbound_terms(self, ys, ts, num_samples=1):
        """
        Given observed states and time, builds the individual terms for the lowerbound computation
        @param ys: observed sequence tensor (N,T,D)
        @param ts: observed time sequence (T,)
        @param num_samples: number of reparametrized samples used to compute lowerbound
        @return: (mean observation log-likelihood,
                  scaled shooting-constraint log-likelihood,
                  scaled shooting-state entropy,
                  scaled initial-state KL)
        """
        ss_samples, predicted_xs = self.compute_segments(
            ts, num_samples=num_samples, constrain_energy=False
        )
        (S, N, N_state, D) = ss_samples.shape
        N_shooting = N_state - 1
        T = ts.shape[0]
        shooting_time_factor = (T - 1) // N_shooting
        assert (
            shooting_time_factor == self.shooting_time_factor
        ), f"{shooting_time_factor}, {T}, {N_shooting}"
        observation_loglik = self.observation_likelihood.log_prob(
            predicted_xs, ys.unsqueeze(0)
        )  # (S,N,T,D)
        # compute the entropy of variational posteriors for shooting states
        state_entropy = self.state_distribution.entropy()  # (N,T-1)
        # compute the shooting constraint likelihoods: each shooting state must
        # match the prediction at the end of the preceding segment
        state_constraint_logprob = self.constraint_likelihood.log_prob(
            ss_samples[:, :, 1:, :],
            predicted_xs[:, :, 0:-shooting_time_factor:shooting_time_factor, :],
        ).sum(
            3
        )  # (S,N,T-1)
        # compute initial state KL
        initial_state_kl = self.state_distribution.x0.kl()  # (1,)
        assert state_entropy.shape == (N, N_shooting)
        assert state_constraint_logprob.shape == (S, N, N_shooting)
        # All terms are rescaled per-observation so they share a common scale.
        total_state_constraint_loglik = (
            state_constraint_logprob.mean(0).sum()
        ) / self.num_observations
        scaled_state_entropy = state_entropy.sum() / self.num_observations
        scaled_initial_state_kl = initial_state_kl / self.num_observations
        return (
            observation_loglik.mean(),
            total_state_constraint_loglik,
            scaled_state_entropy,
            scaled_initial_state_kl,
        )
class ConsUniformShootingModel(UniformShootingModel):
    """
    Energy-conserving shooting model, with timepoints on a regular grid.

    Extends UniformShootingModel with an additional constraint likelihood that
    ties together the flow energies of consecutive shooting segments.
    """

    def __init__(
        self,
        flow,
        num_observations,
        state_distribution,
        observation_likelihood,
        constraint_likelihood,
        shooting_time_factor=None,
        energy_likelihood=None,
        ts_dense_scale=2,
    ):
        """
        @param energy_likelihood: likelihood penalizing energy differences
               between consecutive shooting states (see parent for the rest)
        """
        super(ConsUniformShootingModel, self).__init__(
            flow=flow,
            num_observations=num_observations,
            state_distribution=state_distribution,
            observation_likelihood=observation_likelihood,
            constraint_likelihood=constraint_likelihood,
            shooting_time_factor=shooting_time_factor,
            energy_likelihood=energy_likelihood,
            ts_dense_scale=ts_dense_scale,
        )
        # Enables the energy branch of compute_segments.
        self.constrain_energy = True

    def build_lowerbound_terms(self, ys, ts, num_samples=1):
        """
        Given observed states and time, builds the individual terms for the lowerbound computation
        @param ys: observed sequence tensor (N,T,D)
        @param ts: observed time sequence (T,)
        @param num_samples: number of reparametrized samples used to compute lowerbound
        @return: (mean observation log-likelihood,
                  scaled shooting-constraint log-likelihood,
                  scaled energy-constraint log-likelihood,
                  scaled shooting-state entropy,
                  scaled initial-state KL)
        """
        ss_samples, predicted_xs, ss_energy = self.compute_segments(
            ts, num_samples=num_samples, constrain_energy=True
        )
        (S, N, N_state, D) = ss_samples.shape
        N_shooting = N_state - 1
        T = ts.shape[0]
        shooting_time_factor = (T - 1) // N_shooting
        assert (
            shooting_time_factor == self.shooting_time_factor
        ), f"{shooting_time_factor}, {T}, {N_shooting}"
        observation_loglik = self.observation_likelihood.log_prob(
            predicted_xs, ys.unsqueeze(0)
        )  # (S,N,T,D)
        # compute the entropy of variational posteriors for shooting states
        state_entropy = self.state_distribution.entropy()  # (N,T-1)
        # compute the shooting constraint likelihoods
        state_constraint_logprob = self.constraint_likelihood.log_prob(
            ss_samples[:, :, 1:, :],
            predicted_xs[:, :, 0:-shooting_time_factor:shooting_time_factor, :],
        ).sum(
            3
        )  # (S,N,T-1)
        # compute initial state KL
        initial_state_kl = self.state_distribution.x0.kl()  # (1,)
        assert state_entropy.shape == (N, N_shooting)
        assert state_constraint_logprob.shape == (S, N, N_shooting)
        scaled_state_constraint_loglik = (
            state_constraint_logprob.mean(0).sum()
        ) / self.num_observations
        # compute the energy likelihood: consecutive shooting states (excluding
        # the initial state) should carry the same energy
        energy_constraint_logprob = self.energy_likelihood.log_prob(
            ss_energy[:, :, 1:-1, :], ss_energy[:, :, 2:, :]
        ).squeeze(3)
        scaled_energy_constraint_loglik = (
            energy_constraint_logprob.mean(0).sum() / self.num_observations
        )
        scaled_state_entropy = state_entropy.sum() / self.num_observations
        scaled_initial_state_kl = initial_state_kl / self.num_observations
        return (
            observation_loglik.mean(),
            scaled_state_constraint_loglik,
            scaled_energy_constraint_loglik,
            scaled_state_entropy,
            scaled_initial_state_kl,
        )
| 19,534 | 34.261733 | 118 | py |
hgp | hgp-main/hgp/models/initialization.py | # MIT License
# Copyright (c) 2021 Pashupati Hegde.
# Copyright (c) 2023 Magnus Ross.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from hgp.misc import constraint_utils
import torch
import numpy as np
from scipy.cluster.vq import kmeans2
from hgp.core.kernels import DerivativeRBF, RBF
from scipy.signal import savgol_filter
from hgp.models.sequence import SequenceModel, UniformShootingModel
from hgp.core.dsvgp import DSVGP_Layer, Hamiltonian_DSVGP_Layer
from plum import dispatch
class MOExactHamiltonian(torch.nn.Module):
    """
    Exact multi-output GP regression helper used to initialize the inducing
    variables of a Hamiltonian GP layer from empirical gradients.

    Fits gradient targets Fx at inputs X with a derivative RBF kernel and
    projects the exact posterior mean onto inducing locations Z.
    """

    def __init__(self, X, Fx, Z, init_ls=1.3, init_var=0.5, init_noise=1e-1):
        """
        @param X: input states (N,D)
        @param Fx: empirical gradient targets (flattened row vector; caller
               passes a (1, N*D) tensor — confirm against DerivativeRBF.hess_K)
        @param Z: inducing locations (M,D)
        @param init_ls: initial kernel lengthscale
        @param init_var: initial kernel signal variance
        @param init_noise: initial observation-noise variance
        """
        super().__init__()
        self.X = X
        self.D = X.shape[1]
        self.N = X.shape[0]
        self.M = Z.shape[0]
        self.Fx = Fx
        self.Z = Z
        self.kern = DerivativeRBF(X.shape[1], init_ls=init_ls, init_var=init_var)
        # Noise is stored in log-space so optimization keeps it positive.
        self.log_noise = torch.nn.Parameter(torch.log(torch.tensor(init_noise)))

    def construct(self):
        """Build and Cholesky-factorize the kernel matrices used by posterior_mean."""
        Ix = torch.eye(self.N * self.D) * torch.exp(self.log_noise)
        self.Kxx = self.kern.hess_K(self.X, use_J=True)  # (2DN,2DN)
        self.Kxz = self.kern.grad_K(self.X, self.Z, use_J=True)  # (M, 2DN)
        # Small jitter keeps the inducing Cholesky numerically stable.
        Iz = torch.eye(self.M) * 1e-6
        self.Kzz = self.kern.K(self.Z)
        self.Lxx = torch.linalg.cholesky(self.Kxx + Ix)  # (N,N) or (D,N,N)
        self.Lzz = torch.linalg.cholesky(self.Kzz + Iz)

    def posterior_mean(self, whiten=True):
        """
        Exact GP posterior mean evaluated at the inducing locations.

        @param whiten: if True, return the whitened representation Lzz^{-1} m
        @return: posterior mean at Z, shape (M,D)
        """
        self.construct()
        # Solve Kxx^{-1} Fx via two triangular solves against the Cholesky factor.
        alpha = torch.linalg.solve_triangular(self.Lxx, self.Fx.T, upper=False)  # (N,D)
        alpha = torch.linalg.solve_triangular(self.Lxx.T, alpha, upper=True)  # (N,D)
        f_update = torch.einsum("nm, nd -> md", self.Kxz, alpha)  # (M,D)
        if whiten:
            return (
                torch.linalg.solve_triangular(
                    self.Lzz, f_update.T.unsqueeze(2), upper=False
                )
                .squeeze(2)
                .T
            )
        else:
            return f_update
@dispatch
def initialize_inducing(
    diffeq: Hamiltonian_DSVGP_Layer, data_ys, data_ts, data_noise=1e-1
):
    """
    Initialization of inducing variabels for Hamiltonian DSVGP layer.
    Inducing locations are initialized at cluster centers
    Inducing values are initialized using empirical data gradients.
    @param diffeq: a GP layer represnting the differential function
    @param data_ys: observed sequence (N,T,D)
    @param data_ts: data observation times, assumed to be equally spaced
    @param data_noise: an initial guess for observation noise.
           NOTE(review): accepted but not used below — the pre-model is built
           with a hard-coded init_noise=0.1; confirm whether it should be
           forwarded.
    @return: the GP object after initialization
    """
    # compute empirical gradients and scale them according to observation time.
    f_xt = np.gradient(data_ys, data_ts, axis=1)
    f_xt = f_xt.reshape(-1, data_ys.shape[-1])  # (N,T-1,D)
    data_ys = data_ys[:, :-1, :]  # (N,T-1,D)
    data_ys = data_ys.reshape(-1, data_ys.shape[-1])  # (N*T-1,D)
    with torch.no_grad():
        # Subsample to keep the exact-GP solve below tractable.
        num_obs_for_initialization = np.minimum(1000, data_ys.shape[0])
        obs_index = np.random.choice(
            data_ys.shape[0], num_obs_for_initialization, replace=False
        )
        # k-means cluster centers of the data become the inducing locations.
        inducing_loc = torch.tensor(
            kmeans2(data_ys, k=diffeq.Um().shape[0], minit="points")[0]
        )
        data_ys = torch.tensor(data_ys[obs_index])
        f_xt = torch.tensor(f_xt[obs_index].T.reshape(1, -1))
        # Exact Hamiltonian GP regression on the gradient targets supplies the
        # (whitened) inducing values.
        pre_model = MOExactHamiltonian(
            data_ys,
            f_xt,
            inducing_loc,
            init_noise=0.1,
            init_ls=2.0,
            init_var=0.5,
        )
        pre_model.construct()
        inducing_val = pre_model.posterior_mean(whiten=True)
    diffeq.inducing_loc().data = inducing_loc.data  # (M,D)
    diffeq.Um().data = inducing_val.data  # (M,D)
    # Carry over the kernel hyperparameters fitted by the pre-model.
    diffeq.kern.lengthscales = pre_model.kern.lengthscales.detach()
    diffeq.kern.variance = pre_model.kern.variance.detach()
    return diffeq
def compute_gpode_intial_inducing(kern, N_u, data_ys, empirical_fs, data_noise=1e-1):
    """
    Constructs initial inducing values using the process described in appendix of
    "Bayesian inference of ODEs with Gaussian processes", Hegde et al., 2021

    (The historical "intial" misspelling is kept: callers depend on this name.)

    @param kern: kernel object; handles both shared and dimension-wise
           (kern.dimwise) covariance modes
    @param N_u: number of inducing points M
    @param data_ys: observed sequences (N,T,D)
    @param empirical_fs: empirical state gradients, aligned with data_ys[:, :-1]
    @param data_noise: noise variance added to the data kernel diagonal
    @return: (inducing locations (M,D), whitened inducing values (M,D))
    """
    # compute empirical gradients and scale them according to observation time.
    empirical_fs = empirical_fs.reshape(-1, empirical_fs.shape[-1])  # (N,T-1,D)
    data_ys = data_ys[:, :-1, :]  # (N,T-1,D)
    data_ys = data_ys.reshape(-1, data_ys.shape[-1])  # (N*T-1,D)
    with torch.no_grad():
        # Subsample to keep the exact-GP solve tractable.
        num_obs_for_initialization = np.minimum(1000, data_ys.shape[0])
        obs_index = np.random.choice(
            data_ys.shape[0], num_obs_for_initialization, replace=False
        )
        # k-means cluster centers of the data become the inducing locations.
        inducing_loc = torch.tensor(kmeans2(data_ys, k=N_u, minit="points")[0])
        data_ys = torch.tensor(data_ys[obs_index])
        empirical_fs = torch.tensor(empirical_fs[obs_index])
        Kxx = kern.K(data_ys)  # (N,N) or (D,N,N)
        Kxz = kern.K(data_ys, inducing_loc)  # (N,M) or (D,N,M)
        Kzz = kern.K(inducing_loc)  # (M,M) or (D,M,M)
        Lxx = torch.linalg.cholesky(
            Kxx + torch.eye(Kxx.shape[1]) * data_noise
        )  # (N,N) or (D,N,N)
        # Jitter keeps the inducing Cholesky numerically stable.
        Lzz = torch.linalg.cholesky(
            Kzz + torch.eye(Kzz.shape[1]) * 1e-6
        )  # (M,M) or (D,M,M)
        # Posterior mean Kzx Kxx^{-1} f via two triangular solves per mode.
        if not kern.dimwise:
            alpha = torch.linalg.solve_triangular(
                Lxx, empirical_fs, upper=False
            )  # (N,D)
            alpha = torch.linalg.solve_triangular(Lxx.T, alpha, upper=True)  # (N,D)
            f_update = torch.einsum("nm, nd -> md", Kxz, alpha)  # (M,D)
        else:
            alpha = torch.linalg.solve_triangular(
                Lxx, empirical_fs.T.unsqueeze(2), upper=False
            )  # (N,D)
            alpha = torch.linalg.solve_triangular(
                Lxx.permute(0, 2, 1), alpha, upper=True
            )  # (N,D)
            f_update = torch.einsum("dnm, dn -> md", Kxz, alpha.squeeze(2))  # (M,D)
        # Whiten the inducing values against the inducing Cholesky factor.
        inducing_val = (
            torch.linalg.solve_triangular(Lzz, f_update.T.unsqueeze(2), upper=False)
            .squeeze(2)
            .T
        )  # (M,D)
    return inducing_loc.data, inducing_val.data
@dispatch
def initialize_inducing(diffeq: DSVGP_Layer, data_ys, data_ts, data_noise=1e-1):
    """
    Initialization of inducing variables for a standard DSVGP layer.

    Inducing locations are initialized at k-means cluster centers of the data;
    inducing values are initialized from empirical (finite-difference) state
    gradients via an exact-GP posterior mean.

    @param diffeq: a GP layer representing the differential function
    @param data_ys: observed sequence (N,T,D)
    @param data_ts: data observation times, assumed to be equally spaced
    @param data_noise: an initial guess for observation noise
    @return: the GP object after initialization
    """
    # Empirical time-derivatives act as regression targets for the GP.
    empirical_fs = np.gradient(data_ys, data_ts, axis=1)  # (N,T,D)
    inducing_loc, inducing_val = compute_gpode_intial_inducing(
        diffeq.kern,
        diffeq.Um().shape[0],
        data_ys,
        empirical_fs,
        # Fix: forward the caller's noise level instead of silently ignoring
        # it (previously the helper's own default was always used).
        data_noise=data_noise,
    )
    diffeq.inducing_loc().data = inducing_loc  # (M,D)
    diffeq.Um().data = inducing_val  # (M,D)
    return diffeq
def initialize_latents_with_data(model, data_ys, data_ts, num_samples=20):
    """
    Initializes shooting states from data.
    Initial state distribution is initialized by solving the ODE backward in time from the first
    observation after inducing variables are initialized.
    Other states are initialized at observed values.
    @param model: a gpode.UniformShootingModel object
    @param data_ys: observed state sequence (N,T,D)
    @param data_ts: observation times (T,)
    @param num_samples: number of samples to consider for initial state initialization
    @return: the model object after initialization
    """
    with torch.no_grad():
        # this makes sure we only take the data points that align with our shooting states
        try:
            init_xs = torch.tensor(
                data_ys[:, 0 : -model.shooting_time_factor : model.shooting_time_factor]
            )
        except AttributeError:
            # Models without a shooting_time_factor attribute use every step.
            init_xs = torch.tensor(data_ys[:, :-1])
        ts = torch.tensor(data_ts)
        # Reversed two-point time vector => integrate backward from t1 to t0.
        init_ts = torch.cat([ts[1:2], ts[0:1]])
        init_x0 = []
        # Average several stochastic backward solves for a stable x0 estimate.
        for _ in range(num_samples):
            init_x0.append(
                model.build_flow(init_xs[:, 0], init_ts).clone().detach().data[:, -1]
            )
        init_x0 = torch.stack(init_x0).mean(0)
        model.state_distribution._initialize(init_x0, init_xs)
    return model
def initalize_noisevar(model, init_noisevar):
    """
    Initializes the likelihood observation-noise variance parameter.

    (The historical "initalize" misspelling is kept: callers depend on it.)

    @param model: a gpode.SequenceModel object
    @param init_noisevar: initialization value for the noise variance
    @return: the model object after initialization
    """
    target = torch.tensor(init_noisevar)
    # The likelihood stores the variance through a softplus transform, so the
    # raw parameter is set to the inverse-softplus of the desired value.
    raw_value = constraint_utils.invsoftplus(target)
    model.observation_likelihood.unconstrained_variance.data = raw_value.data
    return model
def initialize_and_fix_kernel_parameters(
    model, lengthscale_value=1.25, variance_value=0.5, fix=False
):
    """
    Initializes and optionally freezes the kernel hyperparameters.

    @param model: a gpode.SequenceModel object
    @param lengthscale_value: initialization value for the kernel lengthscales
    @param variance_value: initialization value for the kernel signal variance
    @param fix: if True, exclude the kernel parameters from optimization
    @return: the model object after initialization
    """
    kern = model.flow.odefunc.diffeq.kern
    # Parameters live in inverse-softplus space, so the desired constrained
    # values are mapped back through invsoftplus before assignment.
    kern.unconstrained_lengthscales.data = constraint_utils.invsoftplus(
        lengthscale_value * torch.ones_like(kern.unconstrained_lengthscales.data)
    )
    kern.unconstrained_variance.data = constraint_utils.invsoftplus(
        variance_value * torch.ones_like(kern.unconstrained_variance.data)
    )
    if fix:
        kern.unconstrained_lengthscales.requires_grad_(False)
        kern.unconstrained_variance.requires_grad_(False)
    return model
| 11,106 | 37.835664 | 96 | py |
hgp | hgp-main/hgp/models/builder.py | # MIT License
# Copyright (c) 2021 Pashupati Hegde.
# Copyright (c) 2023 Magnus Ross.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import copy
import time
from typing import Union
import torch
from plum import dispatch
from torch.utils.data import DataLoader, Dataset
import hgp
# from scipy.stats import norm
# from scipy.special import logsumexp
import hgp.misc.metrics as metrics
from hgp.core import constraint_likelihoods as constraints
from hgp.core.dsvgp import DSVGP_Layer, Hamiltonian_DSVGP_Layer
from hgp.core.flow import Flow
from hgp.core.nn import HamiltonianNNModel, NNModel
from hgp.core.observation_likelihoods import Gaussian
from hgp.core.states import (
DeltaStateSequenceDistribution,
StateInitialVariationalGaussian,
StateSequenceVariationalFactorizedGaussian,
)
from hgp.misc import train_utils as utils
from hgp.misc.torch_utils import insert_zero_t0, numpy2torch, torch2numpy
from hgp.models.initialization import initialize_inducing, initialize_latents_with_data
from hgp.models.sequence import (
ConsUniformShootingModel,
NNSequenceModel,
NNUniformShootingModel,
SequenceModel,
SubSequenceModel,
UniformShootingModel,
)
@dispatch
def init_and_fit(
    model: Union[UniformShootingModel, SequenceModel],
    args,
    data_ts,
    data_ys,
    return_history=False,
):
    """
    Initialize and train a (shooting) GP sequence model.

    @param model: model to train (modified in place)
    @param args: configuration (uses args.model.inducing_init, num_iter, lr, log_freq)
    @param data_ts: observation times (T,)
    @param data_ys: observed sequences (N,T,D)
    @param return_history: if True, also return the training history
    @return: trained model, or (model, history) when return_history is True
    """
    if args.model.inducing_init:
        model.flow.odefunc.diffeq = initialize_inducing(
            model.flow.odefunc.diffeq, data_ys, data_ts
        )
    model = initialize_latents_with_data(model, data_ys, data_ts)
    # NOTE(review): Trainer is not imported at the top of this module —
    # presumably defined later in the file; confirm it resolves at call time.
    trainer = Trainer()
    model, history = trainer.train(
        model=model,
        loss_function=compute_loss,
        ys=numpy2torch(data_ys),
        ts=numpy2torch(data_ts),
        num_iter=args.num_iter,
        lr=args.lr,
        log_freq=args.log_freq,
    )
    if return_history:
        return model, history
    else:
        return model
@dispatch
def init_and_fit(
    model: SubSequenceModel,
    args,
    data_ts,
    data_ys,
    return_history=False,
):
    """
    Initialize and train a batched (sub-sequence) GP model.

    No latent-state initialization here: sub-sequence models start each batch
    element from its own first observation.

    @param model: model to train (modified in place)
    @param args: configuration (uses args.model.inducing_init, num_iter, lr, log_freq)
    @param data_ts: observation times (T,)
    @param data_ys: observed sequences (N,T,D)
    @param return_history: if True, also return the training history
    @return: trained model, or (model, history) when return_history is True
    """
    if args.model.inducing_init:
        model.flow.odefunc.diffeq = initialize_inducing(
            model.flow.odefunc.diffeq, data_ys, data_ts
        )
    # NOTE(review): BatchedTrainer is not imported at the top of this module —
    # presumably defined later in the file; confirm it resolves at call time.
    trainer = BatchedTrainer()
    model, history = trainer.train(
        model=model,
        loss_function=compute_loss,
        ys=numpy2torch(data_ys),
        ts=numpy2torch(data_ts),
        num_iter=args.num_iter,
        lr=args.lr,
        log_freq=args.log_freq,
    )
    if return_history:
        return model, history
    else:
        return model
@dispatch
def init_and_fit(
    model: NNUniformShootingModel,
    args,
    data_ts,
    data_ys,
    return_history=False,
):
    """
    Initialize and train a neural-network shooting model.

    Only the latent shooting states are initialized (NN models have no
    inducing variables).

    @param model: model to train (modified in place)
    @param args: configuration (uses num_iter, lr, log_freq)
    @param data_ts: observation times (T,)
    @param data_ys: observed sequences (N,T,D)
    @param return_history: if True, also return the training history
    @return: trained model, or (model, history) when return_history is True
    """
    model = initialize_latents_with_data(model, data_ys, data_ts)
    # NOTE(review): NNTrainer is not imported at the top of this module —
    # presumably defined later in the file; confirm it resolves at call time.
    trainer = NNTrainer()
    model, history = trainer.train(
        model=model,
        loss_function=compute_loss,
        ys=numpy2torch(data_ys),
        ts=numpy2torch(data_ts),
        num_iter=args.num_iter,
        lr=args.lr,
        log_freq=args.log_freq,
    )
    if return_history:
        return model, history
    else:
        return model
@dispatch
def init_and_fit(
    model: NNSequenceModel,
    args,
    data_ts,
    data_ys,
    return_history=False,
):
    """
    Train a plain neural-network sequence model with mini-batched sequences.

    @param model: model to train (modified in place)
    @param args: configuration (uses num_iter, lr, log_freq and
           args.model.batch_length/batch_size/num_val_epochs)
    @param data_ts: observation times (T,)
    @param data_ys: observed sequences (N,T,D)
    @param return_history: if True, also return the training history
    @return: trained model, or (model, history) when return_history is True
    """
    # NOTE(review): BatchedNNTrainer is not imported at the top of this module —
    # presumably defined later in the file; confirm it resolves at call time.
    trainer = BatchedNNTrainer()
    model, history = trainer.train(
        model=model,
        loss_function=compute_loss,
        ys=numpy2torch(data_ys),
        ts=numpy2torch(data_ts),
        num_iter=args.num_iter,
        lr=args.lr,
        log_freq=args.log_freq,
        batch_length=args.model.batch_length,
        batch_size=args.model.batch_size,
        num_val_epochs=args.model.num_val_epochs,
    )
    if return_history:
        return model, history
    else:
        return model
def build_model(args, data_ys):
    """
    Builds a HGP model based on training sequence.

    @param args: model setup arguments (args.model.* controls shooting,
           inducing/feature counts, constraint types and scales)
    @param data_ys: observed/training sequence of (N,T,D) dimensions
    @return: a UniformShootingModel / ConsUniformShootingModel when shooting is
             enabled, otherwise a SequenceModel
    """
    N, T, D = data_ys.shape
    # Hamiltonian GP vector field shared by all model variants.
    gp = Hamiltonian_DSVGP_Layer(
        D_in=D,
        M=args.model.num_inducing,
        S=args.model.num_features,
        q_diag=args.model.q_diag,
    )
    flow = Flow(diffeq=gp, solver=args.solver, use_adjoint=args.use_adjoint)
    observation_likelihood = Gaussian(ndim=D, init_val=args.init_noise)
    if args.model.shooting:
        if args.model.shooting_time_factor is None:
            args.model.shooting_time_factor = 1
        assert (
            (T - 1) % args.model.shooting_time_factor
        # fix: "devisable" -> "divisible" in the assertion message
        ) == 0, f"T-1 must be divisible by time factor, T={T}, F={args.model.shooting_time_factor}"
        N_shooting = (T - 1) // args.model.shooting_time_factor
        if args.model.constraint_type not in ["gauss", "laplace"]:
            raise ValueError(
                "invalid constraint likelihood specification, only available options are gauss/laplace"
            )
        constraint_type_class = (
            constraints.Laplace
            if args.model.constraint_type == "laplace"
            else constraints.Gaussian
        )
        constraint_likelihood = constraint_type_class(
            d=1,
            scale=args.model.constraint_initial_scale,
            requires_grad=args.model.constraint_trainable,
        )
        # Energy constraint only exists for the energy-conserving variant.
        energy_likelihood = (
            constraint_type_class(
                d=1,
                scale=args.model.energy_constraint_initial_scale,
                requires_grad=args.model.constraint_trainable,
            )
            if args.model.constrain_energy
            else None
        )
        model = (
            ConsUniformShootingModel
            if args.model.constrain_energy
            else UniformShootingModel
        )(
            flow=flow,
            num_observations=N * T * D,
            state_distribution=StateSequenceVariationalFactorizedGaussian(
                dim_n=N, dim_t=N_shooting, dim_d=D
            ),
            observation_likelihood=observation_likelihood,
            constraint_likelihood=constraint_likelihood,
            shooting_time_factor=args.model.shooting_time_factor,
            energy_likelihood=energy_likelihood,
            ts_dense_scale=args.model.ts_dense_scale,
        )
    else:
        model = SequenceModel(
            flow=flow,
            num_observations=N * T * D,
            state_distribution=StateInitialVariationalGaussian(dim_n=N, dim_d=D),
            observation_likelihood=observation_likelihood,
            constraint_likelihood=None,
            ts_dense_scale=args.model.ts_dense_scale,
        )
    return model
def build_subsequence_model(args, data_ys):
    """
    Builds a HGP-Batched model based on training sequence.

    @param args: model setup arguments
    @param data_ys: observed/training sequence of (N,T,D) dimensions
    @return: a SubSequenceModel ready for batched training
    """
    num_seq, seq_len, dim = data_ys.shape
    derivative_gp = Hamiltonian_DSVGP_Layer(
        D_in=dim,
        M=args.model.num_inducing,
        S=args.model.num_features,
        q_diag=args.model.q_diag,
    )
    ode_flow = Flow(
        diffeq=derivative_gp, solver=args.solver, use_adjoint=args.use_adjoint
    )
    return SubSequenceModel(
        flow=ode_flow,
        num_observations=num_seq * seq_len * dim,
        state_distribution=None,
        observation_likelihood=Gaussian(ndim=dim, init_val=args.init_noise),
        constraint_likelihood=None,
        ts_dense_scale=args.model.ts_dense_scale,
    )
def build_gpode_model(args, data_ys):
    """
    Builds a GP-ODE model based on training sequence.

    @param args: model setup arguments (args.model.* controls shooting,
           inducing/feature counts, constraint type and scale)
    @param data_ys: observed/training sequence of (N,T,D) dimensions
    @return: a UniformShootingModel when shooting is enabled, otherwise a
             SequenceModel
    """
    N, T, D = data_ys.shape
    # Standard (non-Hamiltonian) GP vector field.
    gp = DSVGP_Layer(
        D_in=D,
        D_out=D,
        M=args.model.num_inducing,
        S=args.model.num_features,
        dimwise=args.model.dimwise,
        q_diag=args.model.q_diag,
    )
    flow = Flow(diffeq=gp, solver=args.solver, use_adjoint=args.use_adjoint)
    observation_likelihood = Gaussian(ndim=D, init_val=args.init_noise)
    if args.model.shooting:
        if args.model.shooting_time_factor is None:
            args.model.shooting_time_factor = 1
        assert (
            (T - 1) % args.model.shooting_time_factor
        # fix: "devisable" -> "divisible" in the assertion message
        ) == 0, "T-1 must be divisible by time factor"
        N_shooting = (T - 1) // args.model.shooting_time_factor
        if args.model.constraint_type not in ["gauss", "laplace"]:
            raise ValueError(
                "invalid constraint likelihood specification, only available options are gauss/laplace"
            )
        constraint_type_class = (
            constraints.Laplace
            if args.model.constraint_type == "laplace"
            else constraints.Gaussian
        )
        constraint_likelihood = constraint_type_class(
            d=1,
            scale=args.model.constraint_initial_scale,
            requires_grad=args.model.constraint_trainable,
        )
        model = UniformShootingModel(
            flow=flow,
            num_observations=N * T * D,
            state_distribution=StateSequenceVariationalFactorizedGaussian(
                dim_n=N, dim_t=N_shooting, dim_d=D
            ),
            observation_likelihood=observation_likelihood,
            constraint_likelihood=constraint_likelihood,
            shooting_time_factor=args.model.shooting_time_factor,
            ts_dense_scale=args.model.ts_dense_scale,
        )
    else:
        model = SequenceModel(
            flow=flow,
            num_observations=N * T * D,
            state_distribution=StateInitialVariationalGaussian(dim_n=N, dim_d=D),
            observation_likelihood=observation_likelihood,
            constraint_likelihood=None,
            ts_dense_scale=args.model.ts_dense_scale,
        )
    return model
def build_nn_model(args, data_ys):
    """
    Builds a NN model based on training sequence.

    @param args: model setup arguments; args.model.flow_type selects the
           vector field ("hnn" = Hamiltonian NN, "node" = plain neural ODE)
    @param data_ys: observed/training sequence of (N,T,D) dimensions
    @return: an NNUniformShootingModel when shooting is enabled, otherwise an
             NNSequenceModel
    @raise ValueError: for an unrecognized flow_type
    """
    N, T, D = data_ys.shape
    if args.model.flow_type == "hnn":
        nn = HamiltonianNNModel(
            D_in=D,
            N_layers=args.model.N_layers,
            N_nodes=args.model.N_nodes,
        )
    elif args.model.flow_type == "node":
        nn = NNModel(
            D_in=D,
            D_out=D,
            N_layers=args.model.N_layers,
            N_nodes=args.model.N_nodes,
        )
    else:
        raise ValueError
    flow = Flow(diffeq=nn, solver=args.solver, use_adjoint=args.use_adjoint)
    if args.model.shooting:
        model = NNUniformShootingModel(
            flow=flow,
            num_observations=N * T * D,
            state_distribution=DeltaStateSequenceDistribution(
                dim_n=N, dim_t=T - 1, dim_d=D
            ),
            # NN models are trained with plain L1 losses instead of likelihoods.
            observation_likelihood=torch.nn.L1Loss(),
            constraint_likelihood=torch.nn.L1Loss(),
            shooting_time_factor=args.model.shooting_time_factor,
            ts_dense_scale=args.model.ts_dense_scale,
            alpha=args.model.alpha,
        )
    else:
        model = NNSequenceModel(
            flow=flow,
            num_observations=N * T * D,
            state_distribution=None,
            observation_likelihood=torch.nn.L1Loss(),
            constraint_likelihood=None,
            ts_dense_scale=args.model.ts_dense_scale,
        )
    # hack to get likelihoods to show nan as the model doesn't have one
    model.observation_likelihood.variance = torch.tensor(torch.nan)
    return model
@dispatch
def compute_loss(model: NNSequenceModel, ys, ts):
    """
    Compute loss for model optimization.

    @param model: a model object
    @param ys: true observation sequence
    @param ts: observation times
    @return: loss, nan, nan (placeholders keep the trainer's return shape)
    """
    data_loss = model.build_lowerbound_terms(ys, ts)
    return data_loss, torch.tensor(torch.nan), torch.tensor(torch.nan)
@dispatch
def compute_loss(model: NNUniformShootingModel, ys, ts):
    """
    Compute loss for model optimization.

    @param model: a model object
    @param ys: true observation sequence
    @param ts: observation times
    @return: total loss, observation loss, shooting loss
    """
    data_loss, shoot_loss = model.build_lowerbound_terms(ys, ts)
    total = data_loss + shoot_loss
    return total, data_loss, shoot_loss
@dispatch
def compute_loss(model: UniformShootingModel, ys, ts, **kwargs):
    """
    Compute loss for model optimization.

    @param model: a model object
    @param ys: true observation sequence
    @param ts: observation times
    @param kwargs: forwarded to model.build_lowerbound_terms()
    @return: loss, nll, negative constraint log-prob, nan placeholder,
             initial-state KL, inducing KL
    """
    terms = model.build_lowerbound_terms(ys, ts, **kwargs)
    obs_loglik, constraint_loglik, entropy, x0_kl = terms
    inducing_kl = model.build_inducing_kl()
    # Negative ELBO is minimized.
    elbo = obs_loglik + constraint_loglik + entropy - x0_kl - inducing_kl
    return (
        -elbo,
        -obs_loglik,
        -constraint_loglik,
        torch.tensor(torch.nan),
        x0_kl,
        inducing_kl,
    )
@dispatch
def compute_loss(model: ConsUniformShootingModel, ys, ts, **kwargs):
    """
    Compute the negative ELBO for a ConsUniformShootingModel (adds an energy
    conservation constraint on top of the shooting constraint).
    @param model: a model object
    @param ys: true observation sequence
    @param ts: observation times
    @param kwargs: forwarded to model.build_lowerbound_terms()
    @return: loss, observation nll, state-constraint nll, energy-constraint nll,
        initial-state KL, inducing KL
    """
    (
        obs_loglik,
        state_logprob,
        energy_logprob,
        entropy,
        x0_kl,
    ) = model.build_lowerbound_terms(ys, ts, **kwargs)
    ind_kl = model.build_inducing_kl()
    elbo = (
        obs_loglik + state_logprob + energy_logprob + entropy - x0_kl - ind_kl
    )
    return (
        -elbo,
        -obs_loglik,
        -state_logprob,
        -energy_logprob,
        x0_kl,
        ind_kl,
    )
@dispatch
def compute_loss(model: SequenceModel, ys, ts, **kwargs):
    """
    Compute the optimization loss for a (non-shooting) sequence model.
    @param model: a model object
    @param ys: true observation sequence
    @param ts: observation times
    @return: loss, nll, nan, nan, initial_state_kl, inducing_kl
    """
    obs_loglik, x0_kl = model.build_lowerbound_terms(ys, ts)
    ind_kl = model.build_inducing_kl()
    total = -obs_loglik + x0_kl + ind_kl
    return (
        total,
        -obs_loglik,
        torch.tensor(torch.nan),
        torch.tensor(torch.nan),
        x0_kl,
        ind_kl,
    )
@dispatch
def compute_loss(model: SubSequenceModel, ys, ts, **kwargs):
    """
    Compute the optimization loss for batched (sub-sequence) training.
    @param model: a gpode.SequenceModel object
    @param ys: true observation sequence
    @param ts: observation times
    @param kwargs: unused, accepted for interface uniformity
    @return: loss, nll, inducing_kl
    """
    obs_loglik = model.build_lowerbound_terms(ys, ts)
    ind_kl = model.build_inducing_kl()
    return ind_kl - obs_loglik, -obs_loglik, ind_kl
@dispatch
def compute_single_prediction(
    model: Union[
        UniformShootingModel, ConsUniformShootingModel, NNUniformShootingModel
    ],
    ts,
):
    """
    Draw one predictive trajectory from the optimized initial-state distribution.
    Useful when extrapolating to novel time points.
    @param model: a shooting model object
    @param ts: observation times
    @return: one predictive sample
    """
    # Prepend t=0 so the sampled initial state is the first trajectory point.
    ts_aug = insert_zero_t0(ts)
    x0 = model.state_distribution.x0.sample().squeeze(0)
    return model(x0, ts_aug)
@dispatch
def compute_single_prediction(model: Union[SequenceModel, NNSequenceModel], ts):
    """
    Draw one predictive trajectory from the optimized initial-state distribution
    of a (non-shooting) sequence model.
    @param model: a model object
    @param ts: observation times
    @return: one predictive sample
    """
    # Prepend t=0 so the sampled initial state is the first trajectory point.
    ts_aug = insert_zero_t0(ts)
    x0 = model.state_distribution.sample().squeeze(0)
    return model(x0, ts_aug)
def compute_predictions(model, ts, eval_sample_size=10):
    """
    Draw predictive trajectories from the model's optimized initial state.
    @param model: a model object
    @param ts: observation times
    @param eval_sample_size: number of Monte Carlo samples
    @return: stacked samples with the auxiliary t=0 state stripped
    """
    model.eval()
    with torch.no_grad():
        draws = [
            compute_single_prediction(model, ts) for _ in range(eval_sample_size)
        ]
    # Drop the first time point: it is the artificial t=0 initial state that
    # compute_single_prediction prepends.
    return torch.stack(draws, 0)[:, :, 1:]
def compute_test_predictions(model, y0, ts, eval_sample_size=10):
    """
    Draw predictive ODE trajectories starting from a given initial state.
    @param model: a gpode.SequenceModel object (any callable with .eval())
    @param y0: initial states for the predictions (N, D)
    @param ts: observation times
    @param eval_sample_size: number of Monte Carlo samples
    @return: stacked predictive samples, leading dim eval_sample_size
    """
    model.eval()
    draws = []
    with torch.no_grad():
        for _ in range(eval_sample_size):
            draws.append(model(y0, ts))
    return torch.stack(draws, 0)
def compute_summary(actual, predicted, noise_var, ys=1.0, squeeze_time=True):
    """
    Computes MLL, MSE and relative error as summary metrics between actual and predicted sequences
    @param actual: true observation sequence
    @param predicted: predicted sequence (Monte Carlo samples in the leading dim)
    @param noise_var: noise var predicted by the model
    @param ys: optional scaling factor for standardized data
    @param squeeze_time: optional, if true averages over time dimension
    @return: (MLL, MSE, relative error) — scalars when squeeze_time is True,
        per-time-step arrays otherwise
    """
    # Undo the data standardization before scoring; the noise variance scales
    # quadratically with the output scale (small jitter avoids zero variance).
    actual = actual * ys
    predicted = predicted * ys
    noise_var = noise_var * ys**2 + 1e-8
    if squeeze_time:
        return (
            metrics.log_lik(actual, predicted, noise_var).mean(),
            metrics.mse(actual, predicted).mean(),
            metrics.rel_err(actual, predicted).mean(),
        )
    else:
        # NOTE(review): rel_err is returned unreduced here (no .mean(2)),
        # unlike the other two metrics — presumably intentional; confirm.
        return (
            metrics.log_lik(actual, predicted, noise_var).mean(2),
            metrics.mse(actual, predicted).mean(2),
            metrics.rel_err(actual, predicted),
        )
class Trainer:
    """
    A trainer class for models. Stores optimization trace for monitoring/plotting purpose

    Each loss component is tracked in a running-average meter so learning
    curves can be plotted after training (see plot_learning_curve).
    """
    def __init__(self):
        # One exponential running-average meter per loss component.
        self.loss_meter = utils.CachedRunningAverageMeter(0.98)
        self.observation_nll_meter = utils.CachedRunningAverageMeter(0.98)
        self.state_kl_meter = utils.CachedRunningAverageMeter(0.98)
        self.energy_kl_meter = utils.CachedRunningAverageMeter(0.98)
        self.init_kl_meter = utils.CachedRunningAverageMeter(0.98)
        self.inducing_kl_meter = utils.CachedRunningAverageMeter(0.98)
        self.time_meter = utils.CachedAverageMeter()
        self.compute_loss = compute_loss
    def train(self, model, loss_function, ys, ts, num_iter, lr, log_freq, **kwargs):
        """
        Optimize ``model`` with Adam for up to ``num_iter`` iterations.

        ``loss_function`` must return the 6-tuple
        (loss, observation_nll, state_kl, energy_kl, init_kl, inducing_kl).
        Training can be interrupted with Ctrl-C; the partially trained model
        is returned.  Returns (model, self) so the caller keeps the traces.
        """
        optimizer = torch.optim.Adam(model.parameters(), lr=lr)
        print("Fitting model...")
        for itr in range(1, num_iter):
            try:
                model.train()
                begin = time.time()
                optimizer.zero_grad()
                (
                    loss,
                    observation_nll,
                    state_kl,
                    energy_kl,
                    init_kl,
                    inducing_kl,
                ) = loss_function(model, ys, ts, **kwargs)
                loss.backward()
                optimizer.step()
                # Record every component for post-hoc learning curves.
                self.loss_meter.update(loss.item(), itr)
                self.observation_nll_meter.update(observation_nll.item(), itr)
                self.state_kl_meter.update(state_kl.item(), itr)
                self.energy_kl_meter.update(energy_kl.item(), itr)
                self.init_kl_meter.update(init_kl.item(), itr)
                self.inducing_kl_meter.update(inducing_kl.item(), itr)
                self.time_meter.update(time.time() - begin, itr)
                if itr % log_freq == 0:
                    # "val(avg)" pairs: latest value and running average.
                    log_message = (
                        "Iter {:04d} | Loss {:.2f}({:.2f}) |"
                        "OBS NLL {:.2f}({:.2f}) | XS KL {:.2f}({:.2f}) |"
                        " E KL {:.2f}({:.2f}) |"
                        "X0 KL {:.2f}({:.2f}) | IND KL {:.2f}({:.2f})".format(
                            itr,
                            self.loss_meter.val,
                            self.loss_meter.avg,
                            self.observation_nll_meter.val,
                            self.observation_nll_meter.avg,
                            self.state_kl_meter.val,
                            self.state_kl_meter.avg,
                            self.energy_kl_meter.val,
                            self.energy_kl_meter.avg,
                            self.init_kl_meter.val,
                            self.init_kl_meter.avg,
                            self.inducing_kl_meter.val,
                            self.inducing_kl_meter.avg,
                        )
                    )
                    print(log_message)
            except KeyboardInterrupt:
                # Allow manual early stopping without losing the model.
                break
        return model, self
class MultiDataset(Dataset):
    """Dataset of fixed-length sub-sequences that share a single time grid.

    ``ys`` has shape (num sequences, sequence length, state dim); ``ts`` is
    the common time vector for every sequence.
    """

    def __init__(self, ts, ys):
        self.ts = ts
        self.ys = ys
        # Cache shape information used by the batched trainers.
        self.len, self.batch_length = ys.shape[0], ys.shape[1]
        self.d = ys.shape[-1]

    def __getitem__(self, index):
        # The time grid is shared, so only the observations are indexed.
        return self.ys[index]

    def __len__(self):
        return self.len
class BatchedTrainer:
    """
    A trainer class for batched models. Stores optimization trace for monitoring/plotting purpose

    Training data is sliced into overlapping sub-sequences of length
    ``batch_length`` that are shuffled into mini-batches; the model is
    periodically validated on full-length predictions and the best snapshot
    (by mean NLL) is returned.
    """
    def __init__(self):
        self.loss_meter = utils.CachedRunningAverageMeter(0.98)
        self.observation_nll_meter = utils.CachedRunningAverageMeter(0.98)
        self.state_kl_meter = utils.CachedRunningAverageMeter(0.98)
    def train(
        self,
        model,
        loss_function,
        ys,
        ts,
        num_iter,
        lr,
        log_freq,
        batch_length=10,
        batch_size=32,
        num_val_epochs=10,
        **kwargs,
    ):
        """
        Optimize ``model`` with Adam on sub-sequence mini-batches.

        ``num_iter`` counts gradient steps (not epochs); ``num_val_epochs``
        sets how often (in epochs) to run validation.  Returns
        (best_model, self) where best_model is a deepcopy of the snapshot
        with the lowest validation mean NLL.
        """
        if type(model) != SubSequenceModel:
            raise ValueError("Batch training only supported for SubSequenceModel")
        # only single examples for now
        # Slide a window of length batch_length over the sequence to build
        # every contiguous sub-sequence.
        batch_ys = torch.stack(
            [
                ys[:, i : i + batch_length]
                for i in range(ys.shape[1] - batch_length + 1)
            ],
            axis=0,
        )
        batch_ys = batch_ys.reshape(-1, batch_ys.shape[2], batch_ys.shape[-1])
        # Every sub-sequence reuses the same (relative) time grid.
        batch_ts = ts[:batch_length]
        dataset = MultiDataset(batch_ts, batch_ys)
        trainloader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True)
        optimizer = torch.optim.Adam(model.parameters(), lr=lr)
        i = 0  # global gradient-step counter across epochs
        best_metric = 1e6
        best_model = copy.deepcopy(model)
        print("Fitting model...")
        for itr in range(1, 100000):
            try:
                for ysi in trainloader:
                    i += 1
                    model.train()
                    optimizer.zero_grad()
                    # print(ysi.shape)
                    # ysi = ysi.reshape(-1, dataset.batch_length, dataset.d)
                    loss, obs_like, initial_kl = loss_function(
                        model, ysi, dataset.ts, **kwargs
                    )
                    loss.backward()
                    optimizer.step()
                    self.loss_meter.update(loss.item(), i)
                    self.state_kl_meter.update(initial_kl.item(), i)
                    self.observation_nll_meter.update(obs_like.item(), i)
            except KeyboardInterrupt:
                break
            if num_val_epochs:
                if itr % num_val_epochs == 0:
                    # Validate on full-length rollouts from the first
                    # observation of each training sequence.
                    preds = hgp.models.builder.compute_test_predictions(
                        model,
                        numpy2torch(ys[:, 0, :]),
                        numpy2torch(ts),
                        eval_sample_size=2,
                    )
                    mll, _, _ = hgp.models.builder.compute_summary(
                        torch2numpy(ys),
                        torch2numpy(preds),
                        torch2numpy(model.observation_likelihood.variance),
                    )
                    mnll = -mll
                    # Keep a snapshot of the best model seen so far.
                    if mnll < best_metric:
                        best_model = copy.deepcopy(model)
                        best_metric = mnll
            if itr % log_freq == 0:
                log_message = "Iter {:04d} | Loss {:.3f}({:.3f}) | OBS {:.3f}({:.3f}) | KL {:.3f}({:.3f}) | Best Metric {:.3f}".format(
                    i,
                    self.loss_meter.val,
                    self.loss_meter.avg,
                    self.observation_nll_meter.val,
                    self.observation_nll_meter.avg,
                    self.state_kl_meter.val,
                    self.state_kl_meter.avg,
                    best_metric,
                )
                print(log_message)
            if i > num_iter:
                break
        return best_model, self
class BatchedNNTrainer:
    """
    A trainer class for batched NN models. Stores optimization trace for monitoring/plotting purpose

    Mirrors BatchedTrainer but for NN (shooting) models: the loss function
    returns (loss, observation loss, shooting loss) and validation uses MSE
    rather than NLL as the model-selection metric.
    """
    def __init__(self):
        self.loss_meter = utils.CachedRunningAverageMeter(0.98)
        self.observation_nll_meter = utils.CachedRunningAverageMeter(0.98)
        self.state_kl_meter = utils.CachedRunningAverageMeter(0.98)
    def train(
        self,
        model,
        loss_function,
        ys,
        ts,
        num_iter,
        lr,
        log_freq,
        batch_length=5,
        batch_size=32,
        num_val_epochs=10,
        **kwargs,
    ):
        """
        Optimize ``model`` with Adam on sub-sequence mini-batches.

        ``num_iter`` counts gradient steps; ``num_val_epochs`` sets how often
        (in epochs) to validate.  Returns (best_model, self) where best_model
        is a deepcopy of the snapshot with the lowest validation MSE.
        """
        # only single examples for now
        # Slide a window of length batch_length to build every contiguous
        # sub-sequence; all share the same relative time grid.
        batch_ys = torch.stack(
            [
                ys[:, i : i + batch_length]
                for i in range(ys.shape[1] - batch_length + 1)
            ],
            axis=0,
        )
        batch_ys = batch_ys.reshape(-1, batch_ys.shape[2], batch_ys.shape[-1])
        batch_ts = ts[:batch_length]
        dataset = MultiDataset(batch_ts, batch_ys)
        trainloader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True)
        optimizer = torch.optim.Adam(model.parameters(), lr=lr)
        i = 0  # global gradient-step counter across epochs
        best_metric = 1e6
        best_model = copy.deepcopy(model)
        print("Fitting model...")
        for itr in range(1, 100000):
            try:
                for ysi in trainloader:
                    i += 1
                    model.train()
                    optimizer.zero_grad()
                    loss, obs_loss, shooting_loss = loss_function(
                        model, ysi, dataset.ts, **kwargs
                    )
                    loss.backward()
                    optimizer.step()
                    self.loss_meter.update(loss.item(), i)
                    # BUG FIX: the meters were swapped (obs_loss fed the
                    # state_kl meter and vice versa), so the "OBS"/"KL"
                    # columns in the log were transposed relative to
                    # NNTrainer and BatchedTrainer.
                    self.observation_nll_meter.update(obs_loss.item(), i)
                    self.state_kl_meter.update(shooting_loss.item(), i)
            except KeyboardInterrupt:
                break
            if num_val_epochs:
                if itr % num_val_epochs == 0:
                    # Validate on full-length rollouts from the first
                    # observation of each training sequence.
                    preds = hgp.models.builder.compute_test_predictions(
                        model,
                        numpy2torch(ys[:, 0, :]),
                        numpy2torch(ts),
                        eval_sample_size=1,
                    )
                    _, mse, _ = hgp.models.builder.compute_summary(
                        torch2numpy(ys),
                        torch2numpy(preds),
                        torch2numpy(model.observation_likelihood.variance),
                    )
                    # Keep a snapshot of the best model seen so far.
                    if mse < best_metric:
                        best_model = copy.deepcopy(model)
                        best_metric = mse
            if itr % log_freq == 0:
                log_message = "Iter {:04d} | Loss {:.3f}({:.3f}) | OBS {:.3f}({:.3f}) | KL {:.3f}({:.3f}) | Best Metric {:.3f}".format(
                    i,
                    self.loss_meter.val,
                    self.loss_meter.avg,
                    self.observation_nll_meter.val,
                    self.observation_nll_meter.avg,
                    self.state_kl_meter.val,
                    self.state_kl_meter.avg,
                    best_metric,
                )
                print(log_message)
            if i > num_iter:
                break
        return best_model, self
class NNTrainer:
    """
    A trainer class for NN models. Stores optimization trace for monitoring/plotting purpose

    The loss function must return (loss, observation loss, shooting loss);
    the meters reuse the GP trainer's attribute names so the plotting code
    works unchanged (state_kl_meter holds the shooting loss here).
    """
    def __init__(self):
        self.loss_meter = utils.CachedRunningAverageMeter(0.98)
        self.observation_nll_meter = utils.CachedRunningAverageMeter(0.98)
        self.state_kl_meter = utils.CachedRunningAverageMeter(0.98)
    def train(self, model, loss_function, ys, ts, num_iter, lr, log_freq, **kwargs):
        """
        Optimize ``model`` with Adam for up to ``num_iter`` iterations.
        Ctrl-C stops training early; returns (model, self).
        """
        optimizer = torch.optim.Adam(model.parameters(), lr=lr)
        print("Fitting model...")
        for itr in range(1, num_iter):
            try:
                model.train()
                begin = time.time()
                optimizer.zero_grad()
                loss, obs_loss, shooting_loss = loss_function(model, ys, ts, **kwargs)
                loss.backward()
                optimizer.step()
                self.loss_meter.update(loss.item(), itr)
                self.observation_nll_meter.update(obs_loss.item(), itr)
                self.state_kl_meter.update(shooting_loss.item(), itr)
                if itr % log_freq == 0:
                    # "val(avg)" pairs: latest value and running average.
                    log_message = "Iter {:04d} | Loss {:.3f}({:.3f}) | Obs {:.3f}({:.3f}) | Shooting {:.3f}({:.3f}) |".format(
                        itr,
                        self.loss_meter.val,
                        self.loss_meter.avg,
                        self.observation_nll_meter.val,
                        self.observation_nll_meter.avg,
                        self.state_kl_meter.val,
                        self.state_kl_meter.avg,
                    )
                    print(log_message)
            except KeyboardInterrupt:
                break
        return model, self
| 31,938 | 30.716981 | 135 | py |
hgp | hgp-main/hgp/datasets/hamiltonians.py | # import numpy as np
import functorch
import numpy as np
import torch
from torchdiffeq import odeint
from hgp.misc.ham_utils import build_J
from hgp.misc.torch_utils import numpy2torch, torch2numpy
# from scipy.integrate import odeint
class Data:
    """Container pairing observation sequences ``ys`` (N, T, D) with their
    shared time grid ``ts`` (T,), both stored as float32."""

    def __init__(self, ys, ts):
        # float32 matches the default torch dtype used downstream.
        self.ts = ts.astype(np.float32)
        self.ys = ys.astype(np.float32)

    def __len__(self):
        return len(self.ys)

    def __getitem__(self, index):
        # Every sequence shares the same time grid.
        return self.ys[index], self.ts
class HamiltonianSystem:
    """
    Base class for simulated Hamiltonian systems.

    Generates noisy training trajectories and clean test trajectories by
    integrating Hamilton's equations dx/dt = (dH/dx) J^T with torchdiffeq.
    Subclasses must implement ``hamiltonian(x)`` and
    ``sample_ics(N, rng, ic_mode)``.
    """

    def __init__(
        self,
        state_dimension,
        frequency_train=4,
        T_train=6.0,
        frequency_test=None,
        T_test=None,
        x0=None,
        x0_test=None,
        N_x0s=None,
        N_x0s_test=None,
        noise_var=0.01,
        noise_rel=False,
        device="cpu",
        seed=121,
        ic_mode=None,
    ):
        """
        @param state_dimension: phase-space dimension (even: paired (q, p))
        @param frequency_train: sampling frequency of the training grid
        @param T_train: training horizon
        @param frequency_test: test sampling frequency (defaults to train)
        @param T_test: test horizon (defaults to train)
        @param x0: optional explicit training initial states (N, D)
        @param x0_test: optional explicit test initial states
        @param N_x0s: number of train trajectories when x0 is sampled
        @param N_x0s_test: number of test trajectories when x0_test is sampled
        @param noise_var: observation noise variance added to training data
        @param noise_rel: if True, scale the noise by each trajectory's std
        @param device: unused here; kept for interface compatibility
        @param seed: base RNG seed (separate streams for noise and ICs)
        @param ic_mode: forwarded to sample_ics()
        """
        # Independent streams so e.g. changing N_x0s does not shift the noise.
        noise_rng = np.random.RandomState(seed)
        init_rng_train = np.random.RandomState(seed + 1)
        init_rng_test = np.random.RandomState(seed + 2)
        frequency_test = (
            frequency_test if frequency_test is not None else frequency_train
        )
        T_test = T_test if T_test is not None else T_train
        # BUG FIX: the train/test sampling frequencies were transposed here
        # (S_test held frequency_train and S_train held frequency_test).
        self.S_test = frequency_test
        self.T_test = T_test
        self.S_train = frequency_train
        self.T_train = T_train
        if N_x0s is None:
            N_x0s = 10
        if x0 is None:
            x0 = self.sample_ics(N_x0s, ic_mode=ic_mode, rng=init_rng_train)
        if x0_test is None and N_x0s_test is not None:
            x0_test = self.sample_ics(N_x0s_test, ic_mode=ic_mode, rng=init_rng_test)
        if N_x0s_test is None:
            N_x0s_test = N_x0s
        if x0_test is None:
            # Fall back to evaluating on the training initial conditions.
            x0_test = x0
        self.state_dimension = state_dimension
        # Canonical symplectic matrix J = [[0, I], [-I, 0]].
        self.J = build_J(state_dimension).float()
        self.x0 = x0
        self.x0_test = x0_test
        self.noise_var = noise_var
        # NOTE(review): the train grid uses S*T + 1 points while test uses
        # S*T — presumably intentional (train includes both endpoints); confirm.
        xs_train, ts_train = self.generate_sequence(
            x0=self.x0, sequence_length=int(frequency_train * T_train) + 1, T=T_train
        )
        xs_test, ts_test = self.generate_sequence(
            x0=self.x0_test, sequence_length=int(frequency_test * T_test), T=T_test
        )
        # Corrupt only the training data with i.i.d. Gaussian noise,
        # optionally scaled per state dimension by the trajectory's std.
        xs_train = xs_train + noise_rng.normal(size=xs_train.shape) * (
            self.noise_var**0.5
        ) * (1.0 if not noise_rel else xs_train.std(axis=(1))[:, None, :])
        self.trn = Data(ys=xs_train, ts=ts_train)
        self.tst = Data(ys=xs_test, ts=ts_test)
        # Per-dimension statistics of the noisy training data, used for
        # standardization in scale_output()/scale_ys().
        self.mean_std_ys = self.trn.ys.mean(axis=(0, 1)), self.trn.ys.std(axis=(0, 1))
        self.max_trn = self.trn.ts.max()

    def f(self, t, x):
        """
        Computes derivative function from H: dx/dt = (dH/dx) J^T.
        """
        dHdx = functorch.grad(lambda xi: self.hamiltonian(xi).sum())(x)
        return dHdx @ self.J.T

    def generate_sequence(self, x0, sequence_length, T):
        """
        Generates trajectories given the derivative function.
        @return: (xs, ts) as numpy arrays, xs shaped (N, T, D)
        """
        with torch.no_grad():
            ts = torch.linspace(0, 1, sequence_length) * T
            x0 = torch.tensor(x0, dtype=torch.float32, requires_grad=False)
            xs = torch2numpy(
                odeint(
                    self.f,
                    x0,
                    ts,
                ).permute(1, 0, 2)  # (T, N, D) -> (N, T, D)
            )
        return xs, torch2numpy(ts)

    def scale_output(self, x):
        """Standardize observations with the training mean/std."""
        return (x - self.mean_std_ys[0]) / self.mean_std_ys[1]

    def unscale_output(self, x):
        """Invert scale_output()."""
        return x * self.mean_std_ys[1] + self.mean_std_ys[0]

    def scale_t(self, t):
        """Map times into [0, 1] using the training horizon."""
        return t / self.max_trn

    def unscale_t(self, t):
        """Invert scale_t()."""
        return t * self.max_trn

    def scale_ts(self):
        """Rescale the train and test time grids in place."""
        self.trn.ts = self.scale_t(self.trn.ts)
        self.tst.ts = self.scale_t(self.tst.ts)

    def scale_ys(self):
        """Standardize observations and initial states in place."""
        self.tst.ys = self.scale_output(self.tst.ys)
        self.trn.ys = self.scale_output(self.trn.ys)
        self.x0_test = self.scale_output(self.x0_test)
        self.x0 = self.scale_output(self.x0)
class SimplePendulum(HamiltonianSystem):
    """Planar pendulum with two-dimensional state (angle q, momentum p)."""

    def __init__(
        self,
        **kwargs,
    ):
        super().__init__(2, **kwargs)
        # Axis limits padded slightly beyond the extreme test values.
        q_extent = self.tst.ys[:, :, 0].max()
        p_extent = self.tst.ys[:, :, 1].max()
        self.xlim = (-q_extent - 0.1, q_extent + 0.1)
        self.ylim = (-p_extent - 0.1, p_extent + 0.1)
        self.name = "simple-pendulum"

    def sample_ics(self, N, rng, ic_mode=None):
        """Rejection-sample N states from [-1, 1]^2 with energy below 9.81."""
        accepted = []
        while len(accepted) < N:
            candidate = rng.rand(2) * 2.0 - 1.0
            if self.hamiltonian(numpy2torch(candidate)) < 9.81:
                accepted.append(candidate)
        return np.array(accepted)

    def hamiltonian(self, x, m=1, g=9.81, r=1):
        """H(q, p) = m*g*r*(1 - cos q) + p^2 / (2*m*r^2)."""
        q, p = torch.split(x, x.shape[-1] // 2, dim=-1)
        potential = m * g * r * (1 - torch.cos(q))
        kinetic = 0.5 / (r**2 * m) * p**2
        return potential + kinetic
class SpringPendulum(HamiltonianSystem):
    """Elastic pendulum with state (q_r, q_theta, p_r, p_theta)."""

    def __init__(
        self,
        **kwargs,
    ):
        # Physical constants: mass, rest length, spring stiffness, gravity.
        self.m = 1.0
        self.l0 = 3.0
        self.k = 10
        self.g = 9.81
        super().__init__(4, **kwargs)
        self.lim = self.trn.ys.max()
        self.name = "spring-pendulum"

    def sample_ics(self, N, rng, ic_mode=None):
        """Draw N initial states uniformly from [-0.25, 0.25]^4."""
        return rng.uniform(low=-0.25, high=0.25, size=(N, 4))

    def hamiltonian(self, x):
        """Total energy: kinetic + elastic + gravitational potential."""
        q, p = torch.split(x, x.shape[-1] // 2, dim=-1)
        radial = q[..., 0] + self.l0  # current spring length
        kinetic = (
            0.5
            * (1 / self.m)
            * (p[..., 0] ** 2 + p[..., 1] ** 2 / radial**2)
        )
        elastic = 0.5 * self.k * (q[..., 0]) ** 2
        gravity = -self.m * self.g * radial * torch.cos(q[..., 1])
        return kinetic + elastic + gravity
class HenonHeiles(HamiltonianSystem):
    """Hénon–Heiles system: 2D harmonic oscillator plus a cubic coupling."""

    def __init__(
        self,
        **kwargs,
    ):
        self.mu = 0.8  # strength of the cubic coupling term
        super().__init__(4, **kwargs)
        self.lim = self.trn.ys.max()
        self.name = "henon-heiles"

    def sample_ics(self, N, rng, ic_mode=None):
        """Rejection-sample N bounded states below the escape energy 1/(6*mu^2)."""
        escape_energy = 1 / (6 * self.mu**2)
        accepted = []
        while len(accepted) < N:
            candidate = rng.uniform(low=-1.0, high=1.0, size=4)
            if self.hamiltonian(numpy2torch(candidate)) < escape_energy:
                accepted.append(candidate)
        return np.array(accepted)

    def hamiltonian(self, x):
        """H = mu*(q1^2 q2 - q2^3/3) + 0.5*||x||^2."""
        q, p = torch.split(x, x.shape[-1] // 2, dim=-1)
        coupling = self.mu * (q[..., 0] ** 2 * q[..., 1] - q[..., 1] ** 3 / 3)
        return coupling + 0.5 * (x**2).sum(-1)
def load_system_from_name(name):
    """Map a dataset name to its HamiltonianSystem subclass.

    Raises KeyError for unknown names.
    """
    registry = {
        "simple-pendulum": SimplePendulum,
        "henon-heiles": HenonHeiles,
        "spring-pendulum": SpringPendulum,
    }
    return registry[name]
| 6,888 | 26.890688 | 86 | py |
hgp | hgp-main/hgp/misc/plot_utils.py | from hgp.misc.torch_utils import torch2numpy, numpy2torch
import matplotlib.pyplot as plt
from matplotlib import cm
import matplotlib
import shutil
import numpy as np
from hgp.models.builder import compute_summary
def plot_predictions(data, test_pred, save=None, test_true=None, model_name="Model"):
    """
    Plot predicted sample trajectories for the first test sequence against the
    ground truth and training observations, one panel per state dimension.
    @param data: dataset with trn/tst splits and state_dimension
    @param test_pred: predictive samples, shape (samples, N, T, D)
    @param save: optional path; figure is saved and closed if given, else shown
    @param test_true: unused here; kept for interface compatibility
    @param model_name: label used in the legend and title
    """
    test_ts, test_ys = data.tst.ts, data.tst.ys
    fig, axs = plt.subplots(
        data.state_dimension,
        1,
        figsize=(
            12,
            2 * data.state_dimension,
        ),
        sharex=True,
        sharey=True,
        squeeze=True,
    )
    # for i, (model_name, test_pred) in enumerate(test_pred_dict.items()):
    for d in range(data.state_dimension):
        # axs[d].plot(test_ts, test_pred_mean[n, :, d], c="r", alpha=0.7, zorder=3)
        # Individual sample trajectories drawn translucent to show spread.
        axs[d].plot(test_ts, test_pred[:, 0, :, d].T, c=cm.Set1(2), alpha=0.1, zorder=4)
        axs[d].plot(test_ts, test_ys[0, :, d], c="k", alpha=0.7, zorder=2)
        axs[d].scatter(
            data.trn.ts,
            data.trn.ys[0, :, d],
            c="k",
            s=20,
            marker=".",
            zorder=200,
        )
        axs[d].set_xlabel("$t$")
    # Empty plots below only create legend entries (d is the last panel index).
    axs[d].scatter([], [], c="k", s=20, marker=".", label="Training data")
    axs[d].plot([], [], c="k", alpha=0.7, label="True $\mathbf{x}(t)$")
    axs[d].plot(
        [],
        [],
        c=cm.Set1(2),
        alpha=0.9,
        label=model_name + " Pred. $\mathbf{x}(t)$",
    )
    axs[0].legend(loc="upper right")
    axs[0].set_title(model_name)
    # Label panels q_1..q_{D/2} then p_1..p_{D/2} (positions then momenta).
    for i in range(data.state_dimension):
        half_d = data.state_dimension // 2
        ax_label = ("q_" if i < half_d else "p_") + str(i % half_d + 1)
        axs[i].set_ylabel(f"${ax_label}$")
    # fig.suptitle("Predictive posterior")
    # fig.subplots_adjust(wspace=0.2, hspace=0.2)
    plt.tight_layout()
    if save:
        plt.savefig(save)
        plt.close()
    else:
        plt.show()
def plot_longitudinal(data, test_pred, noisevar, save=None, test_true=None):
    """
    Plot the predictive posterior (mean, posterior and predictive 2-sigma
    bands, samples) for each test sequence, one figure per sequence.
    @param data: dataset with trn/tst splits and state_dimension
    @param test_pred: predictive samples, shape (samples, N, T, D)
    @param noisevar: observation noise variance added for the predictive band
    @param save: optional path prefix; per-sequence PDFs "<save>t<n>.pdf" are
        written when given. Without it, (fig, axs) of the FIRST sequence is
        returned and the remaining sequences are not plotted.
    @param test_true: optional (ts, ys) override for the ground truth
    """
    test_pred_mean, test_pred_postvar = test_pred.mean(0), test_pred.var(0)
    # Predictive variance = posterior variance + observation noise.
    test_pred_predvar = test_pred_postvar + noisevar
    if test_true is None:
        test_ts, test_ys = data.tst.ts, data.tst.ys
    else:
        test_ts, test_ys = test_true
    for n in range(test_pred_mean.shape[0]):
        fig, axs = plt.subplots(
            data.state_dimension,
            1,
            figsize=(
                3 * data.state_dimension,
                8 * 1,
            ),
        )
        for d in range(data.state_dimension):
            axs[d].plot(test_ts, test_pred_mean[n, :, d], c="r", alpha=0.7, zorder=3)
            axs[d].fill_between(
                test_ts,
                test_pred_mean[n, :, d] - 2 * test_pred_postvar[n, :, d] ** 0.5,
                test_pred_mean[n, :, d] + 2 * test_pred_postvar[n, :, d] ** 0.5,
                color="r",
                alpha=0.1,
                zorder=1,
                label="posterior",
            )
            axs[d].fill_between(
                test_ts,
                test_pred_mean[n, :, d] - 2 * test_pred_predvar[n, :, d] ** 0.5,
                test_pred_mean[n, :, d] + 2 * test_pred_predvar[n, :, d] ** 0.5,
                color="b",
                alpha=0.1,
                zorder=0,
                label="predictive",
            )
            axs[d].plot(test_ts, test_pred[:, n, :, d].T, c="g", alpha=0.1, zorder=4)
            axs[d].plot(test_ts, test_ys[n, :, d], c="k", alpha=0.7, zorder=2)
            # Training observations are only overlaid when the train and test
            # splits contain the same sequences.
            if data.trn.ys.shape[0] == data.tst.ys.shape[0]:
                axs[d].scatter(
                    data.trn.ts,
                    data.trn.ys[n, :, d],
                    c="k",
                    s=100,
                    marker=".",
                    zorder=200,
                )
            axs[d].set_title("State {}".format(d + 1))
            axs[d].set_xlabel("Time")
        # Empty plots only create legend entries on the last panel.
        axs[d].scatter([], [], c="k", s=10, marker=".", label="train obs")
        axs[d].plot([], [], c="k", alpha=0.7, label="true")
        axs[d].plot([], [], c="r", alpha=0.7, label="predicted")
        axs[d].legend(loc="upper right")
        fig.suptitle("Predictive posterior")
        fig.subplots_adjust(wspace=0.2, hspace=0.2)
        plt.tight_layout()
        if save:
            plt.savefig(save + f"t{n}.pdf")
            plt.close()
        else:
            # NOTE(review): returning here means only sequence 0 is shown when
            # save is not given — presumably intentional; confirm.
            return fig, axs
def plot_traces(model, data, test_pred, save=None):
    """
    Plot per-time-step test metrics in a 2x2 grid: MLL, MSE, predictive
    variance and squared energy error.
    @param model: model providing observation_likelihood.variance
    @param data: dataset with tst.(ts, ys) and a hamiltonian() method
    @param test_pred: predictive samples, shape (samples, N, T, D), torch tensor
    @param save: optional path; figure is saved there if given, else shown
    """
    # BUG FIX: compute_summary returns three per-step metrics
    # (mll, mse, rel_err); unpacking only two raised a ValueError here.
    mll, mse, _ = compute_summary(
        data.tst.ys,
        torch2numpy(test_pred),
        torch2numpy(model.observation_likelihood.variance),
        squeeze_time=False,
    )
    fig, axs = plt.subplots(2, 2, figsize=(20, 10))
    (ax1, ax2, ax3, ax4) = axs.flatten()
    ax1.plot(data.tst.ts, mll.T)
    ax1.set_title("MLL")
    ax2.plot(data.tst.ts, mse.T)
    ax2.set_title("MSE")
    # Spread of the sample-based predictive distribution, averaged over dims.
    ax3.plot(data.tst.ts, np.var(torch2numpy(test_pred), axis=0).mean(-1).T)
    ax3.set_title("Variance")
    # Squared error of the conserved quantity along the trajectory.
    pred_energy = torch2numpy(data.hamiltonian(numpy2torch(test_pred)))
    true_energy = torch2numpy(data.hamiltonian(numpy2torch(data.tst.ys)))
    energy_err = np.power(true_energy - pred_energy.mean(0), 2)
    ax4.plot(data.tst.ts, energy_err.T)
    ax4.set_title("Energy MSE")
    if save:
        plt.savefig(save)
    else:
        plt.show()
def plot_comparison_traces(test_preds, obs_noises, data, save=None, names=None):
    """
    Overlay per-time-step metrics (MLL, relative error, variance, energy RMSE)
    for several models on one 2x2 figure.
    @param test_preds: iterable of predictive-sample tensors, one per model
    @param obs_noises: iterable of observation noise variances, one per model
    @param data: dataset with trn/tst splits and a hamiltonian() method
    @param save: optional path; figure is saved there if given, else shown
    @param names: optional legend labels, one per model
    """
    fig, axs = plt.subplots(2, 2, figsize=(20, 10))
    names = [None, None] if names is None else names
    for i, (noise, test_pred, name) in enumerate(zip(obs_noises, test_preds, names)):
        mll, mse, rel_err = compute_summary(
            data.tst.ys,
            torch2numpy(test_pred),
            torch2numpy(noise),
            squeeze_time=False,
        )
        (ax1, ax2, ax3, ax4) = axs.flatten()
        ax1.plot(data.tst.ts, mll.T, c=cm.Set2(i), alpha=0.7)
        # Empty plot only creates the legend entry for this model.
        ax1.plot([], [], c=cm.Set2(i), label=name)
        ax1.set_title("MLL")
        ax2.plot(data.tst.ts, rel_err.T, c=cm.Set2(i), alpha=0.7)
        ax2.set_title("Relative Error")
        ax3.plot(
            data.tst.ts,
            np.var(torch2numpy(test_pred), axis=0).mean(-1).T,
            c=cm.Set2(i),
            alpha=0.7,
        )
        ax3.set_title("Variance")
        # Root squared error of the conserved quantity along the trajectory.
        pred_energy = torch2numpy(data.hamiltonian(numpy2torch(test_pred)))
        true_energy = torch2numpy(data.hamiltonian(numpy2torch(data.tst.ys)))
        energy_err = np.sqrt(np.power(true_energy - pred_energy.mean(0), 2))
        ax4.plot(data.tst.ts, np.squeeze(energy_err).T, c=cm.Set2(i), alpha=0.7)
        ax4.set_title("Energy MSE")
    # Mark where extrapolation beyond the training horizon begins.
    for ax in axs.flatten():
        ax.axvline(
            data.trn.ts.max(),
            ls=":",
            c="grey",
            alpha=0.5,
            label="End of train period",
        )
        ax.set_xlabel("T (s)")
    ax1.legend()
    if save:
        plt.savefig(save)
    else:
        plt.show()
def plot_learning_curve(history, save=None):
    """
    Plot the optimization traces (loss, observation NLL, state KL) recorded
    by a trainer's meters.
    @param history: a trainer instance exposing *_meter.iters and *_meter.vals
    @param save: optional path; figure is saved there if given, else shown
    """
    fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(20, 3))
    ax1.plot(history.loss_meter.iters, history.loss_meter.vals)
    ax1.set_title("Loss function")
    ax1.set_yscale("log")
    try:
        ax2.plot(
            history.observation_nll_meter.iters, history.observation_nll_meter.vals
        )
        ax2.set_title("Observation NLL")
        # ax2.set_yscale("log")
        ax3.plot(history.state_kl_meter.iters, history.state_kl_meter.vals)
        ax3.set_title("State KL")
        ax3.set_yscale("log")
    # deals with nn plotting: NN trainers may lack some meters, so missing
    # attributes simply leave those panels empty.
    except AttributeError:
        pass
    if save:
        plt.savefig(save)
    else:
        plt.show()
| 7,650 | 31.012552 | 88 | py |
hgp | hgp-main/hgp/misc/settings.py | import torch
import numpy
class Settings:
    """Central numeric configuration: dtypes, compute device and jitter.

    Exposed as read-only properties so every call site sees consistent values.
    """

    def __init__(self):
        pass

    @property
    def torch_int(self):
        """Default torch integer dtype."""
        return torch.int32

    @property
    def numpy_int(self):
        """Default numpy integer dtype."""
        return numpy.int32

    @property
    def torch_float(self):
        """Default torch floating-point dtype."""
        return torch.float32

    @property
    def numpy_float(self):
        """Default numpy floating-point dtype."""
        return numpy.float32

    @property
    def device(self):
        """First CUDA device when available, otherwise the CPU."""
        return torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    @property
    def jitter(self):
        """Diagonal jitter added for numerical stability."""
        return 1e-5


# Shared singleton used across the code base.
settings = Settings()
| 630 | 16.054054 | 77 | py |
hgp | hgp-main/hgp/misc/ham_utils.py | import torch
def build_J(D_in):
    """Return the D_in x D_in canonical symplectic matrix J = [[0, I], [-I, 0]].

    @param D_in: even phase-space dimension (q and p coordinates paired)
    @return: float tensor of shape (D_in, D_in)
    """
    assert D_in % 2 == 0  # phase space must pair (q, p) coordinates
    half = D_in // 2
    eye = torch.eye(half)
    zero = torch.zeros((half, half))
    top = torch.hstack((zero, eye))
    bottom = torch.hstack((-eye, zero))
    return torch.vstack((top, bottom))
| 241 | 21 | 47 | py |
hgp | hgp-main/hgp/misc/train_utils.py | import random
import numpy as np
import torch
def seed_everything(seed):
    """Seed Python, NumPy and torch RNGs and force deterministic torch ops.

    Note: use_deterministic_algorithms(True) is a global switch — some torch
    ops without a deterministic implementation will raise afterwards.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)  # no-op when CUDA is unavailable
    torch.use_deterministic_algorithms(True)
def get_logger(logpath, filepath, add_stdout=True):
    """
    Configure the root logger to append INFO records to ``logpath`` and return it.

    @param logpath: path of the log file (opened in append mode)
    @param filepath: identifier (typically the script path) logged as the first record
    @param add_stdout: if True, also echo records to the console with timestamps
    @return: the configured root logger

    Note: handlers accumulate on the root logger across repeated calls.
    """
    # BUG FIX: ``logging`` was used without being imported anywhere in this
    # module, so every call raised NameError.  Imported locally to keep the
    # fix self-contained.
    import logging

    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    info_file_handler = logging.FileHandler(logpath, mode="a")
    info_file_handler.setLevel(logging.INFO)
    logger.addHandler(info_file_handler)
    logger.info(filepath)
    if add_stdout:
        consoleHandler = logging.StreamHandler()
        consoleHandler.setFormatter(logging.Formatter("%(asctime)s %(message)s"))
        logger.addHandler(consoleHandler)
    return logger
class AverageMeter(object):
    """Tracks the latest value and the arithmetic mean of a scalar series."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the mean."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
class CachedAverageMeter(object):
    """AverageMeter that also remembers every (value, iteration) pair for plotting."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear the statistics and the recorded trace."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0
        self.vals = []
        self.iters = []

    def update(self, val, iter, n=1):
        """Record ``val`` at optimization step ``iter`` (weight ``n``)."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
        # Keep the full trace so learning curves can be plotted later.
        self.vals.append(val)
        self.iters.append(iter)
class RunningAverageMeter(object):
    """Exponential moving average of a scalar series."""

    def __init__(self, momentum=0.99):
        self.momentum = momentum
        self.reset()

    def reset(self):
        """Clear the state; the next update re-initializes the average."""
        self.val = None
        self.avg = 0

    def update(self, val):
        """Fold ``val`` into the EMA (first call sets the average directly)."""
        self.avg = (
            val
            if self.val is None
            else self.avg * self.momentum + val * (1 - self.momentum)
        )
        self.val = val
class CachedRunningAverageMeter(object):
    """Exponential moving average that also records every (value, iteration) pair."""

    def __init__(self, momentum=0.99):
        self.momentum = momentum
        self.reset()

    def reset(self):
        """Clear the state and the recorded trace."""
        self.val = None
        self.avg = 0
        self.vals = []
        self.iters = []

    def update(self, val, iter):
        """Fold ``val`` at step ``iter`` into the EMA and append to the trace."""
        self.avg = (
            val
            if self.val is None
            else self.avg * self.momentum + val * (1 - self.momentum)
        )
        self.val = val
        self.vals.append(val)
        self.iters.append(iter)
| 2,731 | 23.836364 | 88 | py |
hgp | hgp-main/hgp/misc/torch_utils.py | from hgp.misc.settings import settings
import numpy as np
import torch
# Module-wide device and dtype shortcuts, resolved once from global settings.
device = settings.device
dtype = settings.torch_float
def numpy2torch(x):
    """Convert a numpy array to a tensor with the global dtype, or move an
    existing tensor to the global device."""
    if type(x) is np.ndarray:
        return torch.tensor(x, dtype=dtype).to(device)
    return x.to(device)
def torch2numpy(x):
    """Return ``x`` unchanged if it is already a numpy array, otherwise detach
    the tensor to CPU and convert it to numpy."""
    if type(x) is np.ndarray:
        return x
    return x.detach().cpu().numpy()
def restore_model(model, filename):
    """Load a checkpoint written by save_model into ``model`` and return it.

    map_location keeps tensors on their stored (CPU) location so GPU
    checkpoints load on CPU-only machines.
    """
    checkpoint = torch.load(filename, map_location=lambda storage, loc: storage)
    model.load_state_dict(checkpoint["state_dict"])
    return model
def save_model(model, filename):
    """Write the model weights to ``filename`` in the format restore_model expects."""
    checkpoint = {"state_dict": model.state_dict()}
    torch.save(checkpoint, filename)
def save_model_optimizer(model, optimizer, filename):
    """Write both model and optimizer state to ``filename`` so training can resume."""
    checkpoint = {
        "state_dict": model.state_dict(),
        "optimizer_state_dict": optimizer.state_dict(),
    }
    torch.save(checkpoint, filename)
def insert_zero_t0(ts):
    """Shift ``ts`` forward by one grid step and prepend t=0.

    Used so a sampled initial state occupies the first time point of a
    trajectory while preserving the original spacing.
    """
    step = ts[1] - ts[0]
    return torch.cat([torch.tensor([0.0]), ts + step])
def compute_ts_dense(ts, ts_dense_scale):
    """
    Densify a time grid by inserting intermediate points between neighbours.
    @param ts: time sequence
    @param ts_dense_scale: number of points per original interval; values <= 1
        return ``ts`` unchanged
    @return: dense time sequence ending exactly at ts[-1]
    """
    if ts_dense_scale <= 1:
        return ts
    # Each interval contributes its interior grid (endpoint excluded to
    # avoid duplicates); the final endpoint is appended once.
    segments = [
        torch.linspace(t_a, t_b, ts_dense_scale)[:-1]
        for t_a, t_b in zip(ts[:-1], ts[1:])
    ]
    segments.append(ts[-1:])
    return torch.cat(segments)
| 1,487 | 22.619048 | 85 | py |
hgp | hgp-main/hgp/misc/constraint_utils.py | import torch
import torch.nn.functional as F
def softplus(x):
    """Softplus with a tiny floor so the result is strictly positive."""
    floor = 1e-12
    return F.softplus(x) + floor
def invsoftplus(x):
    """Inverse of softplus(): recover the unconstrained value.

    Uses log(-expm1(-x)) for numerical stability and clamps the shifted
    input at machine epsilon to keep the log argument positive.
    """
    floor = 1e-12
    eps = torch.tensor(torch.finfo(x.dtype).eps).to(x)
    shifted = torch.max(x - floor, eps)
    return shifted + torch.log(-torch.expm1(-shifted))
| 276 | 18.785714 | 75 | py |
hgp | hgp-main/hgp/misc/param.py | import numpy as np
import torch
from hgp.misc import transforms
from hgp.misc.settings import settings
class Param(torch.nn.Module):
    """
    Constrained parameter optimized through an unconstrained transform.
    Similar to the Parameter class in GPflow : https://github.com/GPflow/GPflow/blob/develop/gpflow/base.py

    The raw (unconstrained) value is stored as ``optvar``; calling the
    instance maps it back to the constrained space.
    """

    def __init__(self, value, transform=transforms.Identity(), name="var"):
        super().__init__()
        self.transform = transform
        self.name = name
        # Store the value in the unconstrained space for optimization.
        unconstrained = self.transform.backward(value)
        raw = torch.tensor(data=np.array(unconstrained), dtype=settings.torch_float)
        # NOTE(review): .to(device) on a Parameter returns a plain tensor copy
        # when the device differs — presumably fine on CPU; confirm for GPU.
        self.optvar = torch.nn.Parameter(raw).to(settings.device)

    def __call__(self):
        # Map back to the constrained space on every access.
        return self.transform.forward_tensor(self.optvar)

    def __repr__(self):
        return "{} parameter with {}".format(self.name, self.transform.__str__())
hgp | hgp-main/hgp/misc/transforms.py | from hgp.misc.settings import settings
import numpy as np
import torch
import torch.nn.functional as F
class Identity:
    """No-op transform: constrained and unconstrained spaces coincide."""

    def __init__(self):
        pass

    def __str__(self):
        return "Identity transformation"

    def forward(self, x):
        return x

    def backward(self, y):
        return y

    def forward_tensor(self, x):
        return x

    def backward_tensor(self, y):
        return y
class SoftPlus:
    """Softplus transform mapping the real line onto (lower, inf)."""

    def __init__(self, lower=1e-12):
        self._lower = lower

    def __str__(self):
        return "Softplus transformation"

    def forward(self, x):
        # log(1 + exp(x)) computed stably, shifted above the floor.
        return np.logaddexp(0, x) + self._lower

    def forward_tensor(self, x):
        return F.softplus(x) + self._lower

    def backward_tensor(self, y):
        # Inverse softplus via log(-expm1(-y)), clamped at machine epsilon.
        shifted = torch.max(y - self._lower, torch.tensor(torch.finfo(y.dtype).eps).to(y))
        return shifted + torch.log(-torch.expm1(-shifted))

    def backward(self, y):
        shifted = np.maximum(y - self._lower, np.finfo(settings.numpy_float).eps)
        return shifted + np.log(-np.expm1(-shifted))
class LowerTriangular:
    """Packs/unpacks lower-triangular matrices for unconstrained optimization.

    ``backward`` flattens each (N, N) matrix's lower triangle into a row;
    ``forward`` rebuilds square matrices from the packed rows.
    """

    def __init__(self, N, num_matrices=1):
        self.N = N
        self.num_matrices = num_matrices  # needed to rebuild the square shape

    def __str__(self):
        return "Lower cholesky transformation"

    def forward(self, x):
        # (num_matrices, N*(N+1)/2) -> (num_matrices, N, N)
        out = np.zeros((self.num_matrices, self.N, self.N), dtype=settings.numpy_float)
        rows, cols = np.tril_indices(self.N, 0)
        for m in range(self.num_matrices):
            out[m, rows, cols] = x[m, :]
        return out

    def backward(self, y):
        tril = np.tril_indices(self.N)
        return np.vstack([mat[tril] for mat in y])

    def forward_tensor(self, x):
        out = torch.zeros(
            (self.num_matrices, self.N, self.N),
            dtype=settings.torch_float,
            device=settings.device,
        )
        rows, cols = np.tril_indices(self.N, 0)
        for m in range(self.num_matrices):
            out[m, rows, cols] = x[m, :]
        return out

    def backward_tensor(self, y):
        tril = np.tril_indices(self.N)
        return torch.stack([mat[tril] for mat in y])
class StackedLowerTriangular:
    """Lower-triangular packing for an (num_n, num_m) grid of covariance
    Cholesky factors, each of size (N, N)."""

    def __init__(self, N, num_n, num_m):
        self.N = N
        self.num_n = num_n  # needed to rebuild the stacked shape
        self.num_m = num_m

    def __str__(self):
        return "Lower cholesky transformation for stack sequence of covariance matrices"

    def forward(self, x):
        # (num_n, num_m, N*(N+1)/2) -> (num_n, num_m, N, N)
        out = np.zeros(
            (self.num_n, self.num_m, self.N, self.N), dtype=settings.numpy_float
        )
        rows, cols = np.tril_indices(self.N, 0)
        for a in range(self.num_n):
            for b in range(self.num_m):
                out[a, b, rows, cols] = x[a, b, :]
        return out

    def backward(self, y):
        tril = np.tril_indices(self.N)
        return np.stack([np.stack([mat[tril] for mat in row]) for row in y])

    def forward_tensor(self, x):
        out = torch.zeros(
            (self.num_n, self.num_m, self.N, self.N),
            dtype=settings.torch_float,
            device=settings.device,
        )
        rows, cols = np.tril_indices(self.N, 0)
        for a in range(self.num_n):
            for b in range(self.num_m):
                out[a, b, rows, cols] = x[a, b, :]
        return out

    def backward_tensor(self, y):
        tril = np.tril_indices(self.N)
        return torch.stack([torch.stack([mat[tril] for mat in row]) for row in y])
| 3,936 | 27.323741 | 88 | py |
hgp | hgp-main/tests/test_kernels.py | import pytest
import hgp.core.kernels as kernels
import torch
@pytest.fixture()
def t():
    """Ten random 4-dimensional input points for the kernel tests."""
    scale = 1
    return scale * torch.randn(10, 4)
@pytest.fixture()
def kernel():
    """A DerivativeRBF kernel over 4 input dimensions."""
    return kernels.DerivativeRBF(4)
def test_single_k(t, kernel):
    """The full kernel matrix must agree with pairwise single_k evaluations."""
    K = kernel.K(t)
    for row in range(10):
        for col in range(10):
            assert torch.isclose(K[row, col], kernel.single_k(t[row], t[col]))
def test_grad_single_k(t, kernel):
    """grad_single_k must match the analytic RBF first derivative in each input dim.

    Fix: removed a leftover debug ``print(i)`` that spammed the test output.
    """

    def analytic_dk(xi, yi, dim):
        # d/dx_dim k(x, y) for an RBF kernel with per-dimension lengthscales.
        return (
            -0.5
            * (1 / kernel.lengthscales[dim] ** 2)
            * (xi[dim] - yi[dim])
            * kernel.single_k(xi, yi)
        )

    for j in range(10):
        for k in range(10):
            for i in range(4):
                assert torch.isclose(
                    kernel.grad_single_k(t[k], t[j])[i],
                    analytic_dk(t[k], t[j], i),
                )
def test_hess_single_k(t, kernel):
    """hess_single_k must match the analytic RBF second derivative.

    Fix: the original inner loop reused ``j`` as its loop variable, shadowing
    the outer ``j``. As a result only points t[0..3] were ever compared, the
    outer loop was redundant, and dimensions were conflated with point
    indices. The loops now use distinct names so every pair of points and
    every pair of dimensions is actually exercised. Also removed a leftover
    debug ``print``.
    """

    def analytic_ddk(xi, yi, dim1, dim2):
        # d^2/dx_dim1 dy_dim2 k(x, y) for an RBF kernel with per-dim lengthscales.
        return (
            0.25
            * (1 / kernel.lengthscales[dim2] ** 2)
            * (
                2 * (dim1 == dim2)
                - (1 / kernel.lengthscales[dim1] ** 2)
                * (xi[dim1] - yi[dim1])
                * (xi[dim2] - yi[dim2])
            )
            * kernel.single_k(xi, yi)
        )

    for a in range(10):
        for b in range(10):
            for dim1 in range(4):
                for dim2 in range(4):
                    pred = kernel.hess_single_k(t[a], t[b])[dim1][dim2]
                    ana = analytic_ddk(t[a], t[b], dim1, dim2)
                    assert torch.isclose(pred, ana)
def test_grad_K(t, kernel):
    """Each entry of grad_K must equal the analytic derivative for the
    corresponding (point, dimension) row and column point."""

    def analytic_dk(xi, yi, dim):
        return (
            -0.5
            * (1 / kernel.lengthscales[dim] ** 2)
            * (xi[dim] - yi[dim])
            * kernel.single_k(xi, yi)
        )

    dK = kernel.grad_K(t, t[:9])
    # Rows are laid out as dimension-major blocks of the 10 input points.
    for row in range(4 * 10):
        point, dim = row % 10, row // 10
        for col in range(9):
            assert torch.isclose(
                dK[row, col], analytic_dk(t[point], t[col], dim), atol=1e-6
            )
def test_hess_K(t, kernel):
    """Each entry of hess_K must equal the analytic second derivative for the
    corresponding (point, dimension) row and column."""

    def analytic_ddk(xi, yi, dim1, dim2):
        return (
            0.25
            * (1 / kernel.lengthscales[dim2] ** 2)
            * (
                2 * (dim1 == dim2)
                - (1 / kernel.lengthscales[dim1] ** 2)
                * (xi[dim1] - yi[dim1])
                * (xi[dim2] - yi[dim2])
            )
            * kernel.single_k(xi, yi)
        )

    ddK = kernel.hess_K(t)
    # Rows and columns are both dimension-major blocks of the 10 input points.
    for row in range(4 * 10):
        for col in range(4 * 10):
            expected = analytic_ddk(t[row % 10], t[col % 10], row // 10, col // 10)
            assert torch.isclose(ddK[row, col], expected, atol=1e-6)
def test_hess_K_PSD(t, kernel):
    """The Hessian kernel matrix must be positive definite after adding jitter
    (Cholesky raises if it is not)."""
    ddK = kernel.hess_K(t)
    jitter = 1e-5 * torch.eye(ddK.shape[1])
    torch.linalg.cholesky(ddK + jitter)
def test_variance_setter(kernel):
    """Assigning the variance property must round-trip the value."""
    new_variance = torch.tensor(2.70)
    kernel.variance = new_variance
    assert torch.isclose(kernel.variance, new_variance)
def test_lengthscale_setter(kernel):
    """Assigning the lengthscales property must round-trip the values."""
    new_lengthscales = torch.ones(4) * 2.7
    kernel.lengthscales = new_lengthscales
    assert torch.all(torch.isclose(kernel.lengthscales, new_lengthscales))
| 3,261 | 24.286822 | 77 | py |
hgp | hgp-main/experiments/initial_pendulum/experiment.py | import logging
import os
import hydra
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import torch
from matplotlib import cm
from matplotlib.collections import LineCollection
from matplotlib.legend import _get_legend_handles_labels
from omegaconf import DictConfig
import hgp
from hgp.datasets.hamiltonians import load_system_from_name
from hgp.misc.torch_utils import numpy2torch, torch2numpy
from hgp.misc.train_utils import seed_everything
from hgp.misc.settings import settings
# Global device selection from the project settings; when running on CUDA,
# make newly created tensors default to GPU floats.
device = settings.device
if device.type == 'cuda':
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
# Module-level logger for this experiment script.
log = logging.getLogger(__name__)
@hydra.main(config_path=".", config_name="config", version_base="1.2")
def run_experiment(config: DictConfig):
    """Reproduce the pendulum comparison figure (saved as ./figure4.pdf).

    For two training lengths (1/2 cycle and 1 full cycle of a simple
    pendulum), fits both an HGP and a GPODE model on noisy trajectory data,
    then plots in a 2x4 grid: the true vector field, each model's inferred
    vector field, posterior trajectory samples, and the energy error over
    time. Driven entirely by the hydra ``config``.
    """
    print("Working directory : {}".format(os.getcwd()))
    seed_everything(3)
    (fig, axs) = plt.subplots(
        2,
        4,
        figsize=(12, 6),
        # sharex=True,
        # sharey=True,
    )
    # Fraction of a pendulum cycle observed during training (rows of the figure).
    cycle_lengths = [0.52, 1]
    model_names = ["hgp", "gpode"]
    titles1 = ["Inferred vector field", "Posterior samples"]
    titles2 = [", $\\frac{1}{2}$ cycle", ", $1$ cycle"]
    for c, cycle_length in enumerate(cycle_lengths):
        # Convert the cycle fraction into seconds (pendulum period ~ 2*pi/sqrt(g)).
        train_time = cycle_length * 2 * np.pi / np.sqrt(9.81)
        system = load_system_from_name("simple-pendulum")(
            frequency_train=16,
            T_train=train_time,
            frequency_test=20,
            T_test=(8 * np.pi / np.sqrt(9.81)),
            noise_var=0.01,
            noise_rel=True,
            seed=3,
            N_x0s=1,
        )
        # Fit both models on the same training data and sample test predictions.
        models = []
        preds = []
        for model_name in model_names:
            model = (
                hgp.models.builder.build_model(config, system.trn.ys)
                if model_name == "hgp"
                else hgp.models.builder.build_gpode_model(config, system.trn.ys)
            )
            model = hgp.models.builder.init_and_fit(
                model, config, system.trn.ts, system.trn.ys
            )
            models.append(model)
            preds.append(
                hgp.models.builder.compute_predictions(
                    model,
                    numpy2torch(system.tst.ts),
                    eval_sample_size=config.eval_samples,
                )
            )
        (ax1, ax2, ax3, ax4) = axs[c]
        grid_size = 30
        xlim = system.xlim
        ylim = system.ylim
        factor = 1.5
        # Evaluate the true dynamics f on a 2D grid for the stream plot.
        xx, yy = np.meshgrid(
            np.linspace(xlim[0] * factor, xlim[1] * factor, grid_size),
            np.linspace(ylim[0] * factor, ylim[1] * factor, grid_size),
        )
        grid_x = np.concatenate([xx.reshape(-1, 1), yy.reshape(-1, 1)], 1)
        grid_f = []
        for gx in grid_x:
            grid_f.append(
                torch2numpy(system.f(None, torch.tensor(gx, dtype=torch.float32)))
            )
        grid_f = np.stack(grid_f)
        if c == 0:
            # True vector field is only drawn once (top row); the bottom-left
            # panel is hidden.
            ax1.streamplot(
                xx,
                yy,
                grid_f[:, 0].reshape(xx.shape),
                grid_f[:, 1].reshape(xx.shape),
                color="grey",
                density=0.5,
            )
            ax1.set_title("True vector field")
            # Invisible artists used only to populate the figure legend.
            ax1.scatter(
                [None], [None], marker=".", c="k", alpha=0.8, label="Training data"
            )
            ax1.plot(
                [None],
                [None],
                color="k",
                linestyle="solid",
                alpha=1.0,
                zorder=4,
                label="True trajectory",
            )
            ax1.set_ylabel("$p$")
            ax1.set_xlabel("$q$")
            # ax1.legend(loc="lower right")
        else:
            ax1.axis("off")
        grid_x = torch.tensor(
            np.concatenate([xx.reshape(-1, 1), yy.reshape(-1, 1)], 1),
            dtype=torch.float32,
        )
        for i, model in enumerate(models):
            # Draw posterior vector-field samples by re-sampling the GP
            # (build_cache) 100 times and averaging for the stream plot.
            grid_f = []
            color = cm.Set2(i)
            with torch.no_grad():
                for _ in range(100):
                    model.flow.odefunc.diffeq.build_cache()
                    grid_f.append(model.flow.odefunc.diffeq.forward(None, grid_x))
            grid_f = torch2numpy(torch.stack(grid_f))
            sp = ax2.streamplot(
                xx,
                yy,
                grid_f.mean(0)[:, 0].reshape(xx.shape),
                grid_f.mean(0)[:, 1].reshape(xx.shape),
                color=color,
                arrowsize=1.1,
                arrowstyle="<|-",
                density=0.5,
            )
            ax2.set_title(titles1[0] + titles2[c])
            if c == 0:
                ax2.plot(
                    [None],
                    [None],
                    linestyle=":" if i == 0 else "solid",
                    color=color,
                    label=model_names[i].upper(),
                )
            # HGP is drawn dotted, GPODE solid, throughout the figure.
            sp.lines.set(alpha=0.8, ls=":" if i == 0 else "solid")
            # Posterior trajectory samples (at most 10 per model) in phase space.
            for s in range(min(preds[i].shape[0], 10)):
                for n in range(preds[i].shape[1]):
                    points = preds[i][s, n].reshape(-1, 1, 2)
                    segments = np.concatenate([points[:-1], points[1:]], axis=1)
                    lc = LineCollection(
                        segments,
                        linestyle=":" if i == 0 else "solid",
                        alpha=0.3,
                        color=color,
                    )
                    lc.set_linewidth(2.5)
                    ax3.add_collection(lc)
            ax3.set_title(titles1[1] + titles2[c])
            # Per-sample energy error vs. the true Hamiltonian over test time.
            pred_energy = torch2numpy(system.hamiltonian(numpy2torch(preds[i])))
            true_energy = torch2numpy(system.hamiltonian(numpy2torch(system.tst.ys)))
            energy_err = np.sqrt(np.power(true_energy - pred_energy, 2))
            ax4.plot(
                system.tst.ts,
                np.squeeze(energy_err).T,
                linestyle=":" if i == 0 else "solid",
                alpha=0.3,
                color=color,
            )
            ax4.set_title("Energy MSE" + titles2[c])
            ax4.set_xlabel("$t$")
        # Overlay the true test trajectories in black.
        for n in range(system.tst.ys.shape[0]):
            points = system.tst.ys[n].reshape(-1, 1, 2)
            segments = np.concatenate([points[:-1], points[1:]], axis=1)
            lc = LineCollection(
                segments, color="k", linestyle="solid", alpha=1.0, zorder=4
            )
            lc.set_linewidth(0.5)
            ax3.add_collection(lc)
        # Training observations as black dots.
        ax3.scatter(
            system.trn.ys[:, :, 0], system.trn.ys[:, :, 1], marker=".", c="k", alpha=1
        )
        for ax in [ax1, ax2, ax3]:
            ax.set_xlim(xlim[0] * factor, xlim[1] * factor)
            ax.set_ylim(ylim[0] * factor, ylim[1] * factor)
    fig.legend(*_get_legend_handles_labels(fig.axes), loc=(0.1, 0.3))
    plt.tight_layout()
    plt.savefig("./figure4.pdf")
if __name__ == "__main__":
    # Hydra entry point: parse the config and run the experiment.
    run_experiment()
| 6,928 | 31.078704 | 86 | py |
hgp | hgp-main/experiments/forward_trajectory/experiment.py | import logging
import os
import pickle
from distutils.dir_util import copy_tree
from pathlib import Path
import hydra
import numpy as np
import torch
from omegaconf import DictConfig
import hgp
from hgp.datasets.hamiltonians import load_system_from_name
from hgp.misc.plot_utils import (
plot_comparison_traces,
plot_learning_curve,
plot_longitudinal,
)
from hgp.misc.torch_utils import numpy2torch, torch2numpy
from hgp.misc.train_utils import seed_everything
from hgp.misc.settings import settings
# Global device selection from the project settings; when running on CUDA,
# make newly created tensors default to GPU floats.
device = settings.device
if device.type == 'cuda':
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
# Module-level logger for this experiment script.
log = logging.getLogger(__name__)
@hydra.main(config_path="conf", config_name="config", version_base="1.2")
def run_experiment(config: DictConfig):
    """Train a single model on one trajectory and evaluate forward prediction.

    Builds the dynamical system from the hydra ``config``, fits the chosen
    model type (hgp / hgp_subseq / gpode / nn), predicts over a test horizon
    twice as long as training, computes summary metrics outside the training
    region, saves plots and pickled metrics to the working directory, and
    optionally copies all outputs to ``data/results/<exp_dir>``.
    """
    seed_everything(config.system.seed)
    # Test horizon is this many multiples of the training horizon.
    times_ahead = 2
    system = load_system_from_name(config.system.system_name)(
        frequency_train=config.system.frequency_train,
        T_train=config.system.data_obs_T,
        frequency_test=config.system.frequency_test,
        T_test=times_ahead * config.system.data_obs_T,
        noise_var=config.system.data_obs_noise_var,
        noise_rel=config.system.noise_rel,
        seed=config.system.seed,
        N_x0s=1,
    )
    # Normalize times and observations before model fitting.
    system.scale_ts()
    system.scale_ys()
    # Dispatch on the configured model type.
    if config.model.model_type == "hgp":
        model = hgp.models.builder.build_model(config, system.trn.ys)
    elif config.model.model_type == "hgp_subseq":
        model = hgp.models.builder.build_subsequence_model(config, system.trn.ys)
    elif config.model.model_type == "gpode":
        model = hgp.models.builder.build_gpode_model(config, system.trn.ys)
    elif config.model.model_type == "nn":
        model = hgp.models.builder.build_nn_model(config, system.trn.ys)
    else:
        raise ValueError("Model type not valid.")
    model, history = hgp.models.builder.init_and_fit(
        model, config, system.trn.ts, system.trn.ys, return_history=True
    )
    plot_learning_curve(
        history, save=os.path.join(os.getcwd(), f"lc_{config.model.name}.pdf")
    )
    print("Generating predictions...")
    # GP-based models predict directly from the posterior; the NN baseline
    # instead rolls trajectories forward from the training initial states.
    preds = (
        hgp.models.builder.compute_predictions(
            model,
            numpy2torch(system.tst.ts),
            eval_sample_size=config.eval_samples,
        )
        if config.model.model_type in ["hgp", "gpode"]
        else hgp.models.builder.compute_test_predictions(
            model,
            numpy2torch(system.trn.ys[:, 0, :]),
            numpy2torch(system.tst.ts),
            eval_sample_size=config.eval_samples,
        )
    )
    model_vars = model.observation_likelihood.variance
    # we need to only compute the metrics outside the training region;
    # the test data here also contains the noise-free function in the
    # training range
    test_idx = system.tst.ts > system.trn.ts.max()
    mll, mse, rel_err = hgp.models.builder.compute_summary(
        system.tst.ys[:, test_idx, :],
        torch2numpy(preds[:, :, test_idx, :]),
        torch2numpy(model_vars),
        squeeze_time=False,
    )
    plot_longitudinal(
        system,
        torch2numpy(preds),
        torch2numpy(model_vars),
        save=os.path.join(os.getcwd(), f"{config.model.name}_trajpost"),
    )
    # print(model)
    # Per-dimension metrics ("full") and scalar averages ("summary").
    res_dict = {}
    full_res_dict = {}
    full_res_dict["rmse"] = np.sqrt(mse)
    full_res_dict["mll"] = mll
    full_res_dict["rel_err"] = rel_err
    full_res_dict["preds"] = torch2numpy(preds)
    res_dict["rmse"] = np.sqrt(mse.mean())
    res_dict["mll"] = mll.mean()
    res_dict["rel_err"] = rel_err.mean()
    log.info(res_dict)
    # also save data for plotting
    full_res_dict["system"] = system
    with open(os.path.join(os.getcwd(), "full_metrics.pickle"), "wb") as handle:
        pickle.dump(full_res_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
    with open(os.path.join(os.getcwd(), "summary_metrics.pickle"), "wb") as handle:
        pickle.dump(res_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
    plot_comparison_traces(
        [preds],
        [model_vars],
        system,
        save=os.path.join(os.getcwd(), "comparison_traces.pdf"),
        names=config.model.name,
    )
    # Optionally mirror all outputs into the shared results directory; when
    # launched as a hydra multirun, the numeric run subdirectory is kept.
    if config.exp_dir:
        cwd = os.getcwd()
        last_path = cwd.split("/")[-1]
        main_path = f"data/results/{config.exp_dir}/"
        is_multi = last_path if len(last_path) <= 3 else ""
        res_path = hydra.utils.to_absolute_path(main_path + is_multi)
        filepath = Path(res_path)
        filepath.mkdir(parents=True, exist_ok=True)
        copy_tree(os.getcwd(), res_path)
if __name__ == "__main__":
    # Hydra entry point: parse the config and run the experiment.
    run_experiment()
| 4,625 | 30.469388 | 83 | py |
hgp | hgp-main/experiments/multiple_trajectory/experiment.py | import logging
import os
import pickle
from distutils.dir_util import copy_tree
from pathlib import Path
import hydra
import numpy as np
import torch
from omegaconf import DictConfig
import hgp
from hgp.datasets.hamiltonians import load_system_from_name
from hgp.misc.plot_utils import (
plot_comparison_traces,
plot_learning_curve,
plot_longitudinal,
)
from hgp.misc.torch_utils import numpy2torch, torch2numpy
from hgp.misc.train_utils import seed_everything
from hgp.misc.settings import settings
# Global device selection from the project settings; when running on CUDA,
# make newly created tensors default to GPU floats.
device = settings.device
if device.type == 'cuda':
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
# Module-level logger for this experiment script.
log = logging.getLogger(__name__)
@hydra.main(config_path="conf", config_name="config", version_base="1.2")
def run_experiment(config: DictConfig):
    """Train on multiple trajectories and evaluate on unseen initial states.

    Builds the system with ``config.system.num_traj`` training trajectories
    and 25 held-out test initial states, fits the chosen model type
    (hgp / hgp_subseq / gpode / nn), evaluates predictions from the unseen
    initial states over a 3x training horizon, saves plots and pickled
    metrics, and optionally copies outputs to ``data/results/<exp_dir>``.
    """
    seed_everything(config.system.seed)
    # assert config.shooting == True
    # Test horizon is this many multiples of the training horizon.
    times_ahead = 3
    system = load_system_from_name(config.system.system_name)(
        frequency_train=config.system.frequency_train,
        T_train=config.system.data_obs_T,
        frequency_test=config.system.frequency_test,
        T_test=times_ahead * config.system.data_obs_T,
        noise_var=config.system.data_obs_noise_var,
        noise_rel=config.system.noise_rel,
        seed=config.system.seed,
        N_x0s=config.system.num_traj,
        N_x0s_test=25,
    )
    # Normalize times and observations before model fitting.
    system.scale_ts()
    system.scale_ys()
    # Dispatch on the configured model type.
    if config.model.model_type == "hgp":
        model = hgp.models.builder.build_model(config, system.trn.ys)
    elif config.model.model_type == "hgp_subseq":
        model = hgp.models.builder.build_subsequence_model(config, system.trn.ys)
    elif config.model.model_type == "gpode":
        model = hgp.models.builder.build_gpode_model(config, system.trn.ys)
    elif config.model.model_type == "nn":
        model = hgp.models.builder.build_nn_model(config, system.trn.ys)
    else:
        raise ValueError("Model type not valid.")
    model, history = hgp.models.builder.init_and_fit(
        model, config, system.trn.ts, system.trn.ys, return_history=True
    )
    plot_learning_curve(
        history, save=os.path.join(os.getcwd(), f"lc_{config.model.name}.pdf")
    )
    model_vars = model.observation_likelihood.variance
    print("Generating predictions...")
    # Test predictions always start from the held-out initial states.
    preds = hgp.models.builder.compute_test_predictions(
        model,
        system.x0_test,
        numpy2torch(system.tst.ts),
        eval_sample_size=config.eval_samples,
    )
    # Training-range predictions: GP-based models predict from the posterior;
    # the NN baseline rolls forward from the training initial states.
    train_preds = (
        hgp.models.builder.compute_predictions(
            model,
            numpy2torch(system.trn.ts),
            eval_sample_size=config.eval_samples,
        )
        if config.model.model_type in ["hgp", "gpode"]
        else hgp.models.builder.compute_test_predictions(
            model,
            numpy2torch(system.trn.ys[:, 0, :]),
            numpy2torch(system.trn.ts),
            eval_sample_size=config.eval_samples,
        )
    )
    mll, mse, rel_err = hgp.models.builder.compute_summary(
        system.tst.ys,
        torch2numpy(preds),
        torch2numpy(model.observation_likelihood.variance),
        squeeze_time=False,
    )
    # Plot at most 5 test trajectories.
    plot_longitudinal(
        system,
        torch2numpy(preds[:, : min(np.shape(preds)[1], 5)]),
        torch2numpy(model.observation_likelihood.variance),
        save=os.path.join(os.getcwd(), f"{config.model.name}_trajpost"),
    )
    # NOTE(review): the ``max(..., 5)`` below keeps ALL training trajectories
    # (the test plot above uses ``min`` to cap at 5) — possibly intentional,
    # possibly a typo; confirm before changing.
    plot_longitudinal(
        system,
        torch2numpy(train_preds[:, : max(np.shape(train_preds)[1], 5)]),
        torch2numpy(model.observation_likelihood.variance),
        save=os.path.join(os.getcwd(), f"train_{config.model.name}_trajpost"),
        test_true=(system.trn.ts, system.trn.ys),
    )
    # Per-dimension metrics ("full") and scalar averages ("summary").
    res_dict = {}
    full_res_dict = {}
    full_res_dict["rmse"] = np.sqrt(mse)
    full_res_dict["mll"] = mll
    full_res_dict["rel_err"] = rel_err
    full_res_dict["preds"] = torch2numpy(preds)
    res_dict["rmse"] = np.sqrt(mse.mean())
    res_dict["mll"] = mll.mean()
    res_dict["rel_err"] = rel_err.mean()
    log.info(res_dict)
    full_res_dict["system"] = system
    with open(os.path.join(os.getcwd(), "metrics.pickle"), "wb") as handle:
        pickle.dump(full_res_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
    with open(os.path.join(os.getcwd(), "summary_metrics.pickle"), "wb") as handle:
        pickle.dump(res_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
    plot_comparison_traces(
        [preds],
        [model_vars],
        system,
        save=os.path.join(os.getcwd(), "comparison_traces.pdf"),
        names=[config.model.name],
    )
    # Optionally mirror all outputs into the shared results directory; when
    # launched as a hydra multirun, the numeric run subdirectory is kept.
    if config.exp_dir:
        cwd = os.getcwd()
        last_path = cwd.split("/")[-1]
        main_path = f"data/results/{config.exp_dir}/"
        is_multi = last_path if len(last_path) <= 3 else ""
        res_path = hydra.utils.to_absolute_path(main_path + is_multi)
        filepath = Path(res_path)
        filepath.mkdir(parents=True, exist_ok=True)
        copy_tree(os.getcwd(), res_path)
if __name__ == "__main__":
    # Hydra entry point: parse the config and run the experiment.
    run_experiment()
| 5,012 | 29.944444 | 83 | py |
SubGNN | SubGNN-main/SubGNN/anchor_patch_samplers.py | # General
import numpy as np
import random
from collections import defaultdict
import networkx as nx
import sys
import time
# Pytorch
import torch
# Our Methods
sys.path.insert(0, '..') # add config to path
import config
import subgraph_utils
#######################################################
# Triangular Random Walks
def is_triangle(graph, a, b, c):
    '''
    Return True if node c closes a triangle with nodes a and b, i.e. c is a
    common neighbor of both a and b (a and b are assumed to be adjacent).
    '''
    common_neighbors = set(graph.neighbors(a)) & set(graph.neighbors(b))
    return c in common_neighbors
def get_neighbors(networkx_graph, subgraph, all_valid_border_nodes, prev_node, curr_node, inside):
    '''
    Partition curr_node's neighbors into triangle and non-triangle neighbors
    with respect to the edge (prev_node, curr_node).

    When 'inside' is True the walk is restricted to the subgraph; otherwise
    neighbors come from the full graph but are filtered down to
    all_valid_border_nodes. Returns (triangular_neighbors, non_triangular_neighbors).
    '''
    # Restrict to the subgraph for internal walks, the full graph otherwise.
    graph = subgraph if inside else networkx_graph
    candidates = list(graph.neighbors(curr_node))
    if not inside:
        # Border walks may only visit nodes in the valid border set.
        candidates = [node for node in candidates if node in all_valid_border_nodes]
    # Split candidates by whether they close a triangle with (prev, curr).
    triangular = []
    non_triangular = []
    for node in candidates:
        if is_triangle(graph, prev_node, curr_node, node):
            triangular.append(node)
        else:
            non_triangular.append(node)
    return triangular, non_triangular
def triangular_random_walk(hparams, networkx_graph, anchor_patch_subgraph, walk_len, in_border_nodes, all_valid_nodes, inside):
    '''
    Perform a triangular random walk
    This is used (1) to sample anchor patches and (2) to generate internal/border representations of the anchor patches
    when using function for (1), in_border_nodes, non_subgraph_nodes, all_valid_nodes = None; inside = True & anchor_patch_subgraph is actually the entire networkx graph
    Inputs:
        - hparams: dict of hyperparameters (uses 'rw_beta', the probability of stepping to a triangle-closing neighbor)
        - networkx graph: base underlying graph
        - anchor patch subgraph: this is either the anchor patch subgraph or in the case of (1), it's actually the base underlying graph
        - walk_len: maximum length of the random walk (the walk may end early if a node has no valid neighbors)
        - in_border_nodes: nodes that are in the subgraph, but that have an edge to a node external to the subgraph
        - all_valid_nodes: the union of in_border_nodes + nodes that are not in the subgraph but are in the underlying graph
        - inside: whether this random walk is internal or border to the subgraph (note that when using this method to sample anchor patches, inside=True)
    Output:
        - visited: list of node ids visited during the walk
    '''
    if inside:
        # randomly sample a start node from the subgraph/graph
        prev_node = np.random.choice(list(anchor_patch_subgraph.nodes()))
        # get all of the neighbors for the start node
        neighbor_nodes = list(anchor_patch_subgraph.neighbors(prev_node))
        # sample node from neighbors
        curr_node = np.random.choice(neighbor_nodes) if len(neighbor_nodes) > 0 else config.PAD_VALUE #we're using the PAD_VALUE as a sentinel that the first node has no neighbors
        all_valid_nodes = None
    else:
        # randomly sample a start node from 'in_border_nodes' and restrict neighboring nodes to only those in 'all_valid_nodes'
        prev_node = np.random.choice(in_border_nodes, 1)[0]
        neighbor_nodes = [n for n in list(networkx_graph.neighbors(prev_node)) if n in all_valid_nodes]
        curr_node = np.random.choice(neighbor_nodes, 1)[0] if len(neighbor_nodes) > 0 else config.PAD_VALUE
    # if the first node has no neighbors, the random walk is only length 1 & we return it immediately
    if curr_node == config.PAD_VALUE:
        return [prev_node]
    visited = [prev_node, curr_node]
    # now that we've already performed a walk of length 2, let's perform the rest of the walk
    for k in range(walk_len - 2):
        #get the triangular and non-triangular neighbors for the current node given the previously visited node
        triangular_neighbors, non_triangular_neighbors = get_neighbors(networkx_graph, anchor_patch_subgraph, all_valid_nodes, prev_node, curr_node, inside=inside)
        neighbors = triangular_neighbors + non_triangular_neighbors
        if len(neighbors) == 0: break # if there are no neighbors, end walk
        else:
            # if there are no neighbors of one type, sample from the other type
            if len(triangular_neighbors) == 0:
                next_node = np.random.choice(non_triangular_neighbors)
            elif len(non_triangular_neighbors) == 0:
                next_node = np.random.choice(triangular_neighbors)
            # with probability 'rw_beta', we go to a triangular (triangle-closing) node
            elif random.uniform(0, 1) <= hparams['rw_beta'] and len(triangular_neighbors) != 0:
                next_node = np.random.choice(triangular_neighbors)
            # otherwise we go to a non-triangular node
            else:
                next_node = np.random.choice(non_triangular_neighbors)
            prev_node = curr_node
            curr_node = next_node
            visited.append(next_node)
    # we return a list of the node ids visited during the walk
    return visited
#######################################################
# Perform random walks over the sampled structure anchor patches
def perform_random_walks(hparams, networkx_graph, anchor_patch_ids, inside):
    '''
    Performs random walks over the sampled anchor patches.
    If inside=True, performs random walks over the inside of the subgraph. Otherwise, performs random walks over the subgraph border
    (i.e. nodes in the subgraph that have an external edge + nodes external to the subgraph).

    Inputs:
        - hparams: dict of hyperparameters (uses 'n_triangular_walks' and 'random_walk_len')
        - networkx_graph: base underlying graph
        - anchor_patch_ids: (n sampled anchor patches, max patch len) tensor of node ids, padded with config.PAD_VALUE
        - inside: whether to walk the patch interior or its border
    Returns padded tensor of all walks of shape (n sampled anchor patches, n_triangular_walks, random_walk_len)
    '''
    n_sampled_patches, max_patch_len = anchor_patch_ids.shape
    all_patch_walks = []
    for anchor_patch in anchor_patch_ids:
        curr_anchor_patch = anchor_patch[anchor_patch != config.PAD_VALUE] #remove any padding
        # if anchor patch is only padding, then we just add a tensor filled with PAD_VALUE to maintain the padding
        if curr_anchor_patch.shape[0] == 0:
            all_patch_walks.append(torch.zeros((hparams['n_triangular_walks'], hparams['random_walk_len']), dtype=torch.long).fill_(config.PAD_VALUE))
        else:
            anchor_patch_subgraph = networkx_graph.subgraph(curr_anchor_patch.numpy()) # create a networkx graph from the anchor patch
            if not inside:
                # get nodes in subgraph that have an edge to a node not in the subgraph & all of the nodes that are not in the subgraph
                in_border_nodes, non_subgraph_nodes = subgraph_utils.get_border_nodes(networkx_graph, anchor_patch_subgraph)
                # the border random walk can operate over all nodes on the border of the subgraph + all nodes external to the subgraph
                all_valid_nodes = set(in_border_nodes).union(set(non_subgraph_nodes))
            else: in_border_nodes, non_subgraph_nodes, all_valid_nodes = None, None, None
            # perform 'n_triangular_walks' number of walks over the anchor patch (each walk's length = 'random_walk_len')
            # pad the walks and stack them to produce a final tensor of shape (n sampled anchor patches, n_triangular_walks, random_walk_len)
            walks = []
            for w in range(hparams['n_triangular_walks']):
                walk = triangular_random_walk(hparams, networkx_graph, anchor_patch_subgraph, hparams['random_walk_len'], in_border_nodes, all_valid_nodes, inside=inside)
                # right-pad each walk (walks may end early) up to 'random_walk_len'
                fill_len = hparams['random_walk_len'] - len(walk)
                walk = torch.cat([torch.LongTensor(walk),torch.LongTensor((fill_len)).fill_(config.PAD_VALUE)])
                walks.append(walk)
            all_patch_walks.append(torch.stack(walks))
    all_patch_walks = torch.stack(all_patch_walks).view(n_sampled_patches, hparams['n_triangular_walks'], hparams['random_walk_len'])
    return all_patch_walks
#######################################################
# Sample anchor patches
def sample_neighborhood_anchor_patch(hparams, networkx_graph, cc_ids, border_set, sample_inside=True ):
    '''
    Sample neighborhood (N channel) anchor patches, which are single nodes.

    Inputs:
        - hparams: dict of hyperparameters (uses 'n_anchor_patches_N_in' / 'n_anchor_patches_N_out')
        - cc_ids: (batch_sz, max_n_cc, max_cc_len) padded tensor of connected-component node ids
        - border_set: padded tensor of precomputed border nodes per component (same leading dims)
        - sample_inside: if True, sample from inside each component; otherwise from its border set
    Returns a tensor of shape (batch_sz, max_n_cc, n_anchor_patches_N_in OR n_anchor_patches_N_out) that contains the sampled
    neighborhood internal or border anchor patches
    '''
    batch_sz, max_n_cc, _ = cc_ids.shape
    components = cc_ids.view(cc_ids.shape[0]*cc_ids.shape[1], -1) #(batch_sz * max_n_cc, max_cc_len)
    # sample internal N anchor patch
    if sample_inside:
        all_samples = []
        for i in range(hparams['n_anchor_patches_N_in']):
            # to efficiently sample a random element from each connected component (with variable lengths),
            # we generate and pad a random matrix then take the argmax. This gives a randomly sampled node ID from within the component.
            # NOTE(review): this assumes config.PAD_VALUE is far below typical randn draws so padding never wins the argmax — confirm.
            rand = torch.randn(components.shape)
            rand[components == config.PAD_VALUE] = config.PAD_VALUE
            sample = components[range(len(components)), torch.argmax(rand, dim=1)]
            all_samples.append(sample)
        # (n_samples, batch_sz * max_n_cc) -> (batch_sz * max_n_cc, n_samples)
        samples = torch.transpose(torch.stack(all_samples), 0, 1)
    # sample border N anchor patch
    else:
        border_set_reshaped = border_set.view(border_set.shape[0]*border_set.shape[1], -1)
        all_samples = []
        for i in range(hparams['n_anchor_patches_N_out']): # number of neighborhood border AP to sample
            # same approach as internally, except that we're sampling from the border_set instead of within the connected component
            rand = torch.randn(border_set_reshaped.shape)
            rand[border_set_reshaped == config.PAD_VALUE] = config.PAD_VALUE
            sample = border_set_reshaped[range(len(border_set_reshaped)), torch.argmax(rand, dim=1)]
            all_samples.append(sample)
        samples = torch.transpose(torch.stack(all_samples),0,1)
    # Reshape and return
    anchor_patches = samples.view(batch_sz, max_n_cc, -1)
    return anchor_patches
def sample_position_anchor_patches(hparams, networkx_graph, subgraph=None):
    '''
    Sample position (P channel) anchor patches, which are single nodes, with replacement.

    If subgraph is provided (a non-empty collection of node ids), internal anchor
    patches are drawn from within it ('n_anchor_patches_pos_in' samples); otherwise
    border anchor patches are drawn from the entire base graph
    ('n_anchor_patches_pos_out' samples). Returns a list of sampled node ids.
    '''
    if subgraph:
        # internal position anchor patches: sample from within the subgraph
        return list(np.random.choice(subgraph, hparams['n_anchor_patches_pos_in'], replace=True))
    # border position anchor patches: sample from anywhere in the base graph
    return list(np.random.choice(list(networkx_graph.nodes), hparams['n_anchor_patches_pos_out'], replace=True))
def sample_structure_anchor_patches(hparams, networkx_graph, device, max_sim_epochs):
    '''
    Generate a large number of structure anchor patches from which we can sample later
    max_sim_epochs: multiplication factor to ensure we generate more AP than are actually needed
    Returns a tensor of shape (n sampled patches, max patch length), padded with config.PAD_VALUE
    '''
    # number of anchor patches to sample
    n_samples = max_sim_epochs * hparams['n_anchor_patches_structure'] * hparams['n_layers']
    all_patches = []
    start_nodes = list(np.random.choice(list(networkx_graph.nodes), n_samples, replace = True))
    for i, node in enumerate(start_nodes):
        # there are two approaches implemented to sample the structure anchor patches: 'ego_graph' or 'triangular_random_walk' (the default)
        if hparams['structure_patch_type'] == 'ego_graph':
            # in this case, the anchor patch is the ego graph around the randomly sampled start node where the radius is specified by 'structure_anchor_patch_radius'
            subgraph = list(nx.ego_graph(networkx_graph, node, radius=hparams['structure_anchor_patch_radius']).nodes)
        elif hparams['structure_patch_type'] == 'triangular_random_walk':
            # in this case, we perform a triangular random walk of length 'sample_walk_len'
            # NOTE(review): the pre-sampled 'node' is unused here — the walk draws its own
            # random start from the full graph; confirm this is intended.
            subgraph = triangular_random_walk(hparams, networkx_graph, networkx_graph, hparams['sample_walk_len'], None, None, True)
        else:
            raise NotImplementedError
        all_patches.append(subgraph)
    # pad the sampled anchor patches to the max length
    max_anchor_len = max([len(s) for s in all_patches])
    padded_all_patches = []
    for s in all_patches:
        fill_len = max_anchor_len - len(s)
        padded_all_patches.append(torch.cat([torch.LongTensor(s),torch.LongTensor((fill_len)).fill_(config.PAD_VALUE)]))
    return torch.stack(padded_all_patches).long() # (n sampled patches, max patch length)
#######################################################
# Initialize anchor patches
def init_anchors_neighborhood(split, hparams, networkx_graph, device, train_cc_ids, val_cc_ids, test_cc_ids, train_N_border, val_N_border, test_N_border):
    '''
    Sample internal and border neighborhood anchor patches for every requested
    dataset split and every SubGNN layer.

    Returns:
        - anchors_int_neigh: dict of dicts mapping dataset name & layer number -> sampled internal N anchor patches
        - anchors_border_neigh: same structure, but holding border N anchor patches
    '''
    # select which datasets to process based on the requested split
    if split == 'all':
        dataset_names = ['train', 'val', 'test']
        datasets = [train_cc_ids, val_cc_ids, test_cc_ids]
        border_sets = [train_N_border, val_N_border, test_N_border]
    elif split == 'train_val':
        dataset_names = ['train', 'val']
        datasets = [train_cc_ids, val_cc_ids]
        border_sets = [train_N_border, val_N_border]
    elif split == 'test':
        dataset_names = ['test']
        datasets = [test_cc_ids]
        border_sets = [test_N_border]

    anchors_int_neigh = defaultdict(dict)
    anchors_border_neigh = defaultdict(dict)
    # for each dataset and each layer, sample both internal and border anchor
    # patches, reusing the precomputed border sets to speed up the calculation
    for name, cc_ids, border in zip(dataset_names, datasets, border_sets):
        for layer in range(hparams['n_layers']):
            anchors_int_neigh[name][layer] = sample_neighborhood_anchor_patch(
                hparams, networkx_graph, cc_ids, border, sample_inside=True)
            anchors_border_neigh[name][layer] = sample_neighborhood_anchor_patch(
                hparams, networkx_graph, cc_ids, border, sample_inside=False)
    return anchors_int_neigh, anchors_border_neigh
def init_anchors_pos_int(split, hparams, networkx_graph, device, train_cc_ids, val_cc_ids, test_cc_ids):
    '''
    Sample internal position anchor patches for every requested dataset split
    and every SubGNN layer.

    Returns:
        - anchors_pos_int: dict of dicts mapping dataset name (e.g. train, val)
          and layer number to a tensor of sampled internal position anchor patches
    '''
    # select which datasets to process based on the requested split
    if split == 'all':
        dataset_names = ['train', 'val', 'test']
        datasets = [train_cc_ids, val_cc_ids, test_cc_ids]
    elif split == 'train_val':
        dataset_names = ['train', 'val']
        datasets = [train_cc_ids, val_cc_ids]
    elif split == 'test':
        dataset_names = ['test']
        datasets = [test_cc_ids]

    anchors_pos_int = defaultdict(dict)
    # for each dataset and each layer, sample patches from within each subgraph
    for name, dataset in zip(dataset_names, datasets):
        for layer in range(hparams['n_layers']):
            sampled = [sample_position_anchor_patches(hparams, networkx_graph, sg) for sg in dataset]
            anchors_pos_int[name][layer] = torch.stack([torch.tensor(s) for s in sampled])
    return anchors_pos_int
def init_anchors_pos_ext(hparams, networkx_graph, device):
    '''
    Sample border position anchor patches for every SubGNN layer.

    Returns:
        - anchors_pos_ext: dict mapping layer number -> tensor of sampled border
          position anchor patches (drawn from the entire base graph)
    '''
    return {
        layer: torch.tensor(sample_position_anchor_patches(hparams, networkx_graph))
        for layer in range(hparams['n_layers'])
    }
def init_anchors_structure(hparams, structure_anchors, int_structure_anchor_rw, bor_structure_anchor_rw):
    '''
    For each SubGNN layer, sample 'n_anchor_patches_structure' structure anchor
    patches (with replacement) together with their precomputed random walks.

    Returns:
        - anchors_struc: dict mapping layer number -> tuple of
          (sampled structure anchor patches,
           indices of the selected patches within the full sampled pool,
           matching internal random walks,
           matching border random walks)
    '''
    anchors_struc = {}
    for layer in range(hparams['n_layers']):
        # pick patch indices uniformly at random, with replacement
        chosen = list(np.random.choice(range(structure_anchors.shape[0]), hparams['n_anchor_patches_structure'], replace = True))
        anchors_struc[layer] = (
            structure_anchors[chosen, :],
            chosen,
            int_structure_anchor_rw[chosen, :, :],
            bor_structure_anchor_rw[chosen, :, :],
        )
    return anchors_struc
#######################################################
# Retrieve anchor patches
def get_anchor_patches(dataset_type, hparams, networkx_graph, node_matrix, \
    subgraph_idx, cc_ids, cc_embed_mask, lstm, anchors_neigh_int, anchors_neigh_border, \
    anchors_pos_int, anchors_pos_ext, anchors_structure, layer_num, channel, inside, \
    device=None):
    '''
    Retrieve the (precomputed) anchor patches and their embeddings for one channel at one layer.

    Inputs:
        - dataset_type: train, val, etc.
        - hparams: dictionary of hyperparameters
        - networkx_graph: base graph (only used by the structure channel's walk aggregation)
        - node_matrix: matrix containing node embeddings for every node in base graph
        - subgraph_idx: indices of the subgraphs in the current batch
        - cc_ids: (batch_sz, max_n_cc, max_size_cc) node ids per connected component
        - cc_embed_mask: mask over cc_ids marking which components are real vs. padding
        - lstm: walk encoder used only for the structure channel
        - anchors_neigh_int / anchors_neigh_border / anchors_pos_int / anchors_pos_ext /
          anchors_structure: precomputed anchor patch stores, one per channel variant
        - layer_num: SubGNN layer index to look up anchors for
        - channel: one of 'neighborhood', 'position', 'structure'
        - inside: True for internal anchors, False for border anchors
    Returns:
        - anchor_patches: tensor of shape (batch_sz, max_n_cc, n_anchor_patches, max_length_anchor_patch) containing the node ids associated with each anchor patch
        - anchor_mask: tensor of shape (batch_sz, max_n_cc, n_anchor_patches, max_length_anchor_patch) containing a mask over the anchor patches so we know which are just padding
        - anchor_embeds: tensor of shape (batch_sz, max_n_cc, n_anchor_patches, embed_dim) containing embeddings for each anchor patch
    '''
    batch_sz, max_n_cc, max_size_cc = cc_ids.shape
    if channel == 'neighborhood':
        # look up precomputed anchor patches
        # neighborhood anchors are per-subgraph and per-layer; 'inside' selects internal vs border store
        if inside:
            anchor_patches = anchors_neigh_int[dataset_type][layer_num][subgraph_idx].squeeze(1)
        else:
            anchor_patches = anchors_neigh_border[dataset_type][layer_num][subgraph_idx].squeeze(1)
        anchor_patches = anchor_patches.to(cc_ids.device)
        # Get anchor patch embeddings: return shape is (batch_sz, max_n_cc, n_sampled_patches, hidden_dim)
        anchor_embeds, anchor_mask = embed_anchor_patch(node_matrix, anchor_patches, device)
        # neighborhood patches are single nodes, so add a trailing length-1 patch dimension
        anchor_patches = anchor_patches.unsqueeze(-1)
        anchor_mask = anchor_mask.unsqueeze(-1)
    elif channel == 'position':
        # Get precomputed anchor patch ids: return shape is (batch_sz, max_n_cc, n_sampled_patches)
        if inside:
            anchors_tensor = anchors_pos_int[dataset_type][layer_num][subgraph_idx].squeeze(1)
            anchor_patches = anchors_tensor.unsqueeze(1).repeat(1,max_n_cc,1) # repeat anchor patches for each CC
            anchor_patches[~cc_embed_mask] = config.PAD_VALUE #mask CC that are just padding
        else:
            # border position anchors are shared across the batch; broadcast to every subgraph & CC
            anchor_patches = anchors_pos_ext[layer_num].unsqueeze(0).unsqueeze(0).repeat(batch_sz,max_n_cc,1)
            anchor_patches[~cc_embed_mask] = config.PAD_VALUE #mask CC that are just padding
        # Get anchor patch embeddings: return shape is (batch_sz, max_n_cc, n_sampled_patches, hidden_dim)
        anchor_embeds, anchor_mask = embed_anchor_patch(node_matrix, anchor_patches, device)
        # position patches are single nodes, so add a trailing length-1 patch dimension
        anchor_patches = anchor_patches.unsqueeze(-1)
        anchor_mask = anchor_mask.unsqueeze(-1)
    elif channel == 'structure':
        anchor_patches, indices, int_anchor_rw, bor_anchor_rw = anchors_structure[layer_num] #(n_anchor_patches_sampled, max_length_anchor_patch)
        # Get anchor patch embeddings: return shape is (n_sampled_patches, hidden_dim)
        anchor_rw = int_anchor_rw if inside else bor_anchor_rw
        anchor_embeds = aggregate_structure_anchor_patch(hparams, networkx_graph, lstm, node_matrix, anchor_patches, anchor_rw, inside=inside, device=cc_ids.device)
        # expand anchor patches/embeddings to be batch_sz, max_n_cc and pad them
        # return shape of anchor_patches = (bs, n_cc, n_anchor_patches_sampled, max_length_anchor_patch)
        anchor_patches = anchor_patches.unsqueeze(0).unsqueeze(0).repeat(batch_sz,max_n_cc,1,1)
        anchor_patches[~cc_embed_mask] = config.PAD_VALUE # mask CC that are just padding
        anchor_mask = (anchor_patches != config.PAD_VALUE).bool()
        anchor_embeds = anchor_embeds.unsqueeze(0).unsqueeze(0).repeat(batch_sz,max_n_cc,1,1)
        anchor_embeds[~cc_embed_mask] = config.PAD_VALUE
    else:
        raise Exception('An invalid channel has been entered.')
    return anchor_patches, anchor_mask, anchor_embeds
#######################################################
# Embed anchor patches
def embed_anchor_patch(node_matrix, anchor_patch_ids, device):
    '''
    Look up the node embeddings for the given anchor patch node ids.

    Returns a tuple of (embeddings for each id, boolean mask that is True wherever
    the id is a real node and False where it is padding).
    '''
    embeds = node_matrix(anchor_patch_ids.to(device))
    pad_mask = anchor_patch_ids != config.PAD_VALUE
    return embeds, pad_mask.bool()
def aggregate_structure_anchor_patch(hparams, networkx_graph, lstm, node_matrix, anchor_patch_ids, all_patch_walks, inside, device):
    '''
    Embed structure anchor patches from their precomputed triangular random walks.

    For each sampled anchor patch: (1) retrieve node embeddings for the nodes visited by
    its precomputed random walks, (2) run each walk through the bi-LSTM, and
    (3) sum the per-walk representations into a single embedding per patch.

    Returns a tensor of shape (n sampled anchor patches, node_embed_dim).
    '''
    # anchor_patch_ids: (n sampled patches, max patch length)
    n_patches, _ = anchor_patch_ids.shape
    n_walks = hparams['n_triangular_walks']
    # node embeddings for every walk step: (n patches, n walks, walk len, embed dim)
    walk_embeds, _ = embed_anchor_patch(node_matrix, all_patch_walks, device)
    flat_walks = walk_embeds.view(n_patches * n_walks, hparams['random_walk_len'], hparams['node_embed_size'])
    # encode each walk with the LSTM, then regroup the walks by patch
    encoded = lstm(flat_walks)
    encoded = encoded.view(n_patches, n_walks, -1)
    # aggregate over the walks of each patch by summation
    return torch.sum(encoded, dim=1)
| 23,117 | 51.901602 | 179 | py |
SubGNN | SubGNN-main/SubGNN/subgraph_utils.py | # General
import typing
import sys
import numpy as np
#Networkx
import networkx as nx
# Sklearn
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.metrics import f1_score, accuracy_score
# Pytorch
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn.functional import one_hot
# Our methods
sys.path.insert(0, '..') # add config to path
import config
def read_subgraphs(sub_f, split = True):
    '''
    Read subgraphs and their labels from file.

    Each line holds three tab-separated fields: a '-'-joined list of node ids,
    a '-'-joined list of labels, and a split name ('train', 'val', or 'test').

    Args
        - sub_f (str): filename where subgraphs are stored

    Return for each train, val, test split:
        - sub_G (list): list of nodes belonging to each subgraph
        - sub_G_label: labels for each subgraph (a LongTensor unless multi-label)
    '''
    labels = {}      # label string -> integer index
    label_idx = 0    # next unused label index

    # per-split node lists, label lists, and subgraph-index masks
    train_sub_G, val_sub_G, test_sub_G = [], [], []
    train_sub_G_label, val_sub_G_label, test_sub_G_label = [], [], []
    train_mask, val_mask, test_mask = [], [], []

    multilabel = False
    subgraph_idx = 0
    with open(sub_f) as fin:
        for line in fin:
            fields = line.split("\t")
            nodes = [int(n) for n in fields[0].split("-") if n != ""]
            if not nodes:
                continue
            if len(nodes) == 1: print(nodes)
            line_labels = fields[1].split("-")
            if len(line_labels) > 1: multilabel = True
            # register any labels we haven't seen before
            for lab in line_labels:
                if lab not in labels:
                    labels[lab] = label_idx
                    label_idx += 1
            encoded = [labels[lab] for lab in line_labels]
            split_name = fields[2].strip()
            if split_name == "train":
                train_sub_G.append(nodes)
                train_sub_G_label.append(encoded)
                train_mask.append(subgraph_idx)
            elif split_name == "val":
                val_sub_G.append(nodes)
                val_sub_G_label.append(encoded)
                val_mask.append(subgraph_idx)
            elif split_name == "test":
                test_sub_G.append(nodes)
                test_sub_G_label.append(encoded)
                test_mask.append(subgraph_idx)
            subgraph_idx += 1

    if not multilabel:
        train_sub_G_label = torch.tensor(train_sub_G_label).long().squeeze()
        val_sub_G_label = torch.tensor(val_sub_G_label).long().squeeze()
        test_sub_G_label = torch.tensor(test_sub_G_label).long().squeeze()

    # some files store the larger of val/test second; keep val as the larger split
    if len(val_mask) < len(test_mask):
        return train_sub_G, train_sub_G_label, test_sub_G, test_sub_G_label, val_sub_G, val_sub_G_label

    return train_sub_G, train_sub_G_label, val_sub_G, val_sub_G_label, test_sub_G, test_sub_G_label
def calc_f1(logits, labels, avg_type='macro', multilabel_binarizer=None):
    '''
    Compute the F1 score ('macro' or 'micro', per `avg_type`) for the given logits and labels.

    If `multilabel_binarizer` is provided, the task is treated as multi-label and each
    class probability is thresholded at 0.5; otherwise the prediction is the argmax class.
    '''
    if multilabel_binarizer is not None:  # multi-label prediction
        # independent sigmoid per class; predict positive where probability > 0.5
        probs = torch.sigmoid(logits)
        preds = probs > torch.tensor([0.5]).to(probs.device)
    else:  # multi-class, but not multi-label prediction
        # predicted class is the index of the largest logit
        preds = torch.argmax(logits, dim=-1)
    score = f1_score(labels.cpu().detach(), preds.cpu().detach(), average=avg_type)
    return torch.tensor([score])
def calc_accuracy(logits, labels, multilabel_binarizer=None):
    '''
    Compute classification accuracy for the given logits and labels.

    If `multilabel_binarizer` is provided, the task is treated as multi-label and each
    class probability is thresholded at 0.5; otherwise the prediction is the argmax class.
    '''
    if multilabel_binarizer is not None:  # multi-label prediction
        # independent sigmoid per class; predict positive where probability > 0.5
        probs = torch.sigmoid(logits)
        preds = probs > torch.tensor([0.5]).to(probs.device)
    else:
        # predicted class is the index of the largest logit
        preds = torch.argmax(logits, 1)
    acc = accuracy_score(labels.cpu().detach(), preds.cpu().detach())
    return torch.tensor([acc])
def get_border_nodes(graph, subgraph):
    '''
    Returns (1) an array containing the border nodes of the subgraph (i.e. all nodes that have an edge to a node not in the subgraph, but are themselves in the subgraph)
    and (2) an array containing all of the nodes in the base graph that aren't in the subgraph

    NOTE(review): densifies the full adjacency matrix, so memory is O(|V|^2);
    also assumes adjacency rows are ordered by node id 1..N — confirm for this graph.
    '''
    # get all of the nodes in the base graph that are not in the subgraph
    non_subgraph_nodes = np.array(list(set(graph.nodes()).difference(set(subgraph.nodes()))))
    subgraph_nodes = np.array(list(subgraph.nodes()))
    A = nx.adjacency_matrix(graph).todense()
    # subset adjacency matrix to get edges between subgraph and non-subgraph nodes
    border_A = A[np.ix_(subgraph_nodes - 1,non_subgraph_nodes - 1)] # NOTE: Need to subtract 1 bc nodes are indexed starting at 1
    # the nodes in the subgraph are border nodes if they have at least one edge to a node that is not in the subgraph
    border_edge_exists = (np.sum(border_A, axis=1) > 0).flatten()
    border_nodes = subgraph_nodes[np.newaxis][border_edge_exists]
    return border_nodes, non_subgraph_nodes
def get_component_border_neighborhood_set(networkx_graph, component, k, ego_graph_dict=None):
    '''
    Returns a set containing the nodes in the k-hop border of the specified component
    component: 1D tensor of node IDs in the component (with possible padding), or an iterable of node ids
    k: number of hops around the component that is included in the border set
    ego_graph_dict: dictionary mapping from node id to precomputed ego_graph for the node

    NOTE(review): when ego_graph_dict is supplied, `k` is ignored — assumes the
    precomputed ego graphs were built with radius k; confirm at the call site.
    '''
    # First, remove any padding that exists in the component
    if type(component) is torch.Tensor:
        component_inds_non_neg = (component!=config.PAD_VALUE).nonzero().view(-1)
        component_set = {int(n) for n in component[component_inds_non_neg]}
    else:
        component_set = set(component)
    # calculate the ego graph for each node in the connected component & take the union of all nodes
    neighborhood = set()
    for node in component_set:
        if ego_graph_dict == None: # if it hasn't already been computed, calculate the ego graph (i.e. induced subgraph of neighbors centered at node with specified radius)
            ego_g = nx.ego_graph(networkx_graph, node, radius = k).nodes()
        else:
            ego_g = ego_graph_dict[node-1] #NOTE: nodes in dict were indexed with 0, while our nodes are indexed starting at 1
        neighborhood = neighborhood.union(set(ego_g))
    # remove from the unioned ego sets all nodes that are actually in the component
    # this will leave only the nodes that are in the k-hop border, but not in the subgraph component
    border_nodes = neighborhood.difference(component_set)
    return border_nodes
# THE BELOW FUNCTIONS ARE COPIED FROM ALLEN NLP
def weighted_sum(matrix: torch.Tensor, attention: torch.Tensor) -> torch.Tensor:
    """
    Compute an attention-weighted sum of the rows of `matrix` — the typical
    computation performed after an attention mechanism.

    The sum is always taken over the second-to-last dimension of `matrix`. All
    dimensions of `matrix` before the last must be matched in `attention`; any
    extra dimensions in `attention` must come directly after the batch dimension.
    For example, a `matrix` of shape (batch, queries, words, dim) combined with
    an `attention` of shape (batch, queries, words) produces a result of shape
    (batch, queries, dim), while an `attention` of shape
    (batch, docs, queries, words) produces (batch, docs, queries, dim).

    (Adapted from AllenNLP.)
    """
    # Fast paths: these common shapes map directly onto batched matrix multiply.
    if attention.dim() == 2 and matrix.dim() == 3:
        return attention.unsqueeze(1).bmm(matrix).squeeze(1)
    if attention.dim() == 3 and matrix.dim() == 3:
        return attention.bmm(matrix)
    # General case: broadcast `matrix` up to the attention's extra dimensions...
    if matrix.dim() - 1 < attention.dim():
        target_shape = list(matrix.size())
        for axis in range(attention.dim() - matrix.dim() + 1):
            matrix = matrix.unsqueeze(1)
            target_shape.insert(axis + 1, attention.size(axis + 1))
        matrix = matrix.expand(*target_shape)
    # ...then weight every row and sum over the second-to-last dimension.
    weighted = attention.unsqueeze(-1).expand_as(matrix) * matrix
    return weighted.sum(dim=-2)
def masked_sum(
    vector: torch.Tensor, mask: torch.BoolTensor, dim: int, keepdim: bool = False) -> torch.Tensor:
    """
    Sum `vector` along dimension `dim`, counting only positions where `mask` is True.

    **
    Adapted from AllenNLP's masked mean:
    https://github.com/allenai/allennlp/blob/90e98e56c46bc466d4ad7712bab93566afe5d1d0/allennlp/nn/util.py
    Note: despite the provenance, this computes a masked *sum*, not a mean —
    the previous docstring, copied from masked_mean, incorrectly said "mean".
    **
    # Parameters
    vector : `torch.Tensor`
        The vector to sum.
    mask : `torch.BoolTensor`
        The mask of the vector. It must be broadcastable with vector.
    dim : `int`
        The dimension to sum over.
    keepdim : `bool`
        Whether to keep the summed dimension (with size 1) in the output.
    # Returns
    `torch.Tensor`
        A `torch.Tensor` containing the masked sums.
    """
    # zero out masked-off entries so they contribute nothing to the sum
    replaced_vector = vector.masked_fill(~mask, 0.0)
    value_sum = torch.sum(replaced_vector, dim=dim, keepdim=keepdim)
    return value_sum
| 10,206 | 41.886555 | 172 | py |
SubGNN | SubGNN-main/SubGNN/subgraph_mpn.py | # General
import numpy as np
import sys
from multiprocessing import Pool
import time
# Pytorch
import torch
import torch.nn as nn
import torch.nn.functional as F
# Pytorch Geometric
from torch_geometric.utils import add_self_loops
from torch_geometric.nn import MessagePassing
# Our methods
sys.path.insert(0, '..') # add config to path
import config
class SG_MPN(MessagePassing):
    '''
    A single subgraph-level message passing layer
    Messages are passed from anchor patch to connected component and weighted by the channel-specific similarity between the two.
    The resulting messages for a single component are aggregated and used to update the embedding for the component.
    '''
    def __init__(self, hparams):
        super(SG_MPN, self).__init__(aggr='add') # "Add" aggregation.
        self.hparams = hparams
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        # projects [cc embedding ; aggregated messages] back to the hidden size (used in update())
        self.linear = nn.Linear(hparams['node_embed_size'] * 2, hparams['node_embed_size']).to(self.device)
        # scores each raw anchor patch message to produce the property-aware embedding
        self.linear_position = nn.Linear(hparams['node_embed_size'],1).to(self.device)
    def create_patch_embedding_matrix(self,cc_embeds, cc_embed_mask, anchor_embeds, anchor_mask):
        '''
        Concatenate the connected component and anchor patch embeddings into a single matrix.
        This will be used an input for the pytorch geometric message passing framework.
        Returns a matrix with anchor patch embeddings stacked BEFORE cc embeddings
        (downstream indexing depends on this ordering).
        '''
        batch_sz, max_n_cc, cc_hidden_dim = cc_embeds.shape
        anchor_hidden_dim = anchor_embeds.shape[-1]
        # reshape connected component & anchor patch embedding matrices
        reshaped_cc_embeds = cc_embeds.view(-1, cc_hidden_dim) #(batch_sz * max_n_cc , hidden_dim)
        reshaped_anchor_embeds = anchor_embeds.view(-1, anchor_hidden_dim) #(batch_sz * max_n_cc * n_sampled_patches, hidden_dim)
        # concatenate the anchor patch and connected component embeddings into single matrix
        patch_embedding_matrix = torch.cat([reshaped_anchor_embeds, reshaped_cc_embeds])
        return patch_embedding_matrix
    def create_edge_index(self, reshaped_cc_ids, reshaped_anchor_patch_ids, anchor_mask, n_anchor_patches):
        '''
        Create edge matrix of shape (2, # edges) where edges exist between connected components and their associated anchor patches
        Note that edges don't exist between components or between anchor patches
        Returns the edge index (with padded-anchor edges filtered out) and the boolean
        mask that was used to filter them.
        '''
        # get indices into patch matrix corresponding to anchor patches
        anchor_inds = torch.tensor(range(reshaped_anchor_patch_ids.shape[0]))
        # get indices into patch matrix corresponding to connected components
        # (offset by the number of anchor rows because anchors come first in the patch matrix)
        cc_inds = torch.tensor(range(reshaped_cc_ids.shape[0])) + reshaped_anchor_patch_ids.shape[0]
        # repeat CC indices n_anchor_patches times
        cc_inds_matched = cc_inds.repeat_interleave(n_anchor_patches)
        # stack together two indices to create (2,E) edge matrix
        edge_index = torch.stack((anchor_inds, cc_inds_matched)).to(device=self.device)
        # keep only edges whose anchor patch is real (first element not padding)
        mask_inds = anchor_mask.view(-1, anchor_mask.shape[-1])[:,0]
        return edge_index[:,mask_inds], mask_inds
    def get_similarities(self, networkx_graph, edge_index, sims, cc_ids, anchor_ids, anchors_sim_index):
        '''
        Reshape similarities tensor of shape (n edges, 1) that contains similarity value for each edge in the edge index
        sims: (batch_size, max_n_cc, n possible anchor patches)
        edge_index: (2, number of edges between components and anchor patches)
        anchors_sim_index: indices into sims matrix for the structure channel that specify which anchor patches we're using
        '''
        n_cc = cc_ids.shape[0]
        n_anchor_patches = anchor_ids.shape[0]
        batch_sz, max_n_cc, n_patch_options = sims.shape
        sims = sims.view(batch_sz * max_n_cc, n_patch_options)
        if anchors_sim_index != None: anchors_sim_index = anchors_sim_index * torch.unique(edge_index[1,:]).shape[0] # n unique CC
        # NOTE: edge_index contains stacked anchor, cc embeddings
        if anchors_sim_index == None: # neighborhood, position channels
            anchor_indices = anchor_ids[edge_index[0,:],:] - 1 # get the indices into the similarity matrix of which anchors were sampled
            cc_indices = edge_index[1,:] - n_anchor_patches # get indices of the conneced components into the similarity matrix
            similarities = sims[cc_indices, anchor_indices.squeeze()]
        else: #structure channel
            # get indices of the conneced components into the similarity matrix
            cc_indices = edge_index[1,:] - n_anchor_patches #indexing into edge index is different than indexing into sims because patch matrix from which edge index was derived stacks anchor paches before the cc embeddings
            similarities = sims[cc_indices, torch.tensor(anchors_sim_index)] # anchors_sim_index provides indexing into the big similarity matrix - it tells you which anchors we actually sampled
        # ensure similarities has a trailing singleton dim so it broadcasts against messages
        if len(similarities.shape) == 1: similarities = similarities.unsqueeze(-1)
        return similarities
    def generate_pos_struc_embeddings(self, raw_msgs, cc_ids, anchor_ids, edge_index, edge_index_mask):
        '''
        Generates the property aware position/structural embeddings for each connected component
        by scoring each raw anchor-patch message with a linear layer.
        Returns a tensor of shape (n subgraphs * n_cc, n_anchors_per_cc).
        '''
        # Generate position/structure embeddings
        n_cc = cc_ids.shape[0]
        n_anchor_patches = anchor_ids.shape[0]
        embed_sz = raw_msgs.shape[1]
        n_anchors_per_cc = int(n_anchor_patches/n_cc)
        # 1) add masked CC back in & reshape
        # raw_msgs doesn't include padding so we need to add padding back in
        # NOTE: while these are named as position embeddings, these apply to structure channel as well
        pos_embeds = torch.zeros((n_cc * n_anchors_per_cc, embed_sz)).to(device=self.device) + config.PAD_VALUE
        pos_embeds[edge_index_mask] = raw_msgs # raw_msgs doesn't include padding so we need to add padding back in
        pos_embeds_reshaped = pos_embeds.view(-1, n_anchors_per_cc, embed_sz)
        # 2) linear layer + normalization
        position_out = self.linear_position(pos_embeds_reshaped).squeeze(-1)
        # optionally normalize the output of the linear layer (this is what P-GNN paper did)
        if 'norm_pos_struc_embed' in self.hparams and self.hparams['norm_pos_struc_embed']:
            position_out = F.normalize(position_out, p=2, dim=-1)
        else: # otherwise, just push through a relu
            position_out = F.relu(position_out)
        return position_out #(n subgraphs * n_cc, n_anchors_per_cc )
    def forward(self, networkx_graph, sims, cc_ids, cc_embeds, cc_embed_mask, \
        anchor_patches, anchor_embeds, anchor_mask, anchors_sim_index):
        '''
        Performs a single message passing layer
        Returns:
            - cc_embed_matrix_reshaped: order-invariant hidden representation (batch_sz, max_n_cc, node embed dim)
            - position_struc_out_reshaped: property aware cc representation (batch_sz, max_n_cc, n_anchor_patches)
        '''
        # reshape anchor patches & CC embeddings & stack together
        # NOTE: anchor patches then CC stacked in matrix
        patch_matrix = self.create_patch_embedding_matrix(cc_embeds, cc_embed_mask, anchor_embeds, anchor_mask)
        # reshape cc & anchor patch id matrices
        batch_sz, max_n_cc, max_size_cc = cc_ids.shape
        cc_ids = cc_ids.view(-1, max_size_cc) # (batch_sz * max_n_cc, max_size_cc)
        anchor_ids = anchor_patches.contiguous().view(-1, anchor_patches.shape[-1]) # (batch_sz * max_n_cc * n_sampled_patches, anchor patch size)
        n_anchor_patches_sampled = anchor_ids.shape[0]
        # create edge index
        edge_index, edge_index_mask = self.create_edge_index(cc_ids, anchor_ids, anchor_mask, anchor_patches.shape[2])
        # get similarity values for each edge index
        similarities = self.get_similarities( networkx_graph, edge_index, sims, cc_ids, anchor_ids, anchors_sim_index)
        # Perform Message Passing
        # propagated_msgs: (length of concatenated anchor patches & cc, node dim size)
        propagated_msgs, raw_msgs = self.propagate(edge_index, x=patch_matrix, similarity=similarities)
        # Generate Position/Structure Embeddings
        position_struc_out = self.generate_pos_struc_embeddings(raw_msgs, cc_ids, anchor_ids, edge_index, edge_index_mask)
        # index resulting propagated messagaes to get updated CC embeddings & reshape
        # (CC rows sit after the anchor rows in the patch matrix)
        cc_embed_matrix = propagated_msgs[n_anchor_patches_sampled:,:]
        cc_embed_matrix_reshaped = cc_embed_matrix.view(batch_sz , max_n_cc ,-1)
        # reshape property aware position/structure embeddings
        position_struc_out_reshaped = position_struc_out.view(batch_sz, max_n_cc, -1)
        return cc_embed_matrix_reshaped, position_struc_out_reshaped
    def propagate(self, edge_index, size=None, **kwargs):
        # We need to reimplement propagate instead of relying on base class implementation because we need
        # to return the raw messages to generate the position/structure embeddings.
        # Everything else is identical to propagate function from Pytorch Geometric.
        r"""The initial call to start propagating messages.
        Args:
            edge_index (Tensor or SparseTensor): A :obj:`torch.LongTensor` or a
                :obj:`torch_sparse.SparseTensor` that defines the underlying
                graph connectivity/message passing flow.
                :obj:`edge_index` holds the indices of a general (sparse)
                assignment matrix of shape :obj:`[N, M]`.
                If :obj:`edge_index` is of type :obj:`torch.LongTensor`, its
                shape must be defined as :obj:`[2, num_messages]`, where
                messages from nodes in :obj:`edge_index[0]` are sent to
                nodes in :obj:`edge_index[1]`
                (in case :obj:`flow="source_to_target"`).
                If :obj:`edge_index` is of type
                :obj:`torch_sparse.SparseTensor`, its sparse indices
                :obj:`(row, col)` should relate to :obj:`row = edge_index[1]`
                and :obj:`col = edge_index[0]`.
                The major difference between both formats is that we need to
                input the *transposed* sparse adjacency matrix into
                :func:`propagate`.
            size (tuple, optional): The size :obj:`(N, M)` of the assignment
                matrix in case :obj:`edge_index` is a :obj:`LongTensor`.
                If set to :obj:`None`, the size will be automatically inferred
                and assumed to be quadratic.
                This argument is ignored in case :obj:`edge_index` is a
                :obj:`torch_sparse.SparseTensor`. (default: :obj:`None`)
            **kwargs: Any additional data which is needed to construct and
                aggregate messages, and to update node embeddings.
        """
        size = self.__check_input__(edge_index, size)
        # run both functions in separation.
        coll_dict = self.__collect__(self.__user_args__, edge_index, size,
                                     kwargs)
        msg_kwargs = self.inspector.distribute('message', coll_dict)
        msg_out = self.message(**msg_kwargs)
        aggr_kwargs = self.inspector.distribute('aggregate', coll_dict)
        out = self.aggregate(msg_out, **aggr_kwargs)
        update_kwargs = self.inspector.distribute('update', coll_dict)
        out = self.update(out, **update_kwargs)
        # unlike the base class, also return the raw (pre-aggregation) messages
        return out, msg_out
    def message(self, x_j, similarity): #default is source to target
        '''
        The message is the anchor patch representation weighted by the similarity between the patch and the component
        '''
        return similarity * x_j
    def update(self, aggr_out, x):
        '''
        Update the connected component embedding from the result of the aggregation. The default is to 'use_mpn_projection',
        i.e. concatenate the aggregated messages with the previous cc embedding and push through a relu
        '''
        if self.hparams['use_mpn_projection']:
            return F.relu(self.linear(torch.cat([x, aggr_out], dim=1)))
        else:
            return aggr_out
| 12,371 | 50.123967 | 223 | py |
SubGNN | SubGNN-main/SubGNN/datasets.py | # Pytorch
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
# Typing
from typing import List
class SubgraphDataset(Dataset):
    '''
    Dataset of subgraphs: holds each subgraph's node ids and label, plus the
    precomputed border sets and similarity matrices used by the SubGNN channels.
    '''
    def __init__(self, subgraph_list: List, labels, cc_ids, N_border, NP_sim, I_S_sim, B_S_sim, multilabel, multilabel_binarizer):
        # subgraph node id lists, connected component ids, and labels
        self.subgraph_list = subgraph_list
        self.cc_ids = cc_ids
        self.labels = labels
        # precomputed border set for the neighborhood channel (optional)
        self.N_border = N_border
        # precomputed similarity matrices (each optional)
        self.NP_sim = NP_sim
        self.I_S_sim = I_S_sim
        self.B_S_sim = B_S_sim
        # support for multi-label classification
        self.multilabel = multilabel
        self.multilabel_binarizer = multilabel_binarizer

    def __len__(self):
        '''
        Number of subgraphs in the dataset.
        '''
        return len(self.subgraph_list)

    def __getitem__(self, idx):
        '''
        Return the idx-th subgraph together with its precomputed data and label.
        '''
        def pick(collection):
            # several of the precomputed structures are optional and may be None
            return None if collection is None else collection[idx]

        subgraph_ids = torch.LongTensor(self.subgraph_list[idx])  # node IDs in subgraph
        if self.multilabel:
            label = torch.LongTensor(self.multilabel_binarizer.transform([self.labels[idx]]))
        else:
            label = torch.LongTensor([self.labels[idx]])
        return (
            subgraph_ids,
            self.cc_ids[idx],
            pick(self.N_border),
            pick(self.NP_sim),
            pick(self.I_S_sim),
            pick(self.B_S_sim),
            torch.LongTensor([idx]),
            label,
        )
| 1,904 | 31.844828 | 130 | py |
SubGNN | SubGNN-main/SubGNN/train_config.py | # General
import numpy as np
import random
import argparse
import tqdm
import pickle
import json
import commentjson
import joblib
import os
import sys
import pathlib
from collections import OrderedDict
import random
import string
# Pytorch
import torch
from torch.utils.data import DataLoader
from torch.nn.functional import one_hot
import pytorch_lightning as pl
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.profiler import AdvancedProfiler
# Optuna
import optuna
from optuna.samplers import TPESampler
from optuna.integration import PyTorchLightningPruningCallback
# Our Methods
import SubGNN as md
sys.path.insert(0, '..') # add config to path
import config
def parse_arguments():
    '''
    Parse command line arguments; `-config_path` points at the config file
    specifying all of the run parameters.
    '''
    parser = argparse.ArgumentParser(description="Learn subgraph embeddings")
    parser.add_argument('-config_path', type=str, default=None, help='Load config file')
    return parser.parse_args()
def read_json(fname):
    '''
    Load the commented-JSON file at `fname`, preserving key order via OrderedDict.
    '''
    with open(fname, 'rt') as fin:
        parsed = commentjson.load(fin, object_hook=OrderedDict)
    return parsed
def get_optuna_suggest(param_dict, name, trial):
    '''
    Sample a value for the hyperparameter `name` from the search space in `param_dict`.

    name: string specifying the hyperparameter
    trial: optuna trial
    param_dict: dictionary describing which optuna sampler to use and its arguments, e.g.
        {
            "type" : "suggest_categorical",
            "args" : [[ 64, 128]]
        }
    'type' names a method on the trial (e.g. suggest_categorical, suggest_float);
    'args' (and optional 'kwargs') are forwarded to that method after `name`.
    '''
    suggest_fn = getattr(trial, param_dict['type'])
    call_args = [name, *param_dict['args']]  # e.g. ['batch_size', [64, 128]]
    call_kwargs = dict(param_dict['kwargs']) if 'kwargs' in param_dict else {}
    return suggest_fn(*call_args, **call_kwargs)
def get_hyperparams_optuna(run_config, trial):
    '''
    Merge the fixed and optuna-sampled hyperparameters in the run config into one dict.

    Returns: dict mapping hyperparameter name (e.g. batch_size) -> final value
    '''
    # start from the fixed hyperparameters ...
    hyperparams = dict(run_config["hyperparams_fix"])
    # ... then sample a value for each tunable hyperparameter from its configured range
    for key in dict(run_config["hyperparams_optuna"]).keys():
        hyperparams[key] = get_optuna_suggest(run_config["hyperparams_optuna"][key], key, trial)
    return hyperparams
def build_model(run_config, trial = None):
    '''
    Creates SubGNN from the hyperparameters specified in the run config.

    Returns a tuple of (SubGNN model, dict of the hyperparameters used to build it).
    '''
    # get hyperparameters for the current trial
    hyperparameters = get_hyperparams_optuna(run_config, trial)
    # Set seeds for reproducibility
    torch.manual_seed(hyperparameters['seed'])
    np.random.seed(hyperparameters['seed'])
    torch.cuda.manual_seed(hyperparameters['seed'])
    torch.cuda.manual_seed_all(hyperparameters['seed'])
    # deterministic cudnn kernels (trades speed for reproducibility)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    # initialize SubGNN
    model = md.SubGNN(hyperparameters, run_config["graph_path"], \
        run_config["subgraphs_path"], run_config["embedding_path"], \
        run_config["similarities_path"], run_config["shortest_paths_path"], run_config['degree_sequence_path'], run_config['ego_graph_path'])
    return model, hyperparameters
def build_trainer(run_config, hyperparameters, trial = None):
    '''
    Set up optuna trainer.

    Returns a tuple of (pytorch-lightning Trainer, the kwargs it was built with,
    the tensorboard log directory where checkpoints and results are written).
    '''
    if 'progress_bar_refresh_rate' in hyperparameters:
        p_refresh = hyperparameters['progress_bar_refresh_rate']
    else:
        p_refresh = 5
    # set epochs, gpus, gradient clipping, etc.
    # if 'no_gpu' in run config, then use CPU
    trainer_kwargs={'max_epochs': hyperparameters['max_epochs'],
                    "gpus": 0 if 'no_gpu' in run_config else 1,
                    "num_sanity_val_steps":0,
                    "progress_bar_refresh_rate":p_refresh,
                    "gradient_clip_val": hyperparameters['grad_clip']
                    }
    # set auto learning rate finder param
    if 'auto_lr_find' in hyperparameters and hyperparameters['auto_lr_find']:
        trainer_kwargs['auto_lr_find'] = hyperparameters['auto_lr_find']
    # Create tensorboard logger
    lgdir = os.path.join(run_config['tb']['dir_full'], run_config['tb']['name'])
    if not os.path.exists(lgdir):
        os.makedirs(lgdir)
    # random version id avoids collisions between concurrent optuna trials
    logger = TensorBoardLogger(run_config['tb']['dir_full'], name=run_config['tb']['name'], version="version_"+ str(random.randint(0, 10000000)))
    if not os.path.exists(logger.log_dir):
        os.makedirs(logger.log_dir)
    print("Tensorboard logging at ", logger.log_dir)
    trainer_kwargs["logger"] = logger
    # Save top three model checkpoints
    trainer_kwargs["checkpoint_callback"] = ModelCheckpoint(
        filepath= os.path.join(logger.log_dir, "{epoch}-{val_micro_f1:.2f}-{val_acc:.2f}-{val_auroc:.2f}"),
        save_top_k = 3,
        verbose=True,
        monitor=run_config['optuna']['monitor_metric'],
        mode='max'
    )
    # if we use pruning, use the pytorch lightning pruning callback
    if run_config["optuna"]['pruning']:
        trainer_kwargs['early_stop_callback'] = PyTorchLightningPruningCallback(trial, monitor=run_config['optuna']['monitor_metric'])
    trainer = pl.Trainer(**trainer_kwargs)
    return trainer, trainer_kwargs, logger.log_dir
def train_model(run_config, trial = None):
    '''
    Train a single model whose hyperparameters are specified in the run config.

    Writes the hyperparameters, (serializable) trainer kwargs, and the final
    epoch's metric scores to the run's results directory.

    Returns:
        the max (or min, depending on 'opt_direction') value over all epochs of
        the metric named by 'monitor_metric' in the run config.
    '''
    # get model and hyperparameter dict
    model, hyperparameters = build_model(run_config, trial)
    # build optuna trainer
    trainer, trainer_kwargs, results_path = build_trainer(run_config, hyperparameters, trial)
    # dump hyperparameters to results dir
    # (use context managers so files are closed even if serialization fails)
    with open(os.path.join(results_path, "hyperparams.json"), "w") as hparam_file:
        hparam_file.write(json.dumps(hyperparameters, indent=4))
    # dump trainer args to results dir, dropping the entries json can't serialize
    for key in ['logger', 'profiler', 'early_stop_callback', 'checkpoint_callback']:
        trainer_kwargs.pop(key, None)
    with open(os.path.join(results_path, "trainer_kwargs.json"), "w") as tkwarg_file:
        tkwarg_file.write(json.dumps(trainer_kwargs, indent=4))
    # train the model
    trainer.fit(model)
    # write the final epoch's metric scores to the results dir
    if results_path is not None:
        results_serializable = {k: float(v) for k, v in model.metric_scores[-1].items()}
        with open(os.path.join(results_path, "final_metric_scores.json"), "w") as score_file:
            score_file.write(json.dumps(results_serializable, indent=4))
    # return the max (or min) metric specified by 'monitor_metric' in the run config
    all_scores = [score[run_config['optuna']['monitor_metric']].numpy() for score in model.metric_scores]
    if run_config['optuna']['opt_direction'] == "maximize":
        return np.max(all_scores)
    else:
        return np.min(all_scores)
def main():
    '''
    Perform an optuna run according to the hyperparameters and directory
    locations specified in the config file passed on the command line.

    Raises:
        NotImplementedError: if the config names an unknown embedding type or
            an unknown optuna sampler.
    '''
    torch.autograd.set_detect_anomaly(True)
    args = parse_arguments()
    # read in config file
    run_config = read_json(args.config_path)
    ## Set paths to data
    task = run_config['data']['task']
    embedding_type = run_config['hyperparams_fix']['embedding_type']
    # paths to subgraphs, edge list, and shortest paths between all nodes in the graph
    run_config["subgraphs_path"] = os.path.join(task, "subgraphs.pth")
    run_config["graph_path"] = os.path.join(task, "edge_list.txt")
    run_config['shortest_paths_path'] = os.path.join(task, "shortest_path_matrix.npy")
    run_config['degree_sequence_path'] = os.path.join(task, "degree_sequence.txt")
    run_config['ego_graph_path'] = os.path.join(task, "ego_graphs.txt")
    # directory where similarity calculations will be stored
    run_config["similarities_path"] = os.path.join(task, "similarities/")
    # get location of node embeddings
    if embedding_type == 'gin':
        run_config["embedding_path"] = os.path.join(task, "gin_embeddings.pth")
    elif embedding_type == 'graphsaint':
        run_config["embedding_path"] = os.path.join(task, "graphsaint_gcn_embeddings.pth")
    else:
        raise NotImplementedError(f"Unknown embedding type: {embedding_type}")
    # create a tensorboard directory in the folder specified by dir in the PROJECT ROOT folder
    if 'local' in run_config['tb'] and run_config['tb']['local']:
        run_config['tb']['dir_full'] = run_config['tb']['dir']
    else:
        run_config['tb']['dir_full'] = os.path.join(config.PROJECT_ROOT, run_config['tb']['dir'])
    ntrials = run_config['optuna']['opt_n_trials']
    print(f'Running {ntrials} Trials of optuna')
    # median pruning stops unpromising trials early
    if run_config['optuna']['pruning']:
        pruner = optuna.pruners.MedianPruner()
    else:
        pruner = None
    # the complete study path is the tensorboard directory + the study name
    run_config['study_path'] = os.path.join(run_config['tb']['dir_full'], run_config['tb']['name'])
    print("Logging to ", run_config['study_path'])
    pathlib.Path(run_config['study_path']).mkdir(parents=True, exist_ok=True)
    # get database file
    db_file = os.path.join(run_config['study_path'], 'optuna_study_sqlite.db')
    # specify sampler
    # BUG FIX: previously an unrecognized sampler name left 'sampler' undefined
    # and caused a NameError at create_study; fail loudly instead
    if run_config['optuna']['sampler'] == "grid" and "grid_search_space" in run_config['optuna']:
        sampler = optuna.samplers.GridSampler(run_config['optuna']['grid_search_space'])
    elif run_config['optuna']['sampler'] == "tpe":
        sampler = optuna.samplers.TPESampler()
    elif run_config['optuna']['sampler'] == "random":
        sampler = optuna.samplers.RandomSampler()
    else:
        raise NotImplementedError(f"Unknown optuna sampler: {run_config['optuna']['sampler']}")
    # create an optuna study with the specified sampler, pruner, direction (e.g. maximize)
    # A SQLite database is used to keep track of results
    # Will load in existing study if one exists
    study = optuna.create_study(direction=run_config['optuna']['opt_direction'],
                                sampler=sampler,
                                pruner=pruner,
                                storage='sqlite:///' + db_file,
                                study_name=run_config['study_path'],
                                load_if_exists=True)
    study.optimize(lambda trial: train_model(run_config, trial), n_trials=run_config['optuna']['opt_n_trials'], n_jobs=run_config['optuna']['opt_n_cores'])
    # persist the full study object for later analysis
    optuna_results_path = os.path.join(run_config['study_path'], 'optuna_study.pkl')
    print("Saving Study Results to", optuna_results_path)
    joblib.dump(study, optuna_results_path)
    print(study.best_params)
# Script entry point: run the optuna study described by the command-line config.
if __name__ == "__main__":
    main()
SubGNN | SubGNN-main/SubGNN/SubGNN.py | # General
import os
import numpy as np
from pathlib import Path
import typing
import time
import json
import copy
from typing import Dict, List
import multiprocessing
from multiprocessing import Pool
from itertools import accumulate
from collections import OrderedDict
import pickle
import sys
from functools import partial
#Sklearn
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.metrics import roc_auc_score
# Pytorch
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
from torch.nn.utils.rnn import pad_sequence, pack_sequence, pad_packed_sequence
import torch.nn.functional as F
from torch.nn.functional import one_hot
from torch.nn.parameter import Parameter
import matplotlib.pyplot as plt
# Pytorch lightning
import pytorch_lightning as pl
# Pytorch Geometric
from torch_geometric.utils.convert import to_networkx
from torch_geometric.nn import MessagePassing, GINConv
# Similarity calculations
from fastdtw import fastdtw
# Networkx
import networkx as nx
# Our Methods
sys.path.insert(0, '..') # add config to path
import config
import subgraph_utils
from subgraph_mpn import SG_MPN
from datasets import SubgraphDataset
import anchor_patch_samplers
from anchor_patch_samplers import *
import gamma
import attention
class LSTM(nn.Module):
    '''
    Bidirectional LSTM followed by a linear head that projects the pooled
    hidden states back down to the input feature dimension.
    '''
    def __init__(self, n_features, h, dropout=0.0, num_layers=1, batch_first=True, aggregator='last'):
        super().__init__()
        self.num_layers = num_layers  # number of stacked LSTM layers
        self.aggregator = aggregator  # hidden-state pooling: 'last' or 'sum'
        self.lstm = nn.LSTM(n_features, h, num_layers=num_layers, batch_first=batch_first, dropout=dropout, bidirectional=True)
        # bidirectional => hidden states are 2*h wide
        self.linear = nn.Linear(h * 2, n_features)

    def forward(self, input):
        # input: (batch_sz, seq_len, n_features)
        lstm_out, _ = self.lstm(input)
        if self.aggregator == 'last':
            # keep only the final timestep's hidden state
            pooled = lstm_out[:, -1, :]
        elif self.aggregator == 'sum':
            # sum hidden states over the sequence dimension
            pooled = torch.sum(lstm_out, dim=1)
        else:
            raise NotImplementedError
        return self.linear(pooled)
class SubGNN(pl.LightningModule):
'''
Pytorch lightning class for SubGNN
'''
def __init__(self, hparams: Dict, graph_path: str, subgraph_path: str,
        embedding_path: str, similarities_path: str, shortest_paths_path: str,
        degree_dict_path: str, ego_graph_path: str):
    '''
    Build SubGNN.

    Args:
        hparams: dictionary of hyperparameters (channel switches 'use_neighborhood'/
            'use_position'/'use_structure', 'n_layers', hidden dims, dropout rates, etc.)
        graph_path: path to the base graph's edge list
        subgraph_path: path to the subgraph node-id lists and labels
        embedding_path: path to pretrained node embeddings
        similarities_path: directory where similarity calculations are stored
        shortest_paths_path: path to the all-pairs shortest path matrix
        degree_dict_path: path to the node degree sequence
        ego_graph_path: path to precomputed ego graphs
    '''
    super(SubGNN, self).__init__()
    # prefer GPU when available
    self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # dictionary of hyperparameters
    self.hparams = hparams
    # paths where data is stored
    self.graph_path = graph_path
    self.subgraph_path = subgraph_path
    self.embedding_path = embedding_path
    self.similarities_path = Path(similarities_path)
    self.shortest_paths_path = shortest_paths_path
    self.degree_dict_path = degree_dict_path
    self.ego_graph_path = ego_graph_path
    # read in graph, subgraphs, labels, and pretrained node embeddings
    self.read_data()
    # initialize MPN layers for each channel (neighborhood, structure, position; internal, border)
    # and each layer (up to 'n_layers'); hid_dim accumulates the width of the final
    # concatenated subgraph representation as channels are switched on
    hid_dim = self.hparams['node_embed_size']
    self.neighborhood_mpns = nn.ModuleList()
    if self.hparams['use_neighborhood']:
        # each layer contributes an internal and a border embedding of size node_embed_size
        hid_dim += self.hparams['n_layers'] * 2 * self.hparams['node_embed_size'] #automatically infer hidden dimension
        for l in range(self.hparams['n_layers']):
            curr_layer = nn.ModuleDict()
            curr_layer['internal'] = SG_MPN(self.hparams)
            curr_layer['border'] = SG_MPN(self.hparams)
            # optionally add batch_norm (separate norms for internal and border outputs)
            if 'batch_norm' in self.hparams and self.hparams['batch_norm']:
                curr_layer['batch_norm'] = nn.BatchNorm1d(self.hparams['node_embed_size']).to(self.device)
                curr_layer['batch_norm_out'] = nn.BatchNorm1d(self.hparams['node_embed_size']).to(self.device)
            self.neighborhood_mpns.append(curr_layer)
    self.position_mpns = nn.ModuleList()
    if self.hparams['use_position']:
        # position channel outputs are sized by the number of position anchor patches
        hid_dim = hid_dim + (self.hparams['n_anchor_patches_pos_in'] + self.hparams['n_anchor_patches_pos_out']) * self.hparams['n_layers']
        for l in range(self.hparams['n_layers']):
            curr_layer = nn.ModuleDict()
            curr_layer['internal'] = SG_MPN(self.hparams)
            curr_layer['border'] = SG_MPN(self.hparams)
            # optionally add batch_norm
            if 'batch_norm' in self.hparams and self.hparams['batch_norm']:
                curr_layer['batch_norm'] = nn.BatchNorm1d(self.hparams['node_embed_size']).to(self.device)
                curr_layer['batch_norm_out'] = nn.BatchNorm1d(self.hparams['node_embed_size']).to(self.device)
            self.position_mpns.append(curr_layer)
    self.structure_mpns = nn.ModuleList()
    if self.hparams['use_structure']:
        # structure channel outputs are sized by the number of structure anchor patches
        hid_dim += 2 * self.hparams['n_anchor_patches_structure'] * self.hparams['n_layers']
        for l in range(self.hparams['n_layers']):
            curr_layer = nn.ModuleDict()
            curr_layer['internal'] = SG_MPN(self.hparams)
            curr_layer['border'] = SG_MPN(self.hparams)
            # optionally add batch_norm
            if 'batch_norm' in self.hparams and self.hparams['batch_norm']:
                curr_layer['batch_norm'] = nn.BatchNorm1d(self.hparams['node_embed_size']).to(self.device)
                curr_layer['batch_norm_out'] = nn.BatchNorm1d(self.hparams['node_embed_size']).to(self.device)
            self.structure_mpns.append(curr_layer)
    # initialize 3 FF layers on top of MPN layers (classification head)
    self.lin = nn.Linear(hid_dim, self.hparams['linear_hidden_dim_1'])
    self.lin2 = nn.Linear(self.hparams['linear_hidden_dim_1'], self.hparams['linear_hidden_dim_2'])
    self.lin3 = nn.Linear(self.hparams['linear_hidden_dim_2'], self.num_classes)
    # optional dropout on the linear layers
    self.lin_dropout = nn.Dropout(p=self.hparams['lin_dropout'])
    self.lin_dropout2 = nn.Dropout(p=self.hparams['lin_dropout'])
    # initialize loss: BCE-with-logits for multilabel tasks, cross entropy otherwise
    if self.multilabel:
        self.loss = nn.BCEWithLogitsLoss()
    else:
        self.loss = nn.CrossEntropyLoss()
    # initialize LSTM - this is used in the structure channel for embedding anchor patches
    self.lstm = LSTM(self.hparams['node_embed_size'], self.hparams['node_embed_size'], \
        dropout=self.hparams['lstm_dropout'], num_layers=self.hparams['lstm_n_layers'], \
        aggregator=self.hparams['lstm_aggregator'])
    # optionally, use feedforward attention to pool connected components in forward()
    if 'ff_attn' in self.hparams and self.hparams['ff_attn']:
        self.attn_vector = torch.nn.Parameter(torch.zeros((hid_dim,1), dtype=torch.float).to(self.device), requires_grad=True)
        nn.init.xavier_uniform_(self.attn_vector)
        self.attention = attention.AdditiveAttention(hid_dim, hid_dim)
    # default similarity function for the structure channel is dynamic time warping
    if 'structure_similarity_fn' not in self.hparams:
        self.hparams['structure_similarity_fn'] = 'dtw'
    # track metrics across epochs (used for optuna)
    self.metric_scores = []
##################################################
# forward pass
def run_mpn_layer(self, dataset_type, mpn_fn, subgraph_ids, subgraph_idx, cc_ids,
        cc_embeds, cc_embed_mask, sims, layer_num, channel, inside=True):
    '''
    Run a single message-passing layer for the specified 'channel' and
    internal/border side ('inside').

    Returns:
        - the updated connected component embedding matrix
        - the property-aware embedding matrix (used by position & structure channels)
    '''
    # look up / sample the anchor patches (plus their embeddings and mask) for this layer
    patches, patch_mask, patch_embeds = get_anchor_patches(
        dataset_type, self.hparams, self.networkx_graph, self.node_embeddings,
        subgraph_idx, cc_ids, cc_embed_mask, self.lstm,
        self.anchors_neigh_int, self.anchors_neigh_border, self.anchors_pos_int,
        self.anchors_pos_ext, self.anchors_structure, layer_num, channel, inside,
        self.device)
    # the structure channel additionally needs indices into the larger matrix of
    # pre-sampled structure anchor patches; other channels pass None
    anchors_sim_index = self.anchors_structure[layer_num][1] if channel == 'structure' else None
    # one round of message passing between anchor patches and connected components
    return mpn_fn(self.networkx_graph, sims, cc_ids,
        cc_embeds, cc_embed_mask, patches, patch_embeds,
        patch_mask, anchors_sim_index)
def forward(self, dataset_type, N_I_cc_embed, N_B_cc_embed, \
        S_I_cc_embed, S_B_cc_embed, P_I_cc_embed, P_B_cc_embed, \
        subgraph_ids, cc_ids, subgraph_idx, NP_sim, \
        I_S_sim, B_S_sim):
    '''
    Compute subgraph logits for a batch.

    Args:
        dataset_type: 'train', 'val', or 'test' (selects the anchor patch set)
        N/S/P x I/B *_cc_embed: per-channel CC embedding matrices for the whole
            split; indexed into by subgraph_idx when trainable
        subgraph_ids: (batch_sz, max_len_subgraph) node ids per subgraph
        cc_ids: (batch_sz, max_n_cc, max_len_cc) node ids per connected component
        subgraph_idx: index of each batch subgraph within its split
        NP_sim: similarities used by the neighborhood & position channels
        I_S_sim, B_S_sim: internal / border structure-channel similarities
    Returns:
        logits of shape (batch_sz, num_classes)
    '''
    # create cc_embeds matrix for each channel: (batch_sz, max_n_cc, hidden_dim)
    init_cc_embeds = self.initialize_cc_embeddings(cc_ids, self.hparams['cc_aggregator'])
    if not self.hparams['trainable_cc']: # if the cc embeddings aren't trainable, we clone the freshly initialized ones
        N_in_cc_embeds = init_cc_embeds.clone()
        N_out_cc_embeds = init_cc_embeds.clone()
        P_in_cc_embeds = init_cc_embeds.clone()
        P_out_cc_embeds = init_cc_embeds.clone()
        S_in_cc_embeds = init_cc_embeds.clone()
        S_out_cc_embeds = init_cc_embeds.clone()
    else: # otherwise, we index into the intialized cc embeddings for each channel using the subgraph ids for the given batch
        N_in_cc_embeds = torch.index_select(N_I_cc_embed, 0, subgraph_idx.squeeze(-1))
        N_out_cc_embeds = torch.index_select(N_B_cc_embed, 0, subgraph_idx.squeeze(-1))
        P_in_cc_embeds = torch.index_select(P_I_cc_embed, 0, subgraph_idx.squeeze(-1))
        P_out_cc_embeds = torch.index_select(P_B_cc_embed, 0, subgraph_idx.squeeze(-1))
        S_in_cc_embeds = torch.index_select(S_I_cc_embed, 0, subgraph_idx.squeeze(-1))
        S_out_cc_embeds = torch.index_select(S_B_cc_embed, 0, subgraph_idx.squeeze(-1))
    batch_sz, max_n_cc, _ = init_cc_embeds.shape
    # get mask for cc_embeddings
    cc_embed_mask = (cc_ids != config.PAD_VALUE)[:,:,0] # only take first element bc only need mask over n_cc, not n_nodes in cc
    # for each layer in SubGNN, run the internal & border message-passing step of
    # every enabled channel and collect the per-layer outputs for concatenation
    outputs = []
    for l in range(self.hparams['n_layers']):
        # neighborhood channel
        if self.hparams['use_neighborhood']:
            # message passing layer for N internal and border
            N_in_cc_embeds, _ = self.run_mpn_layer(dataset_type, self.neighborhood_mpns[l]['internal'], subgraph_ids, subgraph_idx, cc_ids, N_in_cc_embeds, cc_embed_mask, NP_sim, layer_num=l, channel='neighborhood', inside=True)
            N_out_cc_embeds, _ = self.run_mpn_layer(dataset_type, self.neighborhood_mpns[l]['border'], subgraph_ids, subgraph_idx, cc_ids, N_out_cc_embeds, cc_embed_mask, NP_sim, layer_num=l, channel='neighborhood', inside=False)
            if 'batch_norm' in self.hparams and self.hparams['batch_norm']: #optional batch norm (flatten CC dim, normalize, restore shape)
                N_in_cc_embeds = self.neighborhood_mpns[l]['batch_norm'](N_in_cc_embeds.view(batch_sz*max_n_cc,-1)).view(batch_sz,max_n_cc, -1 )
                N_out_cc_embeds = self.neighborhood_mpns[l]['batch_norm_out'](N_out_cc_embeds.view(batch_sz*max_n_cc,-1)).view(batch_sz,max_n_cc, -1 )
            outputs.extend([N_in_cc_embeds, N_out_cc_embeds])
        # position channel
        if self.hparams['use_position']:
            # message passing layer for P internal and border (keeps the property-aware embeddings)
            P_in_cc_embeds, P_in_position_embed = self.run_mpn_layer(dataset_type, self.position_mpns[l]['internal'], subgraph_ids, subgraph_idx, cc_ids, P_in_cc_embeds, cc_embed_mask, NP_sim, layer_num=l, channel='position', inside=True)
            P_out_cc_embeds, P_out_position_embed = self.run_mpn_layer(dataset_type, self.position_mpns[l]['border'], subgraph_ids, subgraph_idx, cc_ids, P_out_cc_embeds, cc_embed_mask, NP_sim, layer_num=l, channel='position', inside=False)
            if 'batch_norm' in self.hparams and self.hparams['batch_norm']: #optional batch norm
                P_in_cc_embeds = self.position_mpns[l]['batch_norm'](P_in_cc_embeds.view(batch_sz*max_n_cc,-1)).view(batch_sz,max_n_cc, -1 )
                P_out_cc_embeds = self.position_mpns[l]['batch_norm_out'](P_out_cc_embeds.view(batch_sz*max_n_cc,-1)).view(batch_sz,max_n_cc, -1 )
            outputs.extend([P_in_position_embed, P_out_position_embed])
        # structure channel
        if self.hparams['use_structure']:
            # message passing layer for S internal and border (keeps the property-aware embeddings)
            S_in_cc_embeds, S_in_struc_embed = self.run_mpn_layer(dataset_type, self.structure_mpns[l]['internal'], subgraph_ids, subgraph_idx, cc_ids, S_in_cc_embeds, cc_embed_mask, I_S_sim, layer_num=l, channel='structure', inside=True)
            S_out_cc_embeds, S_out_struc_embed = self.run_mpn_layer(dataset_type, self.structure_mpns[l]['border'], subgraph_ids, subgraph_idx, cc_ids, S_out_cc_embeds, cc_embed_mask, B_S_sim, layer_num=l, channel='structure', inside=False)
            if 'batch_norm' in self.hparams and self.hparams['batch_norm']: #optional batch norm
                S_in_cc_embeds = self.structure_mpns[l]['batch_norm'](S_in_cc_embeds.view(batch_sz*max_n_cc,-1)).view(batch_sz,max_n_cc, -1 )
                S_out_cc_embeds = self.structure_mpns[l]['batch_norm_out'](S_out_cc_embeds.view(batch_sz*max_n_cc,-1)).view(batch_sz,max_n_cc, -1 )
            outputs.extend([S_in_struc_embed, S_out_struc_embed])
    # concatenate the initial embeddings with all per-layer channel outputs
    all_cc_embeds = torch.cat([init_cc_embeds] + outputs, dim=-1)
    # pool across connected components: learned additive attention or masked sum
    if 'ff_attn' in self.hparams and self.hparams['ff_attn']:
        batched_attn = self.attn_vector.squeeze().unsqueeze(0).repeat(all_cc_embeds.shape[0],1)
        attn_weights = self.attention(batched_attn, all_cc_embeds, cc_embed_mask)
        subgraph_embedding = subgraph_utils.weighted_sum(all_cc_embeds, attn_weights)
    else:
        subgraph_embedding = subgraph_utils.masked_sum(all_cc_embeds, cc_embed_mask.unsqueeze(-1), dim=1, keepdim=False)
    # Fully connected layers + optional dropout produce the class logits
    subgraph_embedding_out = F.relu(self.lin(subgraph_embedding))
    subgraph_embedding_out = self.lin_dropout(subgraph_embedding_out)
    subgraph_embedding_out = F.relu(self.lin2(subgraph_embedding_out))
    subgraph_embedding_out = self.lin_dropout2(subgraph_embedding_out)
    subgraph_embedding_out = self.lin3(subgraph_embedding_out)
    return subgraph_embedding_out
##################################################
# training, val, test steps
def training_step(self, train_batch, batch_idx):
    '''
    Run a single training step over the batch; returns the loss together with
    tensorboard log entries.
    '''
    # unpack subgraphs and labels
    subgraph_ids = train_batch['subgraph_ids']
    cc_ids = train_batch['cc_ids']
    subgraph_idx = train_batch['subgraph_idx']
    labels = train_batch['label'].squeeze(-1)
    # a single-example batch squeezes down to a 0-d tensor; restore a batch dim
    if labels.dim() == 0:
        labels = labels.unsqueeze(-1)
    # forward pass with the train-split CC embeddings and the batch's similarities
    logits = self.forward('train', self.train_N_I_cc_embed, self.train_N_B_cc_embed,
        self.train_S_I_cc_embed, self.train_S_B_cc_embed,
        self.train_P_I_cc_embed, self.train_P_B_cc_embed,
        subgraph_ids, cc_ids, subgraph_idx,
        train_batch['NP_sim'], train_batch['I_S_sim'], train_batch['B_S_sim'])
    # multilabel uses BCE-with-logits (labels cast to float); otherwise cross entropy
    if self.multilabel:
        loss = self.loss(logits.squeeze(1), labels.type_as(logits))
    else:
        loss = self.loss(logits, labels)
    # per-batch accuracy
    acc = subgraph_utils.calc_accuracy(logits, labels, multilabel_binarizer=self.multilabel_binarizer)
    # 'log' entries are written to tensorboard
    return {'loss': loss, 'log': {'train_loss': loss, 'train_acc': acc}}
def val_test_step(self, batch, batch_idx, is_test = False):
    '''
    Shared logic for a single validation or test step over the batch.
    Returns the loss, accuracy, macro F1, logits, and labels keyed by split.
    '''
    # unpack subgraphs and labels
    subgraph_ids = batch['subgraph_ids']
    cc_ids = batch['cc_ids']
    subgraph_idx = batch['subgraph_idx']
    labels = batch['label'].squeeze(-1)
    # a single-example batch squeezes down to a 0-d tensor; restore a batch dim
    if labels.dim() == 0:
        labels = labels.unsqueeze(-1)
    # pick the split-specific CC embeddings
    if is_test:
        split = 'test'
        embeds = (self.test_N_I_cc_embed, self.test_N_B_cc_embed,
            self.test_S_I_cc_embed, self.test_S_B_cc_embed,
            self.test_P_I_cc_embed, self.test_P_B_cc_embed)
    else:
        split = 'val'
        embeds = (self.val_N_I_cc_embed, self.val_N_B_cc_embed,
            self.val_S_I_cc_embed, self.val_S_B_cc_embed,
            self.val_P_I_cc_embed, self.val_P_B_cc_embed)
    # forward pass with the batch's precomputed similarities
    logits = self.forward(split, *embeds, subgraph_ids, cc_ids, subgraph_idx,
        batch['NP_sim'], batch['I_S_sim'], batch['B_S_sim'])
    # multilabel uses BCE-with-logits (labels cast to float); otherwise cross entropy
    if self.multilabel:
        loss = self.loss(logits.squeeze(1), labels.type_as(logits))
    else:
        loss = self.loss(logits, labels)
    # per-batch metrics
    acc = subgraph_utils.calc_accuracy(logits, labels, multilabel_binarizer=self.multilabel_binarizer)
    macro_f1 = subgraph_utils.calc_f1(logits, labels, avg_type='macro', multilabel_binarizer=self.multilabel_binarizer)
    return {split + '_loss': loss, split + '_acc': acc, split + '_macro_f1': macro_f1,
        split + '_logits': logits, split + '_labels': labels}
def validation_step(self, val_batch, batch_idx):
    '''Delegate to the shared val/test step in validation mode.'''
    return self.val_test_step(val_batch, batch_idx, is_test=False)
def test_step(self, test_batch, batch_idx):
    '''Delegate to the shared val/test step in test mode.'''
    return self.val_test_step(test_batch, batch_idx, is_test=True)
##################################################
# validation & test epoch end
def validation_epoch_end(self, outputs):
    '''
    Called at the end of the validation epoch.

    Input:
        - outputs: array with what validation_step returned for each batch, i.e.
          outputs = [{'val_loss': ..., 'val_logits': ..., ...}, ...]

    Aggregates per-batch logits/labels, computes epoch-level metrics (F1,
    accuracy, AUROC overall and per class), appends them to self.metric_scores
    (consumed by optuna), and optionally re-initializes CC embeddings and
    anchor patches for the next epoch. Returns avg loss + tensorboard logs.
    '''
    # aggregate the logits, labels, and metrics for all batches
    logits = torch.cat([x['val_logits'] for x in outputs], dim=0)
    labels = torch.cat([x['val_labels'] for x in outputs], dim=0)
    macro_f1 = subgraph_utils.calc_f1(logits, labels, avg_type='macro', multilabel_binarizer=self.multilabel_binarizer).squeeze()
    micro_f1 = subgraph_utils.calc_f1(logits, labels, avg_type='micro', multilabel_binarizer=self.multilabel_binarizer).squeeze()
    acc = subgraph_utils.calc_accuracy(logits, labels, multilabel_binarizer=self.multilabel_binarizer).squeeze()
    # calc AUC (one-vs-rest for multilabel and multiclass)
    if self.multilabel:
        auroc = roc_auc_score(labels.cpu(), torch.sigmoid(logits).cpu(), multi_class = 'ovr')
    elif len(torch.unique(labels)) == 2: #binary case
        auroc = roc_auc_score(labels.cpu(), F.softmax(logits, dim=1).cpu()[:,1])
    else: #multiclass
        auroc = roc_auc_score(labels.cpu(), F.softmax(logits, dim=1).cpu(), multi_class = 'ovr')
    # get average loss, acc, and macro F1 over batches
    avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean().cpu()
    avg_acc = torch.stack([x['val_acc'] for x in outputs]).mean()
    avg_macro_f1 = torch.stack([x['val_macro_f1'] for x in outputs]).mean()
    tensorboard_logs = {'val_loss': avg_loss, 'val_micro_f1': micro_f1, 'val_macro_f1': macro_f1, \
        'val_acc': acc, 'avg_val_acc': avg_acc, 'avg_macro_f1':avg_macro_f1, 'val_auroc':auroc }
    # add per-class AUROC to tensorboard
    if self.multilabel:
        for c in range(logits.shape[1]): #n_classes
            tensorboard_logs['val_auroc_class_' + str(c)] = roc_auc_score(labels[:, c].cpu(), torch.sigmoid(logits)[:, c].cpu())
    else:
        one_hot_labels = one_hot(labels, num_classes = logits.shape[1])
        for c in range(logits.shape[1]): #n_classes
            tensorboard_logs['val_auroc_class_' + str(c)] = roc_auc_score(one_hot_labels[:, c].cpu(), logits[:, c].cpu())
    # Re-initialize (non-trainable) cc_embeds for the next epoch
    if not self.hparams['trainable_cc']:
        self.init_all_embeddings(split = 'train_val', trainable = self.hparams['trainable_cc'])
    # Optionally re-initialize anchor patches each epoch (defaults to false)
    if self.hparams['resample_anchor_patches']:
        if self.hparams['use_neighborhood']:
            self.anchors_neigh_int, self.anchors_neigh_border = init_anchors_neighborhood('train_val', self.hparams, self.networkx_graph, self.device, self.train_cc_ids, self.val_cc_ids, self.test_cc_ids)
        if self.hparams['use_position']:
            self.anchors_pos_int = init_anchors_pos_int('train_val', self.hparams, self.networkx_graph, self.device, self.train_sub_G, self.val_sub_G, self.test_sub_G)
            self.anchors_pos_ext = init_anchors_pos_ext(self.hparams, self.networkx_graph, self.device)
        if self.hparams['use_structure']:
            self.anchors_structure = init_anchors_structure(self.hparams, self.structure_anchors, self.int_structure_anchor_random_walks, self.bor_structure_anchor_random_walks)
    self.metric_scores.append(tensorboard_logs) # keep track for optuna
    return {'avg_val_loss': avg_loss, 'log': tensorboard_logs}
def test_epoch_end(self, outputs):
    '''
    Called at the end of the test epoch.

    Aggregates per-batch logits/labels, computes epoch-level metrics (F1,
    accuracy, AUROC overall and per class), stores them on self.test_results,
    and returns avg loss + tensorboard logs.
    '''
    # aggregate the logits, labels, and metrics for all batches
    logits = torch.cat([x['test_logits'] for x in outputs], dim=0)
    labels = torch.cat([x['test_labels'] for x in outputs], dim=0)
    macro_f1 = subgraph_utils.calc_f1(logits, labels, avg_type='macro', multilabel_binarizer=self.multilabel_binarizer).squeeze()
    micro_f1 = subgraph_utils.calc_f1(logits, labels, avg_type='micro', multilabel_binarizer=self.multilabel_binarizer).squeeze()
    acc = subgraph_utils.calc_accuracy(logits, labels, multilabel_binarizer=self.multilabel_binarizer).squeeze()
    # calc AUC (one-vs-rest for multilabel and multiclass)
    if self.multilabel:
        auroc = roc_auc_score(labels.cpu(), torch.sigmoid(logits).cpu(), multi_class = 'ovr')
    elif len(torch.unique(labels)) == 2: #binary case
        auroc = roc_auc_score(labels.cpu(), F.softmax(logits, dim=1).cpu()[:,1])
    else: #multiclass
        auroc = roc_auc_score(labels.cpu(), F.softmax(logits, dim=1).cpu(), multi_class = 'ovr')
    # get average loss, acc, and macro F1 over batches
    avg_loss = torch.stack([x['test_loss'] for x in outputs]).mean().cpu()
    avg_acc = torch.stack([x['test_acc'] for x in outputs]).mean()
    avg_macro_f1 = torch.stack([x['test_macro_f1'] for x in outputs]).mean()
    tensorboard_logs = {'test_loss': avg_loss, 'test_micro_f1': micro_f1, 'test_macro_f1': macro_f1, \
        'test_acc': acc, 'avg_test_acc': avg_acc, 'test_avg_macro_f1':avg_macro_f1, 'test_auroc':auroc }
    # add ROC for each class to tensorboard
    if self.multilabel:
        for c in range(logits.shape[1]): #n_classes
            tensorboard_logs['test_auroc_class_' + str(c)] = roc_auc_score(labels[:, c].cpu(), torch.sigmoid(logits)[:, c].cpu())
    else:
        one_hot_labels = one_hot(labels, num_classes = logits.shape[1])
        for c in range(logits.shape[1]): #n_classes
            tensorboard_logs['test_auroc_class_' + str(c)] = roc_auc_score(one_hot_labels[:, c].cpu(), logits[:, c].cpu())
    # stash results so callers can read them after trainer.test()
    self.test_results = tensorboard_logs
    return {'avg_test_loss': avg_loss, 'log': tensorboard_logs}
##################################################
# Read in data
def reindex_data(self, data):
    '''
    Shift every node id up by one so the train/val/test sets are 1-indexed
    instead of 0-indexed (index 0 is reserved for padding).
    '''
    return [[node_id + 1 for node_id in subgraph] for subgraph in data]
def read_data(self):
    '''
    Read in the base graph, the subgraphs & their associated labels, and the
    pretrained node embeddings; also sets self.multilabel(_binarizer),
    self.num_classes, and 1-indexes all node ids (0 is reserved for padding).
    '''
    # read networkx graph from edge list
    self.networkx_graph = nx.read_edgelist(config.PROJECT_ROOT / self.graph_path)
    # read in list of node ids for each subgraph & their labels
    self.train_sub_G, self.train_sub_G_label, self.val_sub_G, \
        self.val_sub_G_label, self.test_sub_G, self.test_sub_G_label \
        = subgraph_utils.read_subgraphs(config.PROJECT_ROOT / self.subgraph_path)
    # check if the dataset is multilabel (e.g. HPO-NEURO); labels come back as a
    # list of label-lists in that case
    if type(self.train_sub_G_label) == list:
        self.multilabel=True
        all_labels = self.train_sub_G_label + self.val_sub_G_label + self.test_sub_G_label
        self.multilabel_binarizer = MultiLabelBinarizer().fit(all_labels)
    else:
        self.multilabel = False
        self.multilabel_binarizer = None
    # Optionally subset the data for debugging purposes to the batch size
    if 'subset_data' in self.hparams and self.hparams['subset_data']:
        print("****WARNING: SUBSETTING DATA*****")
        self.train_sub_G, self.train_sub_G_label, self.val_sub_G, \
            self.val_sub_G_label, self.test_sub_G, self.test_sub_G_label = self.train_sub_G[0:self.hparams['batch_size']], self.train_sub_G_label[0:self.hparams['batch_size']], self.val_sub_G[0:self.hparams['batch_size']], \
            self.val_sub_G_label[0:self.hparams['batch_size']], self.test_sub_G[0:self.hparams['batch_size']], self.test_sub_G_label[0:self.hparams['batch_size']]
    # get the number of classes for prediction
    if type(self.train_sub_G_label) == list: # if multi-label
        self.num_classes = max([max(l) for l in self.train_sub_G_label + self.val_sub_G_label + self.test_sub_G_label]) + 1
    else:
        self.num_classes = int(torch.max(torch.cat((self.train_sub_G_label, self.val_sub_G_label, self.test_sub_G_label)))) + 1
    # renumber nodes to start with index 1 instead of 0 (0 is used for padding)
    mapping = {n:int(n)+1 for n in self.networkx_graph.nodes()}
    self.networkx_graph = nx.relabel_nodes(self.networkx_graph, mapping)
    self.train_sub_G = self.reindex_data(self.train_sub_G)
    self.val_sub_G = self.reindex_data(self.val_sub_G)
    self.test_sub_G = self.reindex_data(self.test_sub_G)
    # Initialize pretrained node embeddings (loaded onto CPU)
    pretrained_node_embeds = torch.load(config.PROJECT_ROOT / self.embedding_path, torch.device('cpu')) # feature matrix should be initialized to the node embeddings
    self.hparams['node_embed_size'] = pretrained_node_embeds.shape[1]
    zeros = torch.zeros(1, pretrained_node_embeds.shape[1])
    embeds = torch.cat((zeros, pretrained_node_embeds), 0) #there's a zeros in the first index for padding
    # optionally freeze the node embeddings
    self.node_embeddings = nn.Embedding.from_pretrained(embeds, freeze=self.hparams['freeze_node_embeds'], padding_idx=config.PAD_VALUE).to(self.device)
    print('--- Finished reading in data ---')
##################################################
# Initialize connected components & associated embeddings for each channel in SubGNN
def initialize_cc_ids(self, subgraph_ids):
    '''
    Build the padded 3D tensor of connected component node ids.

    Input:
        - subgraph_ids: list of subgraphs, each a list of node ids
    Output:
        - reshaped_cc_ids_pad: LongTensor of shape (n_subgraphs, max_n_cc,
          max_len_cc), padded with config.PAD_VALUE
    '''
    n_subgraphs = len(subgraph_ids)
    # split each subgraph into its connected components (one id-tensor per component)
    cc_id_list = []
    for node_ids in subgraph_ids:
        induced = nx.subgraph(self.networkx_graph, node_ids)
        cc_id_list.append([torch.LongTensor(list(component)) for component in nx.connected_components(induced)])
    # pad every subgraph's component list out to the max number of components
    max_n_cc = max(len(components) for components in cc_id_list)
    for components in cc_id_list:
        while len(components) < max_n_cc:
            components.append(torch.LongTensor([config.PAD_VALUE]))
    # flatten, pad components to a common length, then reshape back per subgraph
    flat_components = [cc for components in cc_id_list for cc in components]
    assert len(flat_components) % max_n_cc == 0
    padded = pad_sequence(flat_components, batch_first=True, padding_value=config.PAD_VALUE) # (n_subgraphs * max_n_cc, max_cc_len)
    return padded.view(n_subgraphs, max_n_cc, -1) # (n_subgraphs, max_n_cc, max_cc_len)
def initialize_cc_embeddings(self, cc_id_list, aggregator='sum'):
    '''
    Initialize connected component embeddings as either the sum or max of the
    node embeddings in each connected component.

    Input:
        - cc_id_list: LongTensor of shape (n_subgraphs, max_n_cc, max_len_cc)
        - aggregator: 'sum' or 'max' over each component's node embeddings
    Output:
        - tensor of shape (n_subgraphs, max_n_cc, node embedding dim)
    Raises:
        - NotImplementedError for an unrecognized aggregator
    '''
    # (n_subgraphs, max_n_cc, max_len_cc, embed_dim)
    node_embeds = self.node_embeddings(cc_id_list.to(self.device))
    if aggregator == 'sum':
        return torch.sum(node_embeds, dim=2)
    elif aggregator == 'max':
        return torch.max(node_embeds, dim=2)[0]
    # BUG FIX: previously an unknown aggregator fell through and implicitly
    # returned None; fail loudly instead (consistent with the rest of the file)
    raise NotImplementedError(f"Unknown connected component aggregator: {aggregator}")
def initialize_channel_embeddings(self, cc_embeddings, trainable = False):
    '''
    Initialize the per-channel CC embeddings for the six SubGNN channels:
    neighborhood (N), structure (S), position (P), each with an internal (I)
    and border (B) variant.

    Args:
        cc_embeddings: tensor of CC embeddings, shape (n_subgraphs, max_n_cc, dim)
        trainable: if True, each channel receives its own independent trainable
            Parameter copy; otherwise all six channels alias the same input tensor
    Returns:
        6-tuple (N_I, N_B, S_I, S_B, P_I, P_B) of CC embedding tensors
    '''
    if trainable:
        # independent detached copies so the channels can diverge during training
        return tuple(Parameter(cc_embeddings.detach().clone()) for _ in range(6))
    # non-trainable: every channel shares the same underlying tensor (as before)
    return (cc_embeddings,) * 6
def init_all_embeddings(self, split = 'all', trainable = False):
    '''
    Initialize the CC and channel-specific CC embeddings for the subgraphs in the specified split
    ('all', 'train_val', 'train', 'val', or 'test')

    Side effects: sets self.<split>_<channel>_cc_embed for each channel in
    {N_I, N_B, S_I, S_B, P_I, P_B} on every selected split.
    NOTE: only the train-split channel embeddings honor the `trainable` flag;
    val/test channel embeddings are always created with trainable=False.
    '''
    if split in ['all','train_val','train']:
        # initialize CC embeddings (node embeddings pooled per connected component)
        train_cc_embeddings = self.initialize_cc_embeddings(self.train_cc_ids, self.hparams['cc_aggregator'])
        # initialize CC embeddings for each channel
        self.train_N_I_cc_embed, self.train_N_B_cc_embed, self.train_S_I_cc_embed, \
        self.train_S_B_cc_embed, self.train_P_I_cc_embed, self.train_P_B_cc_embed \
        = self.initialize_channel_embeddings(train_cc_embeddings, trainable)
    if split in ['all','train_val','val']:
        val_cc_embeddings = self.initialize_cc_embeddings( self.val_cc_ids, self.hparams['cc_aggregator'])
        self.val_N_I_cc_embed, self.val_N_B_cc_embed, self.val_S_I_cc_embed, \
        self.val_S_B_cc_embed, self.val_P_I_cc_embed, self.val_P_B_cc_embed \
        = self.initialize_channel_embeddings(val_cc_embeddings, trainable=False)
    if split in ['all','test']:
        test_cc_embeddings = self.initialize_cc_embeddings( self.test_cc_ids, self.hparams['cc_aggregator'])
        self.test_N_I_cc_embed, self.test_N_B_cc_embed, self.test_S_I_cc_embed, \
        self.test_S_B_cc_embed, self.test_P_I_cc_embed, self.test_P_B_cc_embed \
        = self.initialize_channel_embeddings(test_cc_embeddings, trainable=False)
##################################################
# Initialize node border sets surrounding each CC for each subgraph
def initialize_border_sets(self, fname, cc_ids, radius, ego_graph_dict=None):
    '''
    Creates and saves to file a matrix containing the node ids in the k-hop border set of each CC for each subgraph
    The shape of the resulting matrix, which is padded to the max border set size, is (n_subgraphs, max_n_cc, max_border_set_sz)

    Args:
        fname: path where the resulting matrix is saved via np.save
        cc_ids: LongTensor of CC node ids, shape (n_subgraphs, max_n_cc, max_len_cc)
        radius: number of hops k defining the border neighborhood
        ego_graph_dict: optional precomputed ego-graph lookup forwarded to the helper
    '''
    n_subgraphs, max_n_cc, _ = cc_ids.shape
    all_border_sets = []
    # for each component in each subgraph, calculate the k-hop node border of the connected component
    for s, subgraph in enumerate(cc_ids):
        border_sets = []
        for c, component in enumerate(subgraph):
            # radius specifies the size of the border set - i.e. the k number of hops away the node can be from any node in the component to be in the border set
            component_border = subgraph_utils.get_component_border_neighborhood_set(self.networkx_graph, component, radius, ego_graph_dict)
            border_sets.append(component_border)
        all_border_sets.append(border_sets)
    #fill in matrix with padding
    max_border_set_len = max([len(s) for l in all_border_sets for s in l])
    border_set_matrix = torch.zeros((n_subgraphs, max_n_cc, max_border_set_len), dtype=torch.long).fill_(config.PAD_VALUE)
    for s, subgraph in enumerate(all_border_sets):
        for c,component in enumerate(subgraph):
            # right-pad each border set with PAD_VALUE up to the max border set length
            fill_len = max_border_set_len - len(component)
            border_set_matrix[s,c,:] = torch.cat([torch.LongTensor(list(component)),torch.LongTensor((fill_len)).fill_(config.PAD_VALUE)])
    # save border set to file
    np.save(fname, border_set_matrix.cpu().numpy())
    return border_set_matrix # n_subgraphs, max_n_cc, max_border_set_sz
def get_border_sets(self, split):
    '''
    Returns the node ids in the k-hop border of each subgraph (where k = neigh_sample_border_size) for the train, val, and test subgraphs

    Side effects: sets self.{train,val,test}_N_border for the requested split
    (and always resets the self.{train,val,test}_P_border attributes to None).
    Cached border sets are loaded from disk unless hparams['compute_similarities']
    forces recomputation.
    '''
    # location where similarities are stored
    sim_path = config.PROJECT_ROOT / self.similarities_path
    self.train_P_border = None
    self.val_P_border = None
    self.test_P_border = None
    # We need the border sets if we're using the neighborhood channel or if we're using the edit distance similarity function in the structure channel
    if self.hparams['use_neighborhood'] or (self.hparams['use_structure'] and self.hparams['structure_similarity_fn'] == 'edit_distance'):
        # load ego graphs dictionary
        ego_graph_path = config.PROJECT_ROOT / self.ego_graph_path
        if ego_graph_path.exists():
            with open(str(ego_graph_path), 'r') as f:
                ego_graph_dict = json.load(f)
            # JSON keys are strings; convert back to int node ids
            ego_graph_dict = {int(key): value for key, value in ego_graph_dict.items()}
        else: ego_graph_dict = None
        # either load in the border sets from file or recompute the border sets
        train_neigh_path = sim_path / (str(self.hparams["neigh_sample_border_size"]) + '_' + str(config.PAD_VALUE) + '_train_border_set.npy')
        val_neigh_path = sim_path / (str(self.hparams["neigh_sample_border_size"]) + '_' + str(config.PAD_VALUE) + '_val_border_set.npy')
        test_neigh_path = sim_path / (str(self.hparams["neigh_sample_border_size"]) + '_' + str(config.PAD_VALUE) + '_test_border_set.npy')
        if split == 'test':
            if test_neigh_path.exists() and not self.hparams['compute_similarities']:
                self.test_N_border = torch.tensor(np.load(test_neigh_path, allow_pickle=True))
            else:
                self.test_N_border = self.initialize_border_sets(test_neigh_path, self.test_cc_ids, self.hparams["neigh_sample_border_size"], ego_graph_dict)
        elif split == 'train_val':
            if train_neigh_path.exists() and not self.hparams['compute_similarities']:
                self.train_N_border = torch.tensor(np.load(train_neigh_path, allow_pickle=True))
            else:
                self.train_N_border = self.initialize_border_sets(train_neigh_path, self.train_cc_ids, self.hparams["neigh_sample_border_size"], ego_graph_dict)
            if val_neigh_path.exists() and not self.hparams['compute_similarities']:
                self.val_N_border = torch.tensor(np.load(val_neigh_path, allow_pickle=True))
            else:
                self.val_N_border = self.initialize_border_sets(val_neigh_path, self.val_cc_ids, self.hparams["neigh_sample_border_size"], ego_graph_dict)
    else: # otherwise, we can just set these to None
        self.train_N_border = None
        self.val_N_border = None
        self.test_N_border = None
##################################################
# Compute similarities between the anchor patches & the subgraphs
def compute_shortest_path_similarities(self, fname, shortest_paths, cc_ids):
    '''
    Creates a similarity matrix with shape (n_subgraphs, max num cc, number of nodes in graph) that stores the shortest
    path between each cc (for each subgraph) and all nodes in the graph.

    Args:
        fname: path (pathlib.Path) where the similarity matrix is saved via np.save
        shortest_paths: precomputed all-pairs shortest-path matrix, rows indexed by (node id - 1)
        cc_ids: LongTensor of CC node ids, shape (n_subgraphs, max_n_cc, max_len_cc)
    '''
    print('---- Precomputing Shortest Path Similarities ----')
    n_subgraphs, max_n_cc, _ = cc_ids.shape
    n_nodes_in_graph = len(self.networkx_graph.nodes()) #get number of nodes in the underlying base graph
    # mask marking which CC slots hold a real component (first entry not padding)
    cc_id_mask = (cc_ids[:,:,0] != config.PAD_VALUE)
    similarities = torch.zeros((n_subgraphs, max_n_cc, n_nodes_in_graph)) \
        .fill_(config.PAD_VALUE)
    #NOTE: could use multiprocessing to speed up this calculation
    for s, subgraph in enumerate(cc_ids):
        for c, component in enumerate(subgraph):
            non_padded_component = component[component != config.PAD_VALUE].cpu().numpy() #remove padding
            if len(non_padded_component) > 0:
                # CC-to-node distance = min over the component's nodes of the pairwise shortest paths
                # NOTE: indexing is off by 1 bc node ids are indexed starting at 1
                similarities[s,c,:] = torch.tensor(np.min(shortest_paths[non_padded_component - 1,:], axis=0))
    # add padding (because each subgraph has variable # CC) & save to file
    if not fname.parent.exists(): fname.parent.mkdir(parents=True)
    print('---- Saving Shortest Path Similarities ----')
    similarities[~cc_id_mask] = config.PAD_VALUE
    np.save(fname, similarities.cpu().numpy())
    return similarities
def compute_structure_patch_similarities(self, degree_dict, fname, internal, cc_ids, sim_path, dataset_type, border_set=None):
    '''
    Calculate the similarity between the sampled anchor patches and the connected components
    The default structure similarity function is DTW over the patch and component degree sequences.
    Returns tensor of similarities of shape (n_subgraphs, max_n_cc, n anchor patches)

    Args:
        degree_dict: precomputed {node id: degree} map for the base graph (or None)
        fname: path where the similarity matrix is saved via np.save
        internal: if True use internal degree sequences, otherwise border (external) ones
        cc_ids: LongTensor of CC node ids, shape (n_subgraphs, max_n_cc, max_len_cc)
        sim_path, dataset_type, border_set: not used by the DTW branch; kept for
            alternative similarity functions
    '''
    print('---Computing Structure Patch Similarities---')
    n_anchors = self.structure_anchors.shape[0]
    n_subgraphs, max_n_cc, _ = cc_ids.shape
    # mask marking which CC slots hold a real component (first entry not padding)
    cc_id_mask = (cc_ids[:,:,0] != config.PAD_VALUE)
    # the default structure similarity function is dynamic time warping (DTW) over the degree sequences of the anchor patches & connected components
    if self.hparams['structure_similarity_fn'] == 'dtw':
        # store the degree sequence for each anchor patch into a dict
        anchor_degree_seq_dict = {}
        for a, anchor_patch in enumerate(self.structure_anchors):
            anchor_degree_seq_dict[a] = gamma.get_degree_sequence(self.networkx_graph, anchor_patch, degree_dict, internal=internal)
        # store the degree sequence for each connected component into a dict
        component_degree_seq_dict = {}
        cc_ids_reshaped = cc_ids.view(n_subgraphs*max_n_cc, -1)
        for c, component in enumerate(cc_ids_reshaped):
            component_degree_seq_dict[c] = gamma.get_degree_sequence(self.networkx_graph, component, degree_dict, internal=internal)
        # to use multiprocessing to calculate the similarity, we first create a list of all of the inputs
        inputs = []
        for c in range(len(cc_ids_reshaped)):
            for a in range(len(self.structure_anchors)):
                inputs.append((component_degree_seq_dict[c], anchor_degree_seq_dict[a]))
        # use starmap to calculate DTW between the anchor patches & connected components' degree sequences
        with multiprocessing.Pool(processes=self.hparams['n_processes']) as pool:
            sims = pool.starmap(gamma.calc_dtw, inputs)
        # reshape similarities to a matrix of shape (n_subgraphs, max_n_cc, n anchor patches)
        similarities = torch.tensor(sims, dtype=torch.float).view(n_subgraphs, max_n_cc, -1)
    else:
        # other structure similarity functions can be added here
        raise NotImplementedError
    # add padding & save to file
    print('---- Saving Similarities ----')
    if not fname.parent.exists(): fname.parent.mkdir(parents=True)
    similarities[~cc_id_mask] = config.PAD_VALUE
    np.save(fname, similarities.cpu().numpy())
    return similarities
def get_similarities(self, split):
    '''
    For the N/P channels: precomputes the shortest paths between all connected components (for all subgraphs) and all nodes in the graph
    For the S channel: precomputes structure anchor patches & random walks as well as structure similarity calculations between the anchor patches and all connected components

    Side effects: sets self.{train,val,test}_neigh_pos_similarities, self.structure_anchors,
    self.{int,bor}_structure_anchor_random_walks, and self.{train,val,test}_{int,bor}_struc_similarities
    for the requested split; attributes belonging to disabled channels are set to None.
    Cached results are loaded from disk unless hparams['compute_similarities'] forces recomputation.
    '''
    # path where similarities are stored
    sim_path = config.PROJECT_ROOT / self.similarities_path
    # If we're using the position or neighborhood channels, we need to calculate the relevant shortest path similarities
    if self.hparams['use_position'] or self.hparams['use_neighborhood']:
        # read in precomputed shortest paths between all nodes in the graph
        pairwise_shortest_paths_path = config.PROJECT_ROOT / self.shortest_paths_path
        pairwise_shortest_paths = np.load(pairwise_shortest_paths_path, allow_pickle=True)
        # Read in precomputed similarities if they exist. If they don't, calculate them
        train_np_path = sim_path / (str(config.PAD_VALUE) + '_train_similarities.npy')
        val_np_path = sim_path / (str(config.PAD_VALUE) + '_val_similarities.npy')
        test_np_path = sim_path / (str(config.PAD_VALUE) + '_test_similarities.npy')
        if split == 'test':
            if test_np_path.exists() and not self.hparams['compute_similarities']:
                print('--- Loading Position Similarities from File ---')
                self.test_neigh_pos_similarities = torch.tensor(np.load(test_np_path, allow_pickle=True))#.to(self.device)
            else:
                self.test_neigh_pos_similarities = self.compute_shortest_path_similarities(test_np_path, pairwise_shortest_paths, self.test_cc_ids)
        elif split == 'train_val':
            if train_np_path.exists() and not self.hparams['compute_similarities']:
                print('--- Loading Train Position Similarities from File ---')
                self.train_neigh_pos_similarities = torch.tensor(np.load(train_np_path, allow_pickle=True))#.to(self.device)
            else:
                self.train_neigh_pos_similarities = self.compute_shortest_path_similarities(train_np_path, pairwise_shortest_paths, self.train_cc_ids)
            if val_np_path.exists() and not self.hparams['compute_similarities']:
                print('--- Loading Val Position Similarities from File ---')
                self.val_neigh_pos_similarities = torch.tensor(np.load(val_np_path, allow_pickle=True))#.to(self.device)
            else:
                self.val_neigh_pos_similarities = self.compute_shortest_path_similarities(val_np_path, pairwise_shortest_paths, self.val_cc_ids)
    else: # if we're only using the structure channel, we can just set these to None
        self.train_neigh_pos_similarities = None
        self.val_neigh_pos_similarities = None
        self.test_neigh_pos_similarities = None
    if self.hparams['use_structure']:
        # load in degree dictionary {node id: degree}
        degree_path = config.PROJECT_ROOT / self.degree_dict_path
        if degree_path.exists():
            with open(str(degree_path), 'r') as f:
                degree_dict = json.load(f)
            # JSON keys are strings; convert back to int node ids
            degree_dict = {int(key): value for key, value in degree_dict.items()}
        else: degree_dict = None
        # (1) sample structure anchor patches
        # sample walk len: length of the random walk used to sample the anchor patches
        # structure_patch_type: either 'triangular_random_walk' (default) or 'ego_graph'
        # max_sim_epochs: presumably scales how many anchor patches are presampled up front — TODO(review): confirm
        struc_anchor_patches_path = sim_path / ('struc_patches_' + str(self.hparams['sample_walk_len']) + '_' + self.hparams['structure_patch_type'] + '_' + str(self.hparams['max_sim_epochs']) + '.npy')
        if struc_anchor_patches_path.exists() and not self.hparams['compute_similarities']:
            self.structure_anchors = torch.tensor(np.load(struc_anchor_patches_path, allow_pickle=True))
        else:
            self.structure_anchors = sample_structure_anchor_patches(self.hparams, self.networkx_graph, self.device, self.hparams['max_sim_epochs'])
            np.save(struc_anchor_patches_path, self.structure_anchors.cpu().numpy())
        # (2) perform internal and border random walks over sampled anchor patches
        #border
        bor_struc_patch_random_walks_path = sim_path / ('bor_struc_patch_random_walks_' + str(self.hparams['n_triangular_walks']) + '_' + str(self.hparams['random_walk_len']) + '_' + str(self.hparams['sample_walk_len']) + '_' + self.hparams['structure_patch_type'] + '_' + str(self.hparams['max_sim_epochs']) + '.npy')
        if bor_struc_patch_random_walks_path.exists() and not self.hparams['compute_similarities']:
            self.bor_structure_anchor_random_walks = torch.tensor(np.load(bor_struc_patch_random_walks_path, allow_pickle=True))#.to(self.device)
        else:
            self.bor_structure_anchor_random_walks = perform_random_walks(self.hparams, self.networkx_graph, self.structure_anchors, inside=False)
            np.save(bor_struc_patch_random_walks_path, self.bor_structure_anchor_random_walks.cpu().numpy())
        #internal
        int_struc_patch_random_walks_path = sim_path / ('int_struc_patch_random_walks_' + str(self.hparams['n_triangular_walks']) + '_' + str(self.hparams['random_walk_len']) + '_' + str(self.hparams['sample_walk_len']) + '_' + self.hparams['structure_patch_type'] + '_' + str(self.hparams['max_sim_epochs']) + '.npy')
        if int_struc_patch_random_walks_path.exists() and not self.hparams['compute_similarities']:
            self.int_structure_anchor_random_walks = torch.tensor(np.load(int_struc_patch_random_walks_path, allow_pickle=True))#.to(self.device)
        else:
            self.int_structure_anchor_random_walks = perform_random_walks(self.hparams, self.networkx_graph, self.structure_anchors, inside=True)
            np.save(int_struc_patch_random_walks_path, self.int_structure_anchor_random_walks.cpu().numpy())
        # (3) calculate similarities between anchor patches and connected components
        # filenames where outputs will be stored
        struc_sim_type_fname = '_' + self.hparams['structure_similarity_fn'] if self.hparams['structure_similarity_fn'] != 'dtw' else '' #we only add info about the structure similarity function to the filename if it's not the default dtw
        train_int_struc_path = sim_path / ('int_struc_' + str(self.hparams['sample_walk_len']) + '_' + self.hparams['structure_patch_type'] + '_' + str(self.hparams['max_sim_epochs']) + '_' + str(config.PAD_VALUE) + struc_sim_type_fname + '_train_similarities.npy')
        val_int_struc_path = sim_path / ('int_struc_' + str(self.hparams['sample_walk_len']) + '_' + self.hparams['structure_patch_type'] + '_' + str(self.hparams['max_sim_epochs']) + '_' + str(config.PAD_VALUE) + struc_sim_type_fname + '_val_similarities.npy')
        test_int_struc_path = sim_path / ('int_struc_' + str(self.hparams['sample_walk_len']) + '_' + self.hparams['structure_patch_type'] + '_' + str(self.hparams['max_sim_epochs']) + '_' + str(config.PAD_VALUE) + struc_sim_type_fname + '_test_similarities.npy')
        train_bor_struc_path = sim_path / ('bor_struc_' + str(self.hparams['sample_walk_len']) + '_' + self.hparams['structure_patch_type'] + '_' + str(self.hparams['max_sim_epochs']) + '_' + str(config.PAD_VALUE) + struc_sim_type_fname + '_train_similarities.npy')
        val_bor_struc_path = sim_path / ('bor_struc_' + str(self.hparams['sample_walk_len']) + '_' + self.hparams['structure_patch_type'] + '_' + str(self.hparams['max_sim_epochs']) + '_' + str(config.PAD_VALUE) + struc_sim_type_fname + '_val_similarities.npy')
        test_bor_struc_path = sim_path / ('bor_struc_' + str(self.hparams['sample_walk_len']) + '_' + self.hparams['structure_patch_type'] + '_' + str(self.hparams['max_sim_epochs']) + '_' + str(config.PAD_VALUE) + struc_sim_type_fname + '_test_similarities.npy')
        # internal structure similarities
        if split == 'test':
            if test_int_struc_path.exists() and not self.hparams['compute_similarities']:
                print('--- Loading Test Structure Similarities from File ---', flush=True)
                self.test_int_struc_similarities = torch.tensor(np.load(test_int_struc_path, allow_pickle=True))#.to(self.device)
            else:
                self.test_int_struc_similarities = self.compute_structure_patch_similarities(degree_dict, test_int_struc_path, True, self.test_cc_ids, sim_path, 'test', self.test_N_border)
        elif split == 'train_val':
            if train_int_struc_path.exists() and not self.hparams['compute_similarities']:
                print('--- Loading Train Structure Similarities from File ---', flush=True)
                self.train_int_struc_similarities = torch.tensor(np.load(train_int_struc_path, allow_pickle=True))#.to(self.device)
            else:
                self.train_int_struc_similarities = self.compute_structure_patch_similarities(degree_dict, train_int_struc_path, True, self.train_cc_ids, sim_path, 'train', self.train_N_border)
            if val_int_struc_path.exists() and not self.hparams['compute_similarities']:
                print('--- Loading Val Structure Similarities from File ---', flush=True)
                self.val_int_struc_similarities = torch.tensor(np.load(val_int_struc_path, allow_pickle=True))#.to(self.device)
            else:
                self.val_int_struc_similarities = self.compute_structure_patch_similarities(degree_dict, val_int_struc_path, True, self.val_cc_ids, sim_path, 'val', self.val_N_border)
        print('Done computing internal structure similarities', flush=True)
        # read in structure similarities
        print('computing border structure sims')
        # border structure similarities
        if split == 'test':
            if test_bor_struc_path.exists() and not self.hparams['compute_similarities']:
                print('--- Loading Test Structure Similarities from File ---')
                self.test_bor_struc_similarities = torch.tensor(np.load(test_bor_struc_path, allow_pickle=True))
            else:
                self.test_bor_struc_similarities = self.compute_structure_patch_similarities(degree_dict, test_bor_struc_path, False, self.test_cc_ids, sim_path, 'test', self.test_N_border)
        if split == 'train_val':
            if train_bor_struc_path.exists() and not self.hparams['compute_similarities']:
                print('--- Loading Train Structure Similarities from File ---')
                self.train_bor_struc_similarities = torch.tensor(np.load(train_bor_struc_path, allow_pickle=True))
            else:
                self.train_bor_struc_similarities = self.compute_structure_patch_similarities(degree_dict, train_bor_struc_path, False, self.train_cc_ids, sim_path,'train', self.train_N_border)
            if val_bor_struc_path.exists() and not self.hparams['compute_similarities']:
                print('--- Loading Val Structure Similarities from File ---')
                self.val_bor_struc_similarities = torch.tensor(np.load(val_bor_struc_path, allow_pickle=True))
            else:
                self.val_bor_struc_similarities = self.compute_structure_patch_similarities(degree_dict, val_bor_struc_path, False, self.val_cc_ids, sim_path, 'val', self.val_N_border)
        print('Done computing border structure similarities')
    else: # if we're not using the structure channel, we can just set these to None
        self.structure_anchors = None
        self.train_int_struc_similarities = None
        self.val_int_struc_similarities = None
        self.test_int_struc_similarities = None
        self.train_bor_struc_similarities = None
        self.val_bor_struc_similarities = None
        self.test_bor_struc_similarities = None
##################################################
# Prepare data
def prepare_test_data(self):
    '''
    Same as prepare_data, but for test dataset
    Initializes test CC ids & embeddings, border sets, similarities, and anchor patches.
    '''
    print('--- Started Preparing Test Data ---')
    self.test_cc_ids = self.initialize_cc_ids(self.test_sub_G)
    print('--- Initialize embeddings ---')
    # NOTE(review): init_all_embeddings ignores `trainable` for the test split
    # (test channel embeddings are always created with trainable=False)
    self.init_all_embeddings(split = 'test', trainable = self.hparams['trainable_cc'])
    print('--- Getting Border Sets ---')
    self.get_border_sets(split='test')
    print('--- Getting Similarities ---')
    self.get_similarities(split='test')
    print('--- Initializing Anchor Patches ---')
    # note that we don't need to initialize border position & structure anchor patches because those are shared
    if self.hparams['use_neighborhood']:
        self.anchors_neigh_int, self.anchors_neigh_border = init_anchors_neighborhood('test', \
            self.hparams, self.networkx_graph, self.device, None, None, \
            self.test_cc_ids, None, None, self.test_N_border)
    else: self.anchors_neigh_int, self.anchors_neigh_border = None, None
    if self.hparams['use_position']:
        self.anchors_pos_int = init_anchors_pos_int('test', self.hparams, self.networkx_graph, self.device, self.train_sub_G, self.val_sub_G, self.test_sub_G)
    else: self.anchors_pos_int = None
    print('--- Finished Preparing Test Data ---')
def prepare_data(self):
    '''
    Initialize connected components, precomputed similarity calculations, and anchor patches
    for the train/val splits (the test split is prepared lazily by test_dataloader).
    '''
    print('--- Started Preparing Data ---', flush=True)
    # Intialize connected component matrix (n_subgraphs, max_n_cc, max_len_cc)
    self.train_cc_ids = self.initialize_cc_ids(self.train_sub_G)
    self.val_cc_ids = self.initialize_cc_ids(self.val_sub_G)
    # initialize embeddings for each cc
    # 'trainable_cc' flag determines whether the cc embeddings are trainable
    print('--- Initializing CC Embeddings ---', flush=True)
    self.init_all_embeddings(split = 'train_val', trainable = self.hparams['trainable_cc'])
    # Initialize border sets for each cc
    print('--- Initializing CC Border Sets ---', flush=True)
    self.get_border_sets(split='train_val')
    # calculate similarities
    print('--- Getting Similarities ---', flush=True)
    self.get_similarities(split='train_val')
    # Initialize neighborhood, position, and structure anchor patches
    print('--- Initializing Anchor Patches ---', flush=True)
    if self.hparams['use_neighborhood']:
        self.anchors_neigh_int, self.anchors_neigh_border = init_anchors_neighborhood('train_val', \
            self.hparams, self.networkx_graph, self.device, self.train_cc_ids, self.val_cc_ids, \
            None, self.train_N_border, self.val_N_border, None) # we pass in None for the test_N_border
    else: self.anchors_neigh_int, self.anchors_neigh_border = None, None
    if self.hparams['use_position']:
        self.anchors_pos_int = init_anchors_pos_int('train_val', self.hparams, self.networkx_graph, self.device, self.train_sub_G, self.val_sub_G, self.test_sub_G)
        self.anchors_pos_ext = init_anchors_pos_ext(self.hparams, self.networkx_graph, self.device)
    else: self.anchors_pos_int, self.anchors_pos_ext = None, None
    if self.hparams['use_structure']:
        # pass in precomputed sampled structure anchor patches and random walks from which to further subsample
        self.anchors_structure = init_anchors_structure(self.hparams, self.structure_anchors, self.int_structure_anchor_random_walks, self.bor_structure_anchor_random_walks)
    else: self.anchors_structure = None
    print('--- Finished Preparing Data ---', flush=True)
##################################################
# Data loaders
def _pad_collate(self, batch):
    '''
    Stacks all examples in the batch to be in shape (batch_sz, ..., ...)
    & trims padding from border sets & connected component tensors, which were originally
    padded to the max length across the whole dataset, not the batch

    Returns a dict with keys: subgraph_ids, cc_ids, N_border, NP_sim, I_S_sim,
    B_S_sim, subgraph_idx, label. Fields belonging to disabled channels are None.
    '''
    subgraph_ids, con_component_ids, N_border, NP_sim, I_S_sim, B_S_sim, idx, labels = zip(*batch)
    # subgraph_ids: (batch_sz, n_nodes_in_subgraph)
    # con_component_ids: (batch_sz, n_con_components, n_nodes_in_cc)
    # con_component_embeds: (batch_sz, n_con_components, hidden_dim)
    # pad subgraph ids in batch to be of shape (batch_sz, max_subgraph_len)
    subgraph_ids_pad = pad_sequence(subgraph_ids, batch_first=True, padding_value=config.PAD_VALUE)
    # stack similarity matrics
    # (a disabled channel yields None entries, in which case the whole field becomes None)
    if None in NP_sim: NP_sim = None
    else: NP_sim = torch.stack(NP_sim)
    if None in I_S_sim: I_S_sim = None
    else: I_S_sim = torch.stack(I_S_sim)
    if None in B_S_sim: B_S_sim = None
    else: B_S_sim = torch.stack(B_S_sim)
    # stack and trim the matrix of nodes in each component's border
    if None in N_border: N_border_trimmed = None
    else:
        N_border = torch.stack(N_border)
        # Trim neighbor border to only be as big as needed for the batch.
        # This is necessary because the matrix was padded to the max length across all components, not just for the batch
        batch_sz, max_n_cc, _ = N_border.shape
        N_border_reshaped = N_border.view(batch_sz*max_n_cc, -1)
        # keep only columns used by at least one component in this batch
        # NOTE(review): assumes a fully-padded column sums to 0, i.e. config.PAD_VALUE == 0 — confirm
        ind = (torch.sum(torch.abs(N_border_reshaped), dim=0) != 0)
        N_border_trimmed = N_border_reshaped[:,ind].view(batch_sz, max_n_cc, -1)
    labels = torch.stack(labels).squeeze() # (batch_sz, 1)
    idx = torch.stack(idx)
    cc_ids = torch.stack(con_component_ids)
    # Trim connected component ids to only be as big as needed for the batch
    batch_sz, max_n_cc, _ = cc_ids.shape
    cc_ids_reshaped = cc_ids.view(batch_sz*max_n_cc, -1)
    ind = (torch.sum(torch.abs(cc_ids_reshaped), dim=0) != 0)
    cc_ids_trimmed = cc_ids_reshaped[:,ind].view(batch_sz, max_n_cc, -1)
    return {'subgraph_ids': subgraph_ids_pad, 'cc_ids': cc_ids_trimmed, 'N_border': N_border_trimmed, \
        'NP_sim': NP_sim, 'I_S_sim':I_S_sim, 'B_S_sim':B_S_sim, \
        'subgraph_idx': idx, 'label':labels}
def train_dataloader(self):
    '''
    Build the DataLoader over the training subgraphs (shuffled each epoch).
    '''
    train_set = SubgraphDataset(
        self.train_sub_G, self.train_sub_G_label, self.train_cc_ids,
        self.train_N_border, self.train_neigh_pos_similarities,
        self.train_int_struc_similarities, self.train_bor_struc_similarities,
        self.multilabel, self.multilabel_binarizer)
    # drop the last (incomplete) batch whenever the dataset holds at least one full batch
    drop_incomplete = self.hparams['batch_size'] <= len(self.train_sub_G)
    return DataLoader(train_set, batch_size=self.hparams['batch_size'], shuffle=True,
                      collate_fn=self._pad_collate, drop_last=drop_incomplete)
def val_dataloader(self):
    '''
    Build the DataLoader over the validation subgraphs (no shuffling).
    '''
    val_set = SubgraphDataset(
        self.val_sub_G, self.val_sub_G_label, self.val_cc_ids,
        self.val_N_border, self.val_neigh_pos_similarities,
        self.val_int_struc_similarities, self.val_bor_struc_similarities,
        self.multilabel, self.multilabel_binarizer)
    return DataLoader(val_set, batch_size=self.hparams['batch_size'],
                      shuffle=False, collate_fn=self._pad_collate)
def test_dataloader(self):
    '''
    Build the DataLoader over the test subgraphs. Test-split data structures
    (CC ids, embeddings, border sets, similarities, anchor patches) are
    prepared lazily here via prepare_test_data.
    '''
    self.prepare_test_data()
    test_set = SubgraphDataset(
        self.test_sub_G, self.test_sub_G_label, self.test_cc_ids,
        self.test_N_border, self.test_neigh_pos_similarities,
        self.test_int_struc_similarities, self.test_bor_struc_similarities,
        self.multilabel, self.multilabel_binarizer)
    return DataLoader(test_set, batch_size=self.hparams['batch_size'],
                      shuffle=False, collate_fn=self._pad_collate)
##################################################
# Optimization
def configure_optimizers(self):
    '''
    Create the Adam optimizer over all model parameters with the configured learning rate.
    '''
    return torch.optim.Adam(self.parameters(), lr=self.hparams['learning_rate'])
def backward(self, trainer, loss, optimizer, optimizer_idx):
    # Custom backward pass: retain_graph=True keeps the autograd graph alive
    # after this backward call. Presumably required because precomputed/shared
    # embeddings participate in multiple backward passes — TODO(review): confirm
    # this is still needed; retaining the graph increases memory usage.
    loss.backward(retain_graph=True)
SubGNN | SubGNN-main/SubGNN/gamma.py | # General
import sys
import time
import numpy as np
# Pytorch & Networkx
import torch
import networkx as nx
# Dynamic time warping
from fastdtw import fastdtw
# Our methods
sys.path.insert(0, '..') # add config to path
import config
###########################################
# DTW of degree sequences
def get_degree_sequence(graph, nodes, degree_dict=None, internal=True):
    '''
    Returns the ordered (ascending) degree sequence of a list of nodes.

    Args:
        graph: the underlying networkx base graph
        nodes: 1D tensor of node ids, possibly padded with config.PAD_VALUE
        degree_dict: optional precomputed {id: degree} map for the full graph,
            used to avoid recomputing full-graph degrees
        internal: if True, return degrees within the induced subgraph;
            otherwise return the external degrees (full-graph degree minus
            internal degree), as used by the border structure channel
    '''
    # remove padding
    nodes = nodes[nodes != config.PAD_VALUE].cpu().numpy()
    subgraph = graph.subgraph(nodes)
    internal_degree_seq = [degree for node, degree in subgraph.degree(nodes)]
    # for the internal structure channel, the sorted internal degree sequence is used
    if internal:
        internal_degree_seq.sort()
        return internal_degree_seq
    # for the border structure channel, the sorted external degree sequence is used;
    # use the degree dict when available instead of recomputing full-graph degrees
    if degree_dict is None:  # was `== None` (PEP 8: identity test for None)
        graph_degree_seq = [degree for node, degree in graph.degree(nodes)]
    else:
        # NOTE(review): assumes degree_dict is 0-indexed while node ids start at 1 — confirm
        graph_degree_seq = [degree_dict[n-1] for n in nodes]
    external_degree_seq = [full_degree - i_degree for full_degree, i_degree in zip(graph_degree_seq, internal_degree_seq)]
    external_degree_seq.sort()
    return external_degree_seq
def calc_dist(a, b):
    '''Pointwise distance for DTW over degrees: ratio of (larger+1) to (smaller+1), minus 1.'''
    hi, lo = max(a, b), min(a, b)
    return (hi + 1) / (lo + 1) - 1
def calc_dtw( component_degree, patch_degree):
    '''
    calculate dynamic time warping between the component degree sequence and the patch degree sequence

    Returns a similarity score in (0, 1]: 1 / (DTW distance + 1), so identical
    sequences score 1 and increasingly dissimilar sequences approach 0.
    '''
    dist, path = fastdtw(component_degree, patch_degree, dist=calc_dist)
    return 1. / (dist + 1.)
| 1,810 | 28.209677 | 126 | py |
SubGNN | SubGNN-main/SubGNN/attention.py | import torch
from torch.nn.parameter import Parameter
# All of the below code is taken from AllenAI's AllenNLP library
def tiny_value_of_dtype(dtype: torch.dtype):
    """
    Return a moderately tiny positive constant for *dtype*, used to guard
    divisions against zero without introducing numerical issues. (Deliberately
    larger than the dtype's true `tiny` to avoid NaN bugs.)

    Only floating point dtypes are supported; anything else raises TypeError.
    """
    if not dtype.is_floating_point:
        raise TypeError("Only supports floating point dtypes.")
    tiny_by_dtype = {torch.float: 1e-13, torch.double: 1e-13, torch.half: 1e-4}
    if dtype in tiny_by_dtype:
        return tiny_by_dtype[dtype]
    raise TypeError("Does not support dtype " + str(dtype))
def masked_softmax(
    vector: torch.Tensor, mask: torch.BoolTensor, dim: int = -1, memory_efficient: bool = False,
) -> torch.Tensor:
    """
    Softmax over `vector` restricted to the positions where `mask` is true.

    A `None` mask degenerates to a plain softmax. Otherwise `mask` is
    broadcast up to `vector`'s rank (unsqueezing at dim 1 as needed) and one
    of two strategies is applied:

    - default: zero out masked entries before and after a softmax over the
      masked product, then renormalize (adding a dtype-appropriate tiny value
      to the denominator). A fully masked row yields all zeros, which can
      produce NaN under categorical cross-entropy.
    - `memory_efficient=True`: fill masked positions with the dtype's minimum
      value so their probabilities are ~0. Mathematically approximate, but
      cheaper on memory; a fully masked row degenerates to a uniform softmax.
    """
    if mask is None:
        return torch.nn.functional.softmax(vector, dim=dim)
    while mask.dim() < vector.dim():
        mask = mask.unsqueeze(1)
    if memory_efficient:
        filled = vector.masked_fill(~mask, min_value_of_dtype(vector.dtype))
        return torch.nn.functional.softmax(filled, dim=dim)
    # Zeroing masked entries first limits numerical error from large values outside the mask.
    probs = torch.nn.functional.softmax(vector * mask, dim=dim) * mask
    return probs / (probs.sum(dim=dim, keepdim=True) + tiny_value_of_dtype(probs.dtype))
class Attention(torch.nn.Module):
    """
    Base class for attention over a (batched) vector/matrix pair.

    Given a query vector of shape `(batch_size, embedding_dim)`, a matrix of
    shape `(batch_size, num_rows, embedding_dim)`, and an optional boolean
    `matrix_mask` of shape `(batch_size, num_rows)` flagging which rows are
    real (not padding), the forward pass scores each row of the matrix
    against the vector and returns a tensor of shape `(batch_size, num_rows)`.

    # Parameters

    normalize : `bool`, optional (default = `True`)
        When true, scores are pushed through a masked softmax so each batch
        row forms a probability distribution; when false, the raw similarity
        scores are returned unchanged.
    """

    def __init__(self, normalize: bool = True) -> None:
        super().__init__()
        self._normalize = normalize

    def forward(
        self, vector: torch.Tensor, matrix: torch.Tensor, matrix_mask: torch.BoolTensor = None
    ) -> torch.Tensor:
        scores = self._forward_internal(vector, matrix)
        if not self._normalize:
            return scores
        return masked_softmax(scores, matrix_mask)

    def _forward_internal(self, vector: torch.Tensor, matrix: torch.Tensor) -> torch.Tensor:
        # Subclasses implement the actual similarity computation.
        raise NotImplementedError
class DotProductAttention(Attention):
    """
    Scores each matrix row by its dot product with the query vector.

    Registered as an `Attention` with name "dot_product".
    """

    def _forward_internal(self, vector: torch.Tensor, matrix: torch.Tensor) -> torch.Tensor:
        # (batch, rows, dim) @ (batch, dim, 1) -> (batch, rows, 1) -> (batch, rows)
        query_column = vector.unsqueeze(-1)
        return matrix.bmm(query_column).squeeze(-1)
class AdditiveAttention(Attention):
    """
    Bahdanau-style additive (concat) attention.

    The similarity between query `x` and matrix row `y` is `V tanh(Wx + Uy)`,
    with learned matrices `W`, `U` and vector `V`. Introduced in
    <https://arxiv.org/abs/1409.0473> by Bahdanau et al.

    Registered as an `Attention` with name "additive".

    # Parameters

    vector_dim : `int`, required
        Size of the last axis of the query vector (`x.size()[-1]`); fixes the
        shape of `W` and `V`.
    matrix_dim : `int`, required
        Size of the last axis of the matrix rows (`y.size()[-1]`); fixes the
        shape of `U`.
    normalize : `bool`, optional (default : `True`)
        Whether to softmax the scores into a probability distribution.
    """

    def __init__(self, vector_dim: int, matrix_dim: int, normalize: bool = True) -> None:
        super().__init__(normalize)
        self._w_matrix = Parameter(torch.Tensor(vector_dim, vector_dim))
        self._u_matrix = Parameter(torch.Tensor(matrix_dim, vector_dim))
        self._v_vector = Parameter(torch.Tensor(vector_dim, 1))
        self.reset_parameters()

    def reset_parameters(self):
        # Xavier-initialize all three weight tensors, in declaration order.
        for weight in (self._w_matrix, self._u_matrix, self._v_vector):
            torch.nn.init.xavier_uniform_(weight)

    def _forward_internal(self, vector: torch.Tensor, matrix: torch.Tensor) -> torch.Tensor:
        projected_query = vector.matmul(self._w_matrix).unsqueeze(1)
        projected_keys = matrix.matmul(self._u_matrix)
        hidden = torch.tanh(projected_query + projected_keys)
        return hidden.matmul(self._v_vector).squeeze(2)
| 6,880 | 47.801418 | 107 | py |
SubGNN | SubGNN-main/SubGNN/train.py | # General
import numpy as np
import random
import argparse
import tqdm
import pickle
import json
import joblib
import os
import time
import sys
import pathlib
import random
import string
# Pytorch
import torch
from torch.utils.data import DataLoader
from torch.nn.functional import one_hot
import pytorch_lightning as pl
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.profiler import AdvancedProfiler
# Optuna
import optuna
from optuna.samplers import TPESampler
from optuna.integration import PyTorchLightningPruningCallback
# Our Methods
import SubGNN as md
sys.path.insert(0, '..') # add config to path
import config
'''
There are several options for running `train.py`:
(1) Specify a model path via restoreModelPath. This script will use the hyperparameters at that path to train a model.
(2) Specify opt_n_trials != None and restoreModelPath == None. This script will use the hyperparameter ranges set
in the `get_hyperparams_optuma` function to run optuna trials.
(3) Specify opt_n_trials == None and restoreModelPath == None. This script will use the hyperparameters in the
`get_hyperparams` function to train/test the model.
'''
###################################################
# Parse arguments
def parse_arguments(argv=None):
    '''
    Collect and parse arguments to script.

    Args:
        argv: optional list of argument strings; defaults to ``sys.argv[1:]``
            when None (the ``argparse`` default). Passing an explicit list
            makes this function testable without touching ``sys.argv``.

    Returns:
        argparse.Namespace holding all run options.
    '''
    parser = argparse.ArgumentParser(description="Learn subgraph embeddings")
    # Data locations (used when -task is not supplied)
    parser.add_argument('-embedding_path', type=str, help='Directory where node embeddings are saved')
    parser.add_argument('-subgraphs_path', type=str, help='File where subgraphs are saved')
    parser.add_argument('-shortest_paths_path', type=str, help='File where shortest paths are saved')
    parser.add_argument('-graph_path', type=str, help='File where graph is saved')
    parser.add_argument('-similarities_path', type=str, help='Directory where similarity calculations are saved')
    parser.add_argument('-task', type=str, help='Task name (e.g. hpo_metab)')
    # Max Epochs
    parser.add_argument("-max_epochs", type=int, default=None, help="Max number of epochs to train")
    parser.add_argument("-seed", type=int, default=None, help="Random Seed")
    # Log
    parser.add_argument('-log_path', type=str, default=None, help='Place to store results. By default, use tensorboard directory unless -no_save.')
    parser.add_argument('-no_save', action='store_true', help='Makes model not save any specifications.')
    parser.add_argument('-print_train_times', action='store_true', help='Print train times.')
    # Tensorboard Arguments
    parser.add_argument('-tb_logging', action='store_true', help='Log to Tensorboard')
    parser.add_argument('-tb_dir', type=str, default="tensorboard", help='Directory for Tensorboard Logs')
    parser.add_argument('-tb_name', type=str, default="sg", help='Base Model Name for Tensorboard Log')
    parser.add_argument('-tb_version', help='Version Name for Tensorboard Log. (By default, created automatically.)')
    # Checkpoint
    parser.add_argument('-no_checkpointing', action='store_true', help='Specifies not to do model checkpointing.')
    parser.add_argument('-checkpoint_k', type=int, default=3, help='Frequency with which to save model checkpoints')
    parser.add_argument('-monitor_metric', type=str, default='val_micro_f1', help='Metric to monitor for checkpointing/stopping')
    # Optuna
    parser.add_argument("-opt_n_trials", type=int, default=None, help="Number of optuna trials to run")
    parser.add_argument("-opt_n_cores", type=int, default=-1, help="Number of cores (-1 = all available)")
    parser.add_argument("-opt_prune", action='store_true', help="Prune trials early if not promising")
    parser.add_argument("-grid_search", action='store_true', help="Grid search")
    # Debug
    parser.add_argument('-debug_mode', action='store_true', help='Plot gradients + GPU usage')
    parser.add_argument('-subset_data', action='store_true', help='Subset data to one batch per dataset')
    # Restore Model
    parser.add_argument('-restoreModelPath', type=str, default=None, help='Parent directory of model, hparams, kwargs')
    parser.add_argument('-restoreModelName', type=str, default=None, help='Name of model to restore')
    # Test set
    parser.add_argument('-runTest', action='store_true', help='Run on the test set')
    parser.add_argument('-noTrain', action='store_true', help='No training')
    args = parser.parse_args(argv)
    return args
###################################################
# Set Hyperparameters
# TODO: change the values here if you run this script
def get_hyperparams(args):
    '''
    Default hyperparameters for a single (non-optuna) run.

    NOTE: only consulted when args.opt_n_trials is None and
    restoreModelPath is None. Edit the values here to suit your model/run.
    '''
    # Which SubGNN channels are active.
    channels = {
        "use_neighborhood": True,
        "use_structure": True,
        "use_position": True,
    }
    # Model architecture.
    architecture = {
        "node_embed_size": 128,
        "n_layers": 1,
        "linear_hidden_dim_1": 64,
        "linear_hidden_dim_2": 32,
        "lin_dropout": 0.04617609616314509,
        "set2set": False,
        "ff_attn": False,
        "use_mpn_projection": True,
        "freeze_node_embeds": False,
    }
    # Anchor-patch sampling.
    anchor_patches = {
        "structure_patch_type": "triangular_random_walk",
        "resample_anchor_patches": False,
        "n_anchor_patches_pos_out": 123,
        "n_anchor_patches_pos_in": 34,
        "n_anchor_patches_N_in": 19,
        "n_anchor_patches_N_out": 69,
        "n_anchor_patches_structure": 37,
        "neigh_sample_border_size": 1,
        "sample_walk_len": 50,
        "n_triangular_walks": 5,
        "random_walk_len": 10,
        "rw_beta": 0.65,
    }
    # LSTM used to embed structural anchor patches.
    lstm = {
        "lstm_aggregator": "last",
        "lstm_dropout": 0.21923625197416907,
        "lstm_n_layers": 2,
    }
    # Optimization and runtime.
    training = {
        "max_epochs": 200,
        "seed": 3,
        "batch_size": 64,
        "learning_rate": 0.00025420762516423353,
        "grad_clip": 0.2160947806012501,
        "auto_lr_find": True,
        "n_processes": 4,
        "print_train_times": False,
        "compute_similarities": False,
        "cc_aggregator": "max",
        "trainable_cc": True,
    }
    return {**channels, **architecture, **anchor_patches, **lstm, **training}
def get_hyperparams_optuma(args, trial):
    '''
    Hyperparameter search space for optuna.

    Used when args.opt_n_trials != None (and restoreModelPath == None): each
    optuna trial samples one configuration from the ranges below.

    Fixes two trial-name bugs: 'n_layers' was sampled under the unrelated
    name 'gamma_shortest_max_distance_N', and both linear hidden dims were
    sampled under the single duplicate name 'linear_hidden_dim', which made
    optuna return the same value for both dimensions in every trial.

    Args:
        args: parsed command-line arguments (supplies max_epochs and
            print_train_times).
        trial: optuna trial object used to sample each hyperparameter.

    Returns:
        dict mapping hyperparameter names to sampled (or fixed) values.
    '''
    hyperparameters = {
        'seed': 42,
        'batch_size': trial.suggest_int('batch_size', 64, 150),
        'learning_rate': trial.suggest_float('learning_rate', 1e-5, 1e-3, log=True),  # learning rate
        'grad_clip': trial.suggest_float('grad_clip', 0, 0.5),  # gradient clipping
        'max_epochs': args.max_epochs,  # max number of epochs
        'node_embed_size': 32,  # dim of node embedding
        'n_layers': trial.suggest_int('n_layers', 1, 5),  # number of layers (was mislabeled 'gamma_shortest_max_distance_N')
        'n_anchor_patches_pos_in': trial.suggest_int('n_anchor_patches_pos_in', 25, 75),  # number of anchor patches (P, INTERNAL)
        'n_anchor_patches_pos_out': trial.suggest_int('n_anchor_patches_pos_out', 50, 200),  # number of anchor patches (P, BORDER)
        'n_anchor_patches_N_in': trial.suggest_int('n_anchor_patches_N_in', 10, 25),  # number of anchor patches (N, INTERNAL)
        'n_anchor_patches_N_out': trial.suggest_int('n_anchor_patches_N_out', 25, 75),  # number of anchor patches (N, BORDER)
        'n_anchor_patches_structure': trial.suggest_int('n_anchor_patches_structure', 15, 40),  # number of anchor patches (S, INTERNAL & BORDER)
        'neigh_sample_border_size': trial.suggest_int('neigh_sample_border_size', 1, 2),
        # distinct trial names so the two dims are sampled independently
        'linear_hidden_dim_1': trial.suggest_int('linear_hidden_dim_1', 16, 96),
        'linear_hidden_dim_2': trial.suggest_int('linear_hidden_dim_2', 16, 96),
        'n_triangular_walks': trial.suggest_int('n_triangular_walks', 5, 15),
        'random_walk_len': trial.suggest_int('random_walk_len', 18, 26),
        'sample_walk_len': trial.suggest_int('sample_walk_len', 18, 26),
        'rw_beta': trial.suggest_float('rw_beta', 0.1, 0.9),  # triangular random walk parameter, beta
        'lstm_aggregator': 'last',
        'lstm_dropout': trial.suggest_float('lstm_dropout', 0.0, 0.4),
        'lstm_n_layers': trial.suggest_int('lstm_n_layers', 1, 2),  # number of layers in LSTM used for embedding structural anchor patches
        'n_processes': 4,  # multiprocessing
        'lin_dropout': trial.suggest_float('lin_dropout', 0.0, 0.6),
        'resample_anchor_patches': False,
        'compute_similarities': False,
        'use_mpn_projection': True,
        'use_neighborhood': True,
        'use_structure': False,
        'use_position': False,
        'cc_aggregator': trial.suggest_categorical('cc_aggregator', ['sum', 'max']),  # approach for aggregating node embeddings in components
        'trainable_cc': trial.suggest_categorical('trainable_cc', [True, False]),
        'freeze_node_embeds': False,
        'print_train_times': args.print_train_times
    }
    return hyperparameters
###################################################
def get_paths(args, hyperparameters):
    '''
    Resolve the locations of all input data files.

    When args.task is set, all paths are derived from the task directory and
    the 'embedding_type' hyperparameter; otherwise the explicit per-file
    paths on `args` are returned unchanged.

    Returns (in order): graph edge list, subgraphs file, node-embedding file,
    similarities directory, shortest-path matrix, degree sequence, ego graphs.
    '''
    if args.task is None:
        # NOTE(review): args.degree_sequence_path and args.ego_graph_path are
        # not defined by parse_arguments -- confirm callers always pass -task.
        return (args.graph_path, args.subgraphs_path, args.embedding_path,
                args.similarities_path, args.shortest_paths_path,
                args.degree_sequence_path, args.ego_graph_path)
    task_dir = args.task
    # Map each supported embedding type to its saved-embedding filename.
    embedding_files = {
        'gin': "gin_embeddings.pth",
        'graphsaint': "graphsaint_gcn_embeddings.pth",
    }
    embedding_type = hyperparameters['embedding_type']
    if embedding_type not in embedding_files:
        raise NotImplementedError
    return (
        os.path.join(task_dir, "edge_list.txt"),                  # graph
        os.path.join(task_dir, "subgraphs.pth"),                  # subgraphs
        os.path.join(task_dir, embedding_files[embedding_type]),  # node embeddings
        os.path.join(task_dir, "similarities/"),                  # similarity cache dir
        os.path.join(task_dir, "shortest_path_matrix.npy"),       # shortest paths
        os.path.join(task_dir, "degree_sequence.txt"),            # degree sequence
        os.path.join(task_dir, "ego_graphs.txt"),                 # ego graphs
    )
def build_model(args, trial = None):
    '''
    Creates SubGNN from the hyperparameters specified in either
    (1) restoreModelPath, (2) get_hyperparams_optuma, or (3) get_hyperparams.

    Args:
        args: parsed command-line arguments.
        trial: optional optuna trial; when given (and no restoreModelPath),
            hyperparameters are sampled from the optuna search space.

    Returns:
        (model, hyperparameters): the instantiated SubGNN model and the
        hyperparameter dict used to build it.
    '''
    #get hyperparameters
    if args.restoreModelPath is not None: # load in hyperparameters from file
        print("Loading Hyperparams")
        with open(os.path.join(args.restoreModelPath, "hyperparams.json")) as data_file:
            hyperparameters = json.load(data_file)
        # a -max_epochs flag on the command line overrides the stored value
        if args.max_epochs:
            hyperparameters['max_epochs'] = args.max_epochs
    elif trial is not None: #select hyperparams from ranges specified in trial
        hyperparameters = get_hyperparams_optuma(args, trial)
    else: #get hyperparams from passed in args
        hyperparameters = get_hyperparams(args)
    # set subset_data
    if args.subset_data:
        hyperparameters['subset_data'] = True
    # set seed (a command-line -seed overrides the hyperparameter value)
    if hasattr(args,"seed") and args.seed is not None:
        hyperparameters['seed'] = args.seed
    # set for reproducibility
    torch.manual_seed(hyperparameters['seed'])
    np.random.seed(hyperparameters['seed'])
    torch.cuda.manual_seed(hyperparameters['seed'])
    torch.cuda.manual_seed_all(hyperparameters['seed'])
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    # get locations of file paths & instantiate model
    graph_path, subgraphs_path, embedding_path, similarities_path, shortest_paths_path, degree_dict_path, ego_graph_path = get_paths(args, hyperparameters)
    model = md.SubGNN(hyperparameters, graph_path, subgraphs_path, embedding_path, similarities_path, shortest_paths_path, degree_dict_path, ego_graph_path)
    # Restore Previous Weights, if relevant
    if args.restoreModelName:
        checkpoint_path = os.path.join(args.restoreModelPath, args.restoreModelName)
        if not torch.cuda.is_available():
            checkpoint = torch.load(checkpoint_path, torch.device('cpu') )
        else:
            checkpoint = torch.load(checkpoint_path)
        # only load checkpoint weights whose names exist in the current model
        model_dict = model.state_dict()
        pretrain_dict = {k: v for k, v in checkpoint['state_dict'].items() if k in model_dict}
        model.load_state_dict(pretrain_dict)
    return model, hyperparameters
def build_trainer(args, hyperparameters, trial = None):
    '''
    Build the pytorch-lightning Trainer (logging, checkpointing, pruning, GPUs).

    Args:
        args: parsed command-line arguments.
        hyperparameters: dict of run hyperparameters (reads max_epochs,
            grad_clip, and optionally progress_bar_refresh_rate / auto_lr_find).
        trial: optional optuna trial; together with -opt_prune it attaches a
            pruning callback that can stop unpromising trials early.

    Returns:
        (trainer, trainer_kwargs, results_path) where results_path is the
        directory for logs/checkpoints, or None when nothing is saved.

    Raises:
        Exception: when saving is enabled but neither -log_path nor
            -tb_logging provides a destination.
    '''
    if 'progress_bar_refresh_rate' in hyperparameters:
        p_refresh = hyperparameters['progress_bar_refresh_rate']
    else:
        p_refresh = 5
    # set epochs, gpus, gradient clipping, etc.
    # if 'no_gpu' in run config, then use CPU
    trainer_kwargs={'max_epochs': hyperparameters['max_epochs'],
                    "gpus":1,
                    "num_sanity_val_steps":0,
                    "progress_bar_refresh_rate":p_refresh,
                    "gradient_clip_val": hyperparameters['grad_clip']
                    }
    # set auto learning rate finder param
    if 'auto_lr_find' in hyperparameters and hyperparameters['auto_lr_find']:
        trainer_kwargs['auto_lr_find'] = hyperparameters['auto_lr_find']
    # Create tensorboard logger
    if not args.no_save and args.tb_logging:
        lgdir = os.path.join(args.tb_dir, args.tb_name)
        if not os.path.exists(lgdir):
            os.makedirs(lgdir)
        if args.tb_version is not None:
            tb_version = args.tb_version
        else:
            # random version id keeps concurrent runs from colliding
            tb_version = "version_"+ str(random.randint(0, 10000000))
        logger = TensorBoardLogger(args.tb_dir, name=args.tb_name, version=tb_version)
        if not os.path.exists(logger.log_dir):
            os.makedirs(logger.log_dir)
        print("Tensorboard logging at ", logger.log_dir)
        trainer_kwargs["logger"] = logger
    # set up model saving
    results_path = None
    if not args.no_save:
        if args.log_path:
            results_path = args.log_path
        elif args.tb_logging:
            results_path = logger.log_dir
        else:
            raise Exception('No results path has been specified.')
    if (not args.no_save) and (not args.no_checkpointing):
        # keep the top-k checkpoints, ranked by the monitored metric (maximized)
        trainer_kwargs["checkpoint_callback"] = ModelCheckpoint(
            filepath= os.path.join(results_path, "{epoch}-{val_micro_f1:.2f}-{val_acc:.2f}-{val_auroc:.2f}"),
            save_top_k = args.checkpoint_k,
            verbose=True,
            monitor=args.monitor_metric,
            mode='max'
        )
    if trial is not None and args.opt_prune:
        # NOTE(review): pruning callback is registered under
        # 'early_stop_callback' -- confirm this matches the installed
        # pytorch-lightning version's Trainer API.
        trainer_kwargs['early_stop_callback'] = PyTorchLightningPruningCallback(trial, monitor=args.monitor_metric)
    # enable debug mode
    if args.debug_mode:
        print("\n**** DEBUG MODE ON! ****\n")
        trainer_kwargs["track_grad_norm"] = 2
        trainer_kwargs["log_gpu_memory"] = True
        trainer_kwargs['print_nan_grads'] = False
        if not args.no_save:
            profile_path = os.path.join(results_path, "profiler.log")
            print("Profiling to ", profile_path)
            trainer_kwargs["profiler"] = AdvancedProfiler(output_filename=profile_path)
        else:
            trainer_kwargs["profiler"] = AdvancedProfiler()
    # set GPU availability
    if not torch.cuda.is_available():
        trainer_kwargs['gpus'] = 0
    trainer = pl.Trainer(**trainer_kwargs)
    return trainer, trainer_kwargs, results_path
def train_model(args, trial = None):
    '''
    Train a single model whose hyperparameters are specified in the run config.

    Args:
        args: parsed command-line arguments.
        trial: optional optuna trial, forwarded to build_model/build_trainer.

    Returns:
        model.test_results when -runTest or -noTrain is set; otherwise the
        max (or min, for 'val_loss') of the metric named by -monitor_metric
        across validation epochs.
    '''
    model, hyperparameters = build_model(args, trial)
    trainer, trainer_kwargs, results_path = build_trainer(args, hyperparameters, trial)
    random.seed(hyperparameters['seed'])
    # save hyperparams and trainer kwargs to file
    if results_path is not None:
        # context managers guarantee the files are closed even if json
        # serialization raises (originally open()/close() without try/finally)
        with open(os.path.join(results_path, "hyperparams.json"), "w") as hparam_file:
            hparam_file.write(json.dumps(hyperparameters, indent=4))
        # drop non-serializable callbacks/loggers before dumping trainer kwargs
        pop_keys = [key for key in ['logger','profiler','early_stop_callback','checkpoint_callback'] if key in trainer_kwargs.keys()]
        for key in pop_keys:
            trainer_kwargs.pop(key)
        with open(os.path.join(results_path, "trainer_kwargs.json"), "w") as tkwarg_file:
            tkwarg_file.write(json.dumps(trainer_kwargs, indent=4))
    # optionally train the model
    if not args.noTrain:
        trainer.fit(model)
    # optionally test the model
    if args.runTest or args.noTrain:
        # reproducibility: re-seed before evaluation
        torch.manual_seed(hyperparameters['seed'])
        np.random.seed(hyperparameters['seed'])
        torch.cuda.manual_seed(hyperparameters['seed'])
        torch.cuda.manual_seed_all(hyperparameters['seed'])
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
        if not args.no_checkpointing:
            # load saved checkpoint weights; if several 'epoch*.ckpt' files
            # exist, the last one returned by os.listdir wins
            for file in os.listdir(results_path):
                if file.endswith(".ckpt") and file.startswith("epoch"):
                    print(f"Loading model {file}")
                    if not torch.cuda.is_available():
                        checkpoint = torch.load(os.path.join(results_path, file), torch.device('cpu') )
                    else:
                        checkpoint = torch.load(os.path.join(results_path, file))
                    model_dict = model.state_dict()
                    pretrain_dict = {k: v for k, v in checkpoint['state_dict'].items() if k in model_dict}
                    model.load_state_dict(pretrain_dict)
        trainer.test(model)
    # save results
    if results_path is not None:
        with open(os.path.join(results_path, "final_metric_scores.json"), "w") as scores_file:
            results_serializable = {k:float(v) for k,v in model.metric_scores[-1].items()}
            scores_file.write(json.dumps(results_serializable, indent=4))
        if args.runTest:
            with open(os.path.join(results_path, "test_results.json"), "w") as test_file:
                results_serializable = {k:float(v) for k,v in model.test_results.items()}
                test_file.write(json.dumps(results_serializable, indent=4))
    # print + return results
    if args.runTest or args.noTrain:
        print(model.test_results)
        return model.test_results
    all_scores = [score[args.monitor_metric].numpy() for score in model.metric_scores]
    if args.monitor_metric == "val_loss":
        return np.min(all_scores)
    return np.max(all_scores)
def main(args):
    '''
    Entry point: either trains a single model (when -opt_n_trials is unset,
    using restored or default hyperparameters) or runs an optuna study with
    args.opt_n_trials trials.

    Raises:
        Exception: when an optuna study is requested but neither -log_path
            nor -tb_logging provides a place to store the study.
    '''
    torch.autograd.set_detect_anomaly(True)
    # specify tensorboard directory
    if args.tb_dir is not None:
        args.tb_dir = os.path.join(config.PROJECT_ROOT, args.tb_dir)
    # if args.opt_n_trials is None, then we either read in hparams from file or use the hyperparameters in get_hyperparams
    if args.opt_n_trials is None:
        return train_model(args)
    print(f'Running {args.opt_n_trials} Trials of optuna')
    # optionally prune unpromising trials early
    if args.opt_prune:
        pruner = optuna.pruners.MedianPruner()
    else:
        pruner = None
    # optimization direction depends on the monitored metric
    if args.monitor_metric == 'val_loss':
        direction = "minimize"
    else:
        direction = "maximize"
    if args.log_path:
        study_path = args.log_path
    elif args.tb_logging:
        study_path = os.path.join(args.tb_dir, args.tb_name)
    else:
        # BUG FIX: study_path was previously left unbound here, causing a
        # NameError below; fail fast with a clear message instead
        raise Exception('No results path has been specified.')
    print("Logging to ", study_path)
    db_file = os.path.join(study_path, 'optuma_study_sqlite.db')
    pathlib.Path(study_path).mkdir(parents=True, exist_ok=True)
    # set up optuna study
    if args.grid_search:
        search_space = {
            'neigh_sample_border_size': [1,2],
            'gamma_shortest_max_distance_P': [3,4,5,6]
        }
        sampler = optuna.samplers.GridSampler(search_space)
    else:
        sampler = optuna.samplers.RandomSampler()
    # sqlite storage + load_if_exists lets an interrupted study resume
    study = optuna.create_study(direction=direction,
                                sampler=sampler,
                                pruner=pruner,
                                storage= 'sqlite:///' + db_file,
                                study_name=study_path,
                                load_if_exists=True)
    study.optimize(lambda trial: train_model(args, trial), n_trials=args.opt_n_trials, n_jobs = args.opt_n_cores)
    optuna_results_path = os.path.join(study_path, 'optuna_study.pkl')
    print("Saving Study Results to", optuna_results_path)
    joblib.dump(study, optuna_results_path)
    print(study.best_params)
# Script entry point: parse CLI arguments, then train/test a single model
# or launch an optuna study (see main).
if __name__ == "__main__":
    args = parse_arguments()
    main(args)
| 21,662 | 42.5 | 170 | py |
SubGNN | SubGNN-main/prepare_dataset/train_node_emb.py | # General
import numpy as np
import random
import argparse
import os
import config_prepare_dataset as config
import preprocess
import model as mdl
import utils
# Pytorch
import torch
from torch_geometric.utils.convert import to_networkx, to_scipy_sparse_matrix
from torch_geometric.data import Data, DataLoader, NeighborSampler
if config.MINIBATCH == "GraphSaint": from torch_geometric.data import GraphSAINTRandomWalkSampler
from torch_geometric.utils import negative_sampling
# Global Variables
# NOTE(review): this log file is opened at import time and never closed.
log_f = open(str(config.DATASET_DIR / "node_emb.log"), "w")
all_data = None          # full graph Data object, populated by generate_emb
device = None            # overwritten just below with the real device
best_val_acc = -1        # best mean validation accuracy seen so far
best_embeddings = None   # node embeddings from the best model
best_model = None        # best model instance across the hyperparameter search
all_losses = {}          # per-hyperparameter-config training loss curves
# Tolerance when comparing validation accuracies.
# NOTE(review): 10e-4 equals 1e-3 -- confirm the intended tolerance.
eps = 10e-4
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Using device:', device)
best_hyperparameters = dict()
if device.type == 'cuda': print(torch.cuda.get_device_name(0))
# Candidate hyperparameter values (all_*) and the starting configuration
# (curr_*), keyed by the minibatch sampler chosen in the config.
if config.MINIBATCH == "NeighborSampler":
    all_hyperparameters = {'batch_size': config.POSSIBLE_BATCH_SIZES, 'hidden': config.POSSIBLE_HIDDEN, 'output': config.POSSIBLE_OUTPUT, 'lr': config.POSSIBLE_LR, 'wd': config.POSSIBLE_WD, 'nb_size': config.POSSIBLE_NB_SIZE, 'dropout': config.POSSIBLE_DROPOUT}
    curr_hyperparameters = {'batch_size': config.POSSIBLE_BATCH_SIZES[0], 'hidden': config.POSSIBLE_HIDDEN[0], 'output': config.POSSIBLE_OUTPUT[0], 'lr': config.POSSIBLE_LR[0], 'wd': config.POSSIBLE_WD[0], 'nb_size': config.POSSIBLE_NB_SIZE[0], 'dropout': config.POSSIBLE_DROPOUT[0]}
elif config.MINIBATCH == "GraphSaint":
    all_hyperparameters = {'batch_size': config.POSSIBLE_BATCH_SIZES, 'hidden': config.POSSIBLE_HIDDEN, 'output': config.POSSIBLE_OUTPUT, 'lr': config.POSSIBLE_LR, 'wd': config.POSSIBLE_WD, 'walk_length': config.POSSIBLE_WALK_LENGTH, 'num_steps': config.POSSIBLE_NUM_STEPS, 'dropout': config.POSSIBLE_DROPOUT}
    curr_hyperparameters = {'batch_size': config.POSSIBLE_BATCH_SIZES[0], 'hidden': config.POSSIBLE_HIDDEN[0], 'output': config.POSSIBLE_OUTPUT[0], 'lr': config.POSSIBLE_LR[0], 'wd': config.POSSIBLE_WD[0], 'walk_length': config.POSSIBLE_WALK_LENGTH[0], 'num_steps': config.POSSIBLE_NUM_STEPS[0], 'dropout': config.POSSIBLE_DROPOUT[0]}
def train(epoch, model, optimizer):
    """
    Run one training epoch of link prediction over minibatches.

    Trains on positive edges (train_mask) plus sampled negatives, evaluates
    on the validation edges of each minibatch, and checkpoints the model to
    best_model.pth whenever mean validation accuracy improves (within eps).

    Args
        - epoch (int): current epoch index (used only for logging)
        - model: node-embedding GNN being trained
        - optimizer: its optimizer

    Return
        - total_loss: summed minibatch losses for the epoch
    """
    global all_data, best_val_acc, best_embeddings, best_model, curr_hyperparameters, best_hyperparameters
    # Save predictions
    total_loss = 0
    roc_val = []
    ap_val = []
    f1_val = []
    acc_val = []
    # Minibatches
    if config.MINIBATCH == "NeighborSampler":
        loader = NeighborSampler(all_data.edge_index, sizes = [curr_hyperparameters['nb_size']], batch_size = curr_hyperparameters['batch_size'], shuffle = True)
    elif config.MINIBATCH == "GraphSaint":
        all_data.num_classes = torch.tensor([2])
        loader = GraphSAINTRandomWalkSampler(all_data, batch_size=curr_hyperparameters['batch_size'], walk_length=curr_hyperparameters['walk_length'], num_steps=curr_hyperparameters['num_steps'])
    # Iterate through minibatches
    for data in loader:
        if config.MINIBATCH == "NeighborSampler": data = preprocess.set_data(data, all_data, config.MINIBATCH)
        # Negative sampling: one negative per four positive training edges.
        curr_train_pos = data.edge_index[:, data.train_mask]
        curr_train_neg = negative_sampling(curr_train_pos, num_neg_samples=curr_train_pos.size(1) // 4)
        curr_train_total = torch.cat([curr_train_pos, curr_train_neg], dim=-1)
        # Labels: 1 for the positive edges (listed first), 0 for negatives.
        data.y = torch.zeros(curr_train_total.size(1)).float()
        data.y[:curr_train_pos.size(1)] = 1.
        # Perform training
        data.to(device)
        optimizer.zero_grad()
        out = model(data.x, data.edge_index)
        curr_dot_embed = utils.el_dot(out, curr_train_total)
        loss = utils.calc_loss_both(data, curr_dot_embed)
        # NOTE(review): utils.el_dot detaches the embeddings, so this
        # backward() likely produces no gradients for the model parameters
        # -- confirm gradient flow is intended.
        if torch.isnan(loss) == False:
            # NOTE(review): accumulating the loss tensor (not loss.item())
            # keeps each minibatch graph alive until the epoch ends.
            total_loss += loss
            loss.backward()
            optimizer.step()
        # Training metrics on this minibatch.
        curr_train_pos_mask = torch.zeros(curr_train_total.size(1)).bool()
        curr_train_pos_mask[:curr_train_pos.size(1)] = 1
        curr_train_neg_mask = (curr_train_pos_mask == 0)
        roc_score, ap_score, train_acc, train_f1 = utils.calc_roc_score(pred_all = curr_dot_embed.T[1], pos_edges = curr_train_pos_mask, neg_edges = curr_train_neg_mask)
        print(">>>>>>Train: (ROC) ", roc_score, " (AP) ", ap_score, " (ACC) ", train_acc, " (F1) ", train_f1)
        # Validation metrics on this minibatch's val edges (plus negatives).
        curr_val_pos = data.edge_index[:, data.val_mask]
        curr_val_neg = negative_sampling(curr_val_pos, num_neg_samples=curr_val_pos.size(1) // 4)
        curr_val_total = torch.cat([curr_val_pos, curr_val_neg], dim=-1)
        curr_val_pos_mask = torch.zeros(curr_val_total.size(1)).bool()
        curr_val_pos_mask[:curr_val_pos.size(1)] = 1
        curr_val_neg_mask = (curr_val_pos_mask == 0)
        val_dot_embed = utils.el_dot(out, curr_val_total)
        data.y = torch.zeros(curr_val_total.size(1)).float()
        data.y[:curr_val_pos.size(1)] = 1.
        roc_score, ap_score, val_acc, val_f1 = utils.calc_roc_score(pred_all = val_dot_embed.T[1], pos_edges = curr_val_pos_mask, neg_edges = curr_val_neg_mask)
        roc_val.append(roc_score)
        ap_val.append(ap_score)
        acc_val.append(val_acc)
        f1_val.append(val_f1)
    res = "\t".join(["Epoch: %04d" % (epoch + 1), "train_loss = {:.5f}".format(total_loss), "val_roc = {:.5f}".format(np.mean(roc_val)), "val_ap = {:.5f}".format(np.mean(ap_val)), "val_f1 = {:.5f}".format(np.mean(f1_val)), "val_acc = {:.5f}".format(np.mean(acc_val))])
    print(res)
    log_f.write(res + "\n")
    # Save best model and parameters (ties within eps also count as improvement)
    if best_val_acc <= np.mean(acc_val) + eps:
        best_val_acc = np.mean(acc_val)
        with open(str(config.DATASET_DIR / "best_model.pth"), 'wb') as f:
            torch.save(model.state_dict(), f)
        best_hyperparameters = curr_hyperparameters
        best_model = model
    return total_loss
def test(model):
    """
    Evaluate link prediction on the held-out test edges.

    Reloads the best checkpoint into `model`, samples negatives for the test
    split, scores the edges, and prints/logs ROC, AP, accuracy, and F1.

    Args
        - model: model instance to load the best checkpoint into
    """
    global all_data, best_embeddings, best_hyperparameters, all_losses
    model.load_state_dict(torch.load(str(config.DATASET_DIR / "best_model.pth")))
    model.to(device)
    model.eval()
    # Test edges plus one sampled negative per four positives.
    test_pos = all_data.edge_index[:, all_data.test_mask]
    test_neg = negative_sampling(test_pos, num_neg_samples=test_pos.size(1) // 4)
    test_total = torch.cat([test_pos, test_neg], dim=-1)
    test_pos_edges = torch.zeros(test_total.size(1)).bool()
    test_pos_edges[:test_pos.size(1)] = 1
    test_neg_edges = (test_pos_edges == 0)
    # NOTE(review): scores are computed from the global best_embeddings, not
    # from the freshly reloaded `model` -- confirm this is intended.
    dot_embed = utils.el_dot(best_embeddings, test_total, test = True)
    roc_score, ap_score, test_acc, test_f1 = utils.calc_roc_score(pred_all = dot_embed, pos_edges = test_pos_edges.flatten(), neg_edges = test_neg_edges.flatten(), loss = all_losses, save_plots = config.DATASET_DIR / "train_plots.pdf")
    print('Test ROC score: {:.5f}'.format(roc_score))
    print('Test AP score: {:.5f}'.format(ap_score))
    print('Test Accuracy: {:.5f}'.format(test_acc))
    print('Test F1 score: {:.5f}'.format(test_f1))
    log_f.write('Test ROC score: {:.5f}\n'.format(roc_score))
    log_f.write('Test AP score: {:.5f}\n'.format(ap_score))
    log_f.write('Test Accuracy: {:.5f}\n'.format(test_acc))
    log_f.write('Test F1 score: {:.5f}\n'.format(test_f1))
def generate_emb():
    """
    Train node embeddings with a coordinate-descent hyperparameter search.

    Iterates over hyperparameter types in random order; for each type, tries
    each candidate value (others held at their current best), trains a fresh
    model for config.EPOCHS epochs, and keeps the best configuration found by
    `train`. Finally evaluates on the test split and saves the best
    embeddings to <CONV>_embeddings.pth.
    """
    global all_data, best_embeddings, best_model, all_hyperparameters, curr_hyperparameters, best_hyperparameters, all_losses, device
    all_data = preprocess.read_graphs(str(config.DATASET_DIR / "edge_list.txt"))
    # Iterate through hyperparameter type (shuffled)
    shuffled_param_type = random.sample(all_hyperparameters.keys(), len(all_hyperparameters.keys()))
    for param_type in shuffled_param_type:
        # Iterate through hyperparameter values of the specified type (shuffled)
        shuffled_param_val = random.sample(all_hyperparameters[param_type], len(all_hyperparameters[param_type]))
        for param_val in shuffled_param_val:
            # Initiate current hyperparameter
            curr_hyperparameters[param_type] = param_val
            print(curr_hyperparameters)
            log_f.write(str(curr_hyperparameters) + "\n")
            # Set up a fresh model + optimizer for this configuration
            model = mdl.TrainNet(all_data.x.shape[1], curr_hyperparameters['hidden'], curr_hyperparameters['output'], config.CONV.lower().split("_")[1], curr_hyperparameters['dropout']).to(device)
            optimizer = torch.optim.Adam(model.parameters(), lr = curr_hyperparameters['lr'], weight_decay = curr_hyperparameters['wd'])
            # Train model, recording the per-epoch loss curve
            model.train()
            curr_losses = []
            for epoch in range(config.EPOCHS):
                loss = train(epoch, model, optimizer)
                curr_losses.append(loss)
            all_losses[";".join([str(v) for v in curr_hyperparameters.values()])] = curr_losses
        # Set up for next hyperparameter: lock in the best value of this type
        curr_hyperparameters[param_type] = best_hyperparameters[param_type]
    print("Best Hyperparameters: ", best_hyperparameters)
    print("Optimization finished!")
    log_f.write("Best Hyperparameters: %s \n" % best_hyperparameters)
    # Compute embeddings for the best model on CPU
    device = torch.device('cpu')
    best_model = best_model.to(device)
    best_embeddings = utils.get_embeddings(best_model, all_data, device)
    # Test
    test(best_model)
    # Save best embeddings
    torch.save(best_embeddings, config.DATASET_DIR / (config.CONV.lower() + "_embeddings.pth"))
| 9,326 | 48.611702 | 334 | py |
SubGNN | SubGNN-main/prepare_dataset/utils.py | # General
import random
import numpy as np
# Pytorch
import torch
import torch.nn.functional as F
from torch.nn import Sigmoid
from torch_geometric.data import Dataset
# Matplotlib
from matplotlib import pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
# Sci-kit Learn
from sklearn.metrics import roc_auc_score, average_precision_score, accuracy_score, f1_score, roc_curve, precision_recall_curve
# Global variables
# Select GPU when available; otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def calc_loss_both(data, dot_pred):
    """
    Calculate loss via link prediction
    Args
        - data (Data object): graph
        - dot_pred (tensor long, shape=(nodes, classes)): predictions calculated from dot product
    Return
        - loss (float): loss
    """
    loss = F.nll_loss(F.log_softmax(dot_pred.to(device), dim=-1), data.y.long())
    # NOTE(review): dot_pred comes from el_dot, which detaches the
    # embeddings, so this loss has no path back to the model parameters;
    # forcing requires_grad=True makes backward() run but compute no
    # parameter gradients -- confirm the training signal is intended.
    loss.requires_grad = True
    return loss
def el_dot(embed, edges, test=False):
    """
    Element-wise dot-product edge scorer for link prediction.

    Args
        - embed (tensor): node embedding matrix, one row per node
        - edges (tensor): 2 x E tensor of (source, target) node index pairs
        - test (bool): when True, return only the sigmoid edge scores

    Return
        - if test: tensor of E sigmoid scores
        - else: E x 2 tensor of (1 - score, score) pairs per edge
    """
    # Work on detached CPU copies; the returned scores carry no gradient.
    embed = embed.cpu().detach()
    edges = edges.cpu().detach()
    n_edges = edges.shape[1]
    emb_dim = embed.shape[1]
    src = torch.index_select(embed, 0, edges[0, :])
    dst = torch.index_select(embed, 0, edges[1, :])
    # Batched (1 x d) @ (d x 1) products yield one raw score per edge.
    raw = torch.bmm(src.view(n_edges, 1, emb_dim), dst.view(n_edges, emb_dim, 1))
    scores = torch.sigmoid(raw.squeeze())
    if test:
        return scores
    # Column 0 = 1 - P(edge), column 1 = P(edge).
    complement = (torch.ones((1, len(scores))) - scores).squeeze()
    return torch.stack((complement, scores), 1)
# NOTE(review): the mutable default arguments ([] / {}) are never mutated in
# this function, so they are harmless here, but consider None sentinels.
def calc_roc_score(pred_all, pos_edges=[], neg_edges=[], true_all=[], save_plots="", loss = [], multi_class=False, labels=[], multilabel=False):
    """
    Calculate ROC score
    Args
        - pred_all: predicted scores; in the multi-class case shape
          (n_samples, n_classes), otherwise indexable by edge index lists
        - pos_edges: indices into pred_all for positive edges (binary case only)
        - neg_edges: indices into pred_all for negative edges (binary case only)
        - true_all: ground-truth labels (multi-class case only; one-hot /
          indicator matrix unless multilabel)
        - save_plots: PDF path for diagnostic plots; "" disables plotting
        - loss: per-run training losses forwarded to plot_roc_ap
        - multi_class: True for multi-class classification, False for
          binary link prediction
        - labels: class names forwarded to plot_roc_ap (multi-class case)
        - multilabel: True if each sample can have several positive classes
    Return
        - multi-class: (roc_auc, acc, f1_micro) plus per-class
          (class_roc, class_ap, class_f1) when save_plots is set
        - binary: (roc_auc, ap_score, acc, f1)
    """
    if multi_class:
        # Optionally render per-class curves first; plot_roc_ap also returns
        # the per-class metrics appended to the return value below.
        if save_plots != "":
            class_roc, class_ap, class_f1 = plot_roc_ap(true_all, pred_all, save_plots, loss = loss, labels = labels, multilabel = multilabel)
        roc_auc = roc_auc_score(true_all, pred_all, multi_class = 'ovr')
        if multilabel:
            # Threshold each class independently at 0.5.
            pred_all = (pred_all > 0.5)
        else:
            # Collapse one-hot truth / score matrix to class indices.
            true_all = torch.argmax(true_all, axis = 1)
            pred_all = torch.argmax(torch.tensor(pred_all), axis = 1)
        f1_micro = f1_score(true_all, pred_all, average = "micro")
        acc = accuracy_score(true_all, pred_all)
        if save_plots != "": return roc_auc, acc, f1_micro, class_roc, class_ap, class_f1
        return roc_auc, acc, f1_micro
    else:
        # Binary link prediction: positives are labeled 1, negatives 0.
        pred_pos = pred_all[pos_edges]
        pred_neg = pred_all[neg_edges]
        pred_all = torch.cat((pred_pos, pred_neg), 0).cpu().detach().numpy()
        true_all = torch.cat((torch.ones(len(pred_pos)), torch.zeros(len(pred_neg))), 0).cpu().detach().numpy()
        roc_auc = roc_auc_score(true_all, pred_all)
        ap_score = average_precision_score(true_all, pred_all)
        acc = accuracy_score(true_all, (pred_all > 0.5))
        f1 = f1_score(true_all, (pred_all > 0.5))
        # Note: `loss` is passed positionally here (matches plot_roc_ap's
        # `loss` parameter).
        if save_plots != "": plot_roc_ap(true_all, pred_all, save_plots, loss, multilabel = multilabel)
        return roc_auc, ap_score, acc, f1
def plot_roc_ap(y_true, y_pred, save_plots, loss = {}, labels = [], multilabel = False):
    """
    Plot ROC curves, precision-recall curves, and (optionally) training-loss
    curves into a single multi-page PDF, and compute per-class F1 scores.

    Args
        - y_true: ground-truth labels; an indicator matrix of shape
          (n_samples, n_classes) in the multi-class case, a 1-D array otherwise
        - y_pred: predicted scores, same shape as y_true
        - save_plots (str): path of the output PDF
        - loss (dict): maps run name -> list of per-epoch losses; empty dict
          skips the loss page (the default is never mutated)
        - labels (list): class names; a non-empty list switches all plots and
          metrics to one-vs-rest multi-class mode
        - multilabel (bool): True if each sample can have several positive
          classes (scores are thresholded at 0.5 instead of argmax'd)
    Return
        - roc: ROC AUC (dict keyed by class index in the multi-class case)
        - ap: average precision (dict keyed by class index in the multi-class case)
        - f1: per-class F1 scores (empty list in the binary case)
    """
    with PdfPages(save_plots) as pdf:
        # ROC
        fpr = dict()
        tpr = dict()
        roc = dict()
        if len(labels) > 0: # Multiclass classification
            for c in range(y_true.shape[1]):
                fpr[c], tpr[c], _ = roc_curve(y_true[:, c], y_pred[:, c])
                roc[c] = roc_auc_score(y_true[:, c], y_pred[:, c])
                plt.plot(fpr[c], tpr[c], label = str(labels[c]) + " (area = {:.5f})".format(roc[c]))
                print("[ROC] " + str(labels[c]) + ": {:.5f}".format(roc[c]))
        else: # Binary classification
            fpr, tpr, _ = roc_curve(y_true, y_pred)
            roc = roc_auc_score(y_true, y_pred)
            plt.plot(fpr, tpr, label = "ROC = {:.5f}".format(roc))
        plt.plot([0, 1], [0, 1], linestyle='--') # chance diagonal
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 1.05])
        plt.xlabel("False Positive Rate")
        plt.ylabel("True Positive Rate")
        plt.legend(loc="best")
        plt.title("ROC")
        pdf.savefig()
        plt.close()
        # Precision-Recall curve
        precision = dict()
        recall = dict()
        ap = dict()
        if len(labels) > 0: # Multiclass classification
            for c in range(y_true.shape[1]):
                precision[c], recall[c], _ = precision_recall_curve(y_true[:, c], y_pred[:, c])
                ap[c] = average_precision_score(y_true[:, c], y_pred[:, c])
                plt.plot(recall[c], precision[c], label = str(labels[c]) + " (area = {:.5f})".format(ap[c]))
                print("[AP] " + str(labels[c]) + ": {:.5f}".format(ap[c]))
        else: # Binary classification
            precision, recall, _ = precision_recall_curve(y_true, y_pred)
            ap = average_precision_score(y_true, y_pred)
            # Baseline for PR is the positive-class prevalence.
            n_true = sum(y_true)/len(y_true)
            plt.plot(recall, precision, label = "AP = {:.5f}".format(ap))
            plt.plot([0, 1], [n_true, n_true], linestyle='--')
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 1.05])
        plt.xlabel("Recall")
        plt.ylabel("Precision")
        if len(labels) > 0: plt.legend(loc="best")
        plt.title("Precision-recall curve")
        pdf.savefig()
        plt.close()
        # Loss
        if len(loss) > 0:
            # NOTE(review): plotting assumes every loss list has max_epochs
            # entries; shorter runs would raise a length-mismatch — confirm.
            max_epochs = max([len(l) for k, l in loss.items()])
            for k, l in loss.items():
                plt.plot(np.arange(max_epochs), l, label = k)
            plt.xlabel("Epochs")
            plt.ylabel("Loss")
            plt.xlim([0, max_epochs])
            plt.legend(loc="best")
            plt.title("Training Loss")
            pdf.savefig()
            plt.close()
        # F1 score
        f1 = []
        if len(labels) > 0: # Multiclass classification
            if not multilabel:
                y_true = torch.argmax(y_true, axis = 1)
                y_pred = torch.argmax(torch.tensor(y_pred), axis = 1)
            else: y_pred = (y_pred > 0.5)
            # Fix: `labels` must be passed by keyword — scikit-learn >= 1.1
            # no longer accepts positional arguments after y_pred in f1_score.
            f1 = f1_score(y_true, y_pred, labels = list(range(len(labels))), average = None)
            for c in range(len(f1)):
                print("[F1] " + str(labels[c]) + ": {:.5f}".format(f1[c]))
    return roc, ap, f1
######################################################
# Get best embeddings
#def get_embeddings(model, data_loader, device):
@torch.no_grad()
def get_embeddings(model, data, device):
    """
    Run the trained model once over the full graph to obtain node embeddings.

    Args
        - model (torch object): best model
        - data (Data object): dataset with `x` node features and `edge_index`
        - device (torch object): cpu or cuda
    Return
        - embeddings (tensor): embedding for every node
    """
    model.eval()
    graph = data.to(device)
    embeddings = model(graph.x, graph.edge_index)
    print(embeddings.shape)
    return embeddings
| 7,302 | 32.810185 | 144 | py |
SubGNN | SubGNN-main/prepare_dataset/model.py | # Pytorch
import torch
import torch.nn as nn
from torch.nn import Linear, LayerNorm, ReLU
from torch_geometric.nn import GINConv, GCNConv
import torch.nn.functional as F
# General
import numpy as np
import torch
import utils
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class TrainNet(nn.Module):
    """
    Two-layer graph neural network used to pre-train node embeddings.

    Args
        - nfeat (int): input feature dimension
        - nhid (int): hidden dimension
        - nclass (int): output dimension (number of classes)
        - conv_type (str): "gin" for GINConv layers, "gcn" for GCNConv layers
        - dropout (float): dropout probability applied between the two layers

    Raises
        - ValueError: if `conv_type` is not "gin" or "gcn" (previously an
          unknown conv_type silently left the model without conv layers and
          crashed later in forward()).
    """
    def __init__(self, nfeat, nhid, nclass, conv_type, dropout):
        super(TrainNet, self).__init__()
        if conv_type == "gin":
            # GIN layers wrap a small MLP (here a single Linear per layer).
            nn1 = nn.Sequential(nn.Linear(nfeat, nhid))
            self.conv1 = GINConv(nn1)
            nn2 = nn.Sequential(nn.Linear(nhid, nclass))
            self.conv2 = GINConv(nn2)
        elif conv_type == "gcn":
            self.conv1 = GCNConv(nfeat, nhid)
            self.conv2 = GCNConv(nhid, nclass)
        else:
            # Fail fast on configuration errors instead of deferring to forward().
            raise ValueError("Unsupported conv_type: %r (expected 'gin' or 'gcn')" % conv_type)
        self.dropout = dropout

    def forward(self, x, edge_index):
        """Return per-node logits; dropout is active only in training mode."""
        x = F.relu(self.conv1(x, edge_index))
        x = F.dropout(x, p = self.dropout, training = self.training)
        return self.conv2(x, edge_index)
| 1,045 | 27.27027 | 69 | py |
SubGNN | SubGNN-main/prepare_dataset/prepare_dataset.py | # General
import numpy as np
import random
import typing
import logging
from collections import Counter, defaultdict
import config_prepare_dataset as config
import os
if not os.path.exists(config.DATASET_DIR):
os.makedirs(config.DATASET_DIR)
import train_node_emb
# Pytorch
import torch
from torch_geometric.data import Data
from torch_geometric.utils import from_networkx
# NetworkX
import networkx as nx
from networkx.generators.random_graphs import barabasi_albert_graph, extended_barabasi_albert_graph
from networkx.generators.duplication import duplication_divergence_graph
class SyntheticGraph():
    """
    Build a synthetic base graph, plant/staple/sample subgraphs into it,
    label each subgraph by a structural property (density, cut ratio,
    coreness, or number of connected components), and initialize node
    features. Configuration constants come from config_prepare_dataset.
    """
    # NOTE(review): `base_graph` and `feature_matrix` parameters are accepted
    # but never used anywhere in the class — confirm whether passing in a
    # pre-built graph/features was meant to be supported.
    def __init__(self, base_graph_type: str, subgraph_type: str,
                 features_type: str, base_graph=None, feature_matrix=None, **kwargs):
        self.base_graph_type = base_graph_type
        self.subgraph_type = subgraph_type
        self.features_type = features_type
        # Order matters: subgraph generation mutates self.graph, and labels /
        # features are computed on the final graph.
        self.graph = self.generate_base_graph(**kwargs)
        self.subgraphs = self.generate_and_add_subgraphs(**kwargs)
        self.subgraph_labels = self.generate_subgraph_labels(**kwargs)
        self.feature_matrix = self.initialize_features(**kwargs)

    def generate_base_graph(self, **kwargs):
        """
        Generate the base graph.
        Return
            - G (networkx object): base graph
        """
        if self.base_graph_type == 'barabasi_albert':
            m = kwargs.get('m', 5)
            n = kwargs.get('n', 500)
            G = barabasi_albert_graph(n, m, seed=config.RANDOM_SEED)
        elif self.base_graph_type == 'duplication_divergence_graph':
            n = kwargs.get('n', 500)
            p = kwargs.get('p', 0.5)
            G = duplication_divergence_graph(n, p, seed=config.RANDOM_SEED)
        else:
            raise Exception('The base graph you specified is not implemented')
        return G

    def initialize_features(self, **kwargs):
        """
        Initialize node features in base graph.
        Return
            - Numpy matrix (n_nodes x n_features); identity for 'one_hot',
              all-ones for 'constant'
        """
        n_nodes = len(self.graph.nodes)
        if self.features_type == 'one_hot':
            return np.eye(n_nodes, dtype=int)
        elif self.features_type == 'constant':
            n_features = kwargs.pop('n_features', 20)
            return np.full((n_nodes, n_features), 1)
        else:
            raise Exception('The feature initialization you specified is not implemented')

    def generate_and_add_subgraphs(self, **kwargs):
        """
        Generate and add subgraphs to the base graph.
        Return
            - subgraphs (list of lists): list of subgraphs, where each subgraph is a list of nodes
        """
        n_subgraphs = kwargs.pop('n_subgraphs', 3)
        n_nodes_in_subgraph = kwargs.pop('n_subgraph_nodes', 5)
        n_connected_components = kwargs.pop('n_connected_components', 1)
        modify_graph_for_properties = kwargs.pop('modify_graph_for_properties', False)
        desired_property = kwargs.get('desired_property', None)
        if self.subgraph_type == 'random':
            subgraphs = self._get_subgraphs_randomly(n_subgraphs, n_nodes_in_subgraph, **kwargs)
        elif self.subgraph_type == 'bfs':
            subgraphs = self._get_subgraphs_by_bfs(n_subgraphs, n_nodes_in_subgraph, n_connected_components, **kwargs)
        elif self.subgraph_type == 'staple':
            subgraphs = self._get_subgraphs_by_k_hops(n_subgraphs, n_nodes_in_subgraph, n_connected_components, **kwargs)
        elif self.subgraph_type == 'plant':
            # Coreness needs nodes with matching core numbers, so it has a
            # dedicated planting routine.
            if desired_property == 'coreness':
                subgraphs = self._get_subgraphs_by_coreness(n_subgraphs, n_nodes_in_subgraph, n_connected_components, **kwargs)
            else:
                subgraphs = self._get_subgraphs_by_planting(n_subgraphs, n_nodes_in_subgraph, n_connected_components, **kwargs)
        else:
            raise Exception('The subgraph generation you specified is not implemented')
        if modify_graph_for_properties:
            self._modify_graph_for_desired_subgraph_properties(subgraphs, **kwargs)
        # NOTE(review): _relabel_nodes returns the relabeled subgraph lists
        # but the result is discarded here — the returned `subgraphs` still
        # carry pre-relabel node ids while self.graph has been relabeled.
        # Confirm whether this should be `subgraphs = self._relabel_nodes(...)`.
        self._relabel_nodes(subgraphs, **kwargs)
        return subgraphs

    def _get_subgraphs_randomly(self, n_subgraphs, n_nodes_in_subgraph, **kwargs):
        """
        Randomly generates subgraphs of size n_nodes_in_subgraph
        Args
            - n_subgraphs (int): number of subgraphs
            - n_nodes_in_subgraph (int): number of nodes in each subgraph
        Return
            - subgraphs (list of lists): list of subgraphs, where each subgraph is a list of nodes
        """
        subgraphs = []
        for s in range(n_subgraphs):
            # NOTE(review): random.sample on a NodeView relies on pre-3.11
            # behavior — since Python 3.11 the population must be a sequence.
            sampled_nodes = random.sample(self.graph.nodes, n_nodes_in_subgraph)
            subgraphs.append(sampled_nodes)
        return subgraphs

    def staple_component_to_graph(self, n_nodes_in_subgraph, graph_root_node, **kwargs):
        """
        Staple a connected component to a graph.
        Args
            - n_nodes_in_subgraph (int): number of nodes in each subgraph
            - graph_root_node (int): node in the base graph that the component should be "stapled" to
        Return
            - cc_node_ids (list): nodes in a connected component
            - cc_root_node (int): node in the connected component (subgraph) to connect with the graph_root_node
        """
        # Create new connected component for the node in base graph; new nodes
        # get ids immediately after the current max (disjoint_union relabels
        # the second graph's nodes consecutively).
        con_component = self.generate_subgraph(n_nodes_in_subgraph, **kwargs)
        cc_node_ids = list(range(len(self.graph.nodes), len(self.graph.nodes) + n_nodes_in_subgraph ))
        # Staple the connected component to the base graph
        joined_graph = nx.disjoint_union(self.graph, con_component)
        cc_root_node = random.sample(cc_node_ids, 1)[0]
        joined_graph.add_edge(graph_root_node, cc_root_node)
        self.graph = joined_graph.copy()
        return cc_node_ids, cc_root_node

    def _get_subgraphs_by_k_hops(self, n_subgraphs, n_nodes_in_subgraph, n_connected_components, **kwargs):
        """
        Generate subgraphs that are k hops apart, staple each subgraph to the base graph by adding edge between a random node
        from the subgraph and a random node from the base graph
        Args
            - n_subgraphs (int): number of subgraphs
            - n_nodes_in_subgraph (int): number of nodes in each subgraph
            - n_connected_components (int): number of connected components in each subgraph
              (None means sample the count per subgraph from config.CC_RANGE)
        Return
            - validated_subgraphs (list of lists): list of subgraphs, where each subgraph is a list of nodes
        """
        # k-hop distances are expressed as fractions of the graph diameter.
        diameter = nx.diameter(self.graph)
        k_hops_range = [int(diameter * k) for k in config.K_HOPS_RANGE]
        p_range = [float(p) for p in config.BA_P_RANGE]
        cc_range = [int(cc) for cc in config.CC_RANGE]
        shuffle_cc = False
        if n_connected_components == None: shuffle_cc = True
        print("DIAMETER: ", diameter)
        print("K-HOPS RANGE: ", k_hops_range)
        print("N CONNECTED COMPONENTS: ", n_connected_components)
        subgraphs = []
        original_node_ids = self.graph.nodes
        for s in range(n_subgraphs):
            curr_subgraph = []
            seen_nodes = []
            all_cc_start_nodes = []
            # Pick a k-hop distance and its paired BA rewiring probability.
            k_hops = random.sample(k_hops_range, 1)[0]
            p = p_range[k_hops_range.index(k_hops)]
            kwargs['p'] = p
            # Randomly select a node from base graph
            graph_root_node = random.sample(original_node_ids, 1)[0]
            seen_nodes.append(graph_root_node)
            cc_node_ids, cc_root_node = self.staple_component_to_graph(n_nodes_in_subgraph, graph_root_node, **kwargs)
            curr_subgraph.extend(cc_node_ids)
            seen_nodes.extend(cc_node_ids)
            all_cc_start_nodes.append(cc_root_node) # keep track of start nodes across CCs
            # Get nodes that are k hops away
            n_hops_paths = nx.single_source_shortest_path_length(self.graph, graph_root_node, cutoff=k_hops)
            candidate_nodes = [node for node in n_hops_paths if self.is_k_hops_from_all_cc(node, all_cc_start_nodes, k_hops) and node not in seen_nodes]
            # Fallback: if no node is exactly k hops from every CC start, use
            # the farthest reachable nodes instead.
            if len(candidate_nodes) == 0: candidate_nodes = [node for node, length in n_hops_paths.items() if length == max(n_hops_paths.values())]
            if shuffle_cc: n_connected_components = random.sample(cc_range, 1)[0]
            for c in range(n_connected_components - 1):
                new_graph_root_node = random.sample(candidate_nodes, 1)[0] # choose a random node that is k hops away
                seen_nodes.append(new_graph_root_node)
                cc_node_ids, cc_root_node = self.staple_component_to_graph(n_nodes_in_subgraph, new_graph_root_node, **kwargs)
                curr_subgraph.extend(cc_node_ids)
                seen_nodes.extend(cc_node_ids)
                all_cc_start_nodes.append(cc_root_node) # keep track of start nodes across CCs
            if len(curr_subgraph) >= n_nodes_in_subgraph * n_connected_components:
                actual_num_cc = nx.number_connected_components(self.graph.subgraph(curr_subgraph))
                if shuffle_cc and actual_num_cc in config.CC_RANGE: subgraphs.append(curr_subgraph)
                elif not shuffle_cc and actual_num_cc > 1: subgraphs.append(curr_subgraph) # must have >1 CC
        # Validate that subgraphs have the desired number of CCs
        # (re-checked here because later stapling can merge earlier CCs).
        validated_subgraphs = []
        for s in subgraphs:
            actual_num_cc = nx.number_connected_components(self.graph.subgraph(s))
            if shuffle_cc and actual_num_cc in config.CC_RANGE: validated_subgraphs.append(s)
            elif not shuffle_cc and actual_num_cc > 1: validated_subgraphs.append(s) # must have >1 CC
        print(len(validated_subgraphs))
        return validated_subgraphs

    def _get_subgraphs_by_coreness(self, n_subgraphs, n_nodes_in_subgraph, n_connected_components, remove_edges=False, **kwargs):
        """
        Sample nodes from the base graph that have at least n nodes with k core. Merge the edges from the generated
        subgraph with the edges from the base graph. Optionally, remove all other edges in the subgraphs
        Args
            - n_subgraphs (int): number of subgraphs
            - n_nodes_in_subgraph (int): number of nodes in each subgraph
            - n_connected_components (int): number of connected components in each subgraph
            - remove_edges (bool): true if should remove unmerged edges in subgraphs, false otherwise
        Return
            - subgraphs (list of lists): list of subgraphs, where each subgraph is a list of nodes
        """
        subgraphs = []
        k_core_dict = nx.core_number(self.graph)
        nodes_per_k_core = Counter(list(k_core_dict.values()))
        print(nodes_per_k_core)
        # Bucket nodes by core number (defaultdict() without a factory
        # behaves like a plain dict; membership is checked explicitly below).
        nodes_with_core_number = defaultdict()
        for n, k in k_core_dict.items():
            if k in nodes_with_core_number: nodes_with_core_number[k].append(n)
            else: nodes_with_core_number[k] = [n]
        for k in nodes_with_core_number:
            # Get nodes with core number k that have not been sampled already
            nodes_with_k_cores = nodes_with_core_number[k]
            # Sample n_subgraphs subgraphs per core number
            for s in range(n_subgraphs):
                curr_subgraph = []
                for c in range(n_connected_components):
                    # Stop once this core-number bucket is exhausted.
                    if len(nodes_with_k_cores) < n_nodes_in_subgraph: break
                    con_component = self.generate_subgraph(n_nodes_in_subgraph, **kwargs)
                    cc_node_ids = random.sample(nodes_with_k_cores, n_nodes_in_subgraph)
                    # Relabel subgraph to have the same ids as the randomly sampled nodes
                    cc_id_mapping = {curr_id:new_id for curr_id, new_id in zip(con_component.nodes, cc_node_ids)}
                    nx.relabel_nodes(con_component, cc_id_mapping, copy=False)
                    if remove_edges:
                        # Remove the existing edges between nodes in the planted subgraph (except the ones to be added)
                        self.graph.remove_edges_from(self.graph.subgraph(cc_node_ids).edges)
                    # Combine the base graph & subgraph. Nodes with the same ID are merged
                    joined_graph = nx.compose(self.graph, con_component) #NOTE: attributes from subgraph take precedent over attributes from self.graph
                    self.graph = joined_graph.copy()
                    curr_subgraph.extend(cc_node_ids) # add nodes to subgraph
                    # Sample without replacement across subgraphs of this bucket.
                    nodes_with_k_cores = list(set(nodes_with_k_cores).difference(set(cc_node_ids)))
                    nodes_with_core_number[k] = nodes_with_k_cores
                if len(curr_subgraph) > 0: subgraphs.append(curr_subgraph)
        return subgraphs

    def _get_subgraphs_by_bfs(self, n_subgraphs, n_nodes_in_subgraph, n_connected_components, **kwargs):
        """
        Sample n_connected_components number of start nodes from the base graph. Perform BFS to create subgraphs
        of size n_nodes_in_subgraph.
        Args
            - n_subgraphs (int): number of subgraphs
            - n_nodes_in_subgraph (int): number of nodes in each subgraph
            - n_connected_components (int): number of connected components in each subgraph
        Return
            - subgraphs (list of lists): list of subgraphs, where each subgraph is a list of nodes
        """
        max_depth = kwargs.pop('max_depth', 3)
        subgraphs = []
        for s in range(n_subgraphs):
            #randomly select start nodes. # of start nodes == n connected components
            curr_subgraph = []
            start_nodes = random.sample(self.graph.nodes, n_connected_components)
            for start_node in start_nodes:
                # BFS edges come back in traversal order, so truncating keeps
                # the nodes closest to the start node.
                edges = nx.bfs_edges(self.graph, start_node, depth_limit=max_depth)
                nodes = [start_node] + [v for u, v in edges]
                nodes = nodes[:n_nodes_in_subgraph] #limit nodes to n_nodes_in_subgraph
                if max(nodes) > max(self.graph.nodes): print(max(nodes), max(self.graph.nodes))
                assert max(nodes) <= max(self.graph.nodes)
                assert nx.is_connected(self.graph.subgraph(nodes)) #check to see if selected nodes represent a conencted component
                curr_subgraph.extend(nodes)
            subgraphs.append(curr_subgraph)
        seen = []
        for g in subgraphs:
            seen += g
        assert max(seen) <= max(self.graph.nodes)
        return subgraphs

    def generate_subgraph(self, n_nodes_in_subgraph, **kwargs):
        """
        Generate a subgraph with specified properties.
        Args
            - n_nodes_in_subgraph (int): number of nodes in each subgraph
              (ignored by 'house', which is a fixed 5-node graph)
        Return
            - G (networkx object): subgraph
        """
        subgraph_generator = kwargs.pop('subgraph_generator', 'path')
        if subgraph_generator == 'cycle':
            G = nx.cycle_graph(n_nodes_in_subgraph)
        elif subgraph_generator == 'path':
            G = nx.path_graph(n_nodes_in_subgraph)
        elif subgraph_generator == 'house':
            G = nx.house_graph()
        elif subgraph_generator == 'complete':
            G = nx.complete_graph(n_nodes_in_subgraph)
        elif subgraph_generator == 'star':
            G = nx.star_graph(n_nodes_in_subgraph)
        elif subgraph_generator == 'barabasi_albert':
            m = kwargs.get('m', 5)
            G = barabasi_albert_graph(n_nodes_in_subgraph, m, seed=config.RANDOM_SEED)
        elif subgraph_generator == 'extended_barabasi_albert':
            m = kwargs.get('m', 5)
            p = kwargs.get('p', 0.5)
            q = kwargs.get('q', 0)
            G = extended_barabasi_albert_graph(n_nodes_in_subgraph, m, p, q, seed=config.RANDOM_SEED)
        elif subgraph_generator == 'duplication_divergence_graph':
            p = kwargs.get('p', 0.5)
            G = duplication_divergence_graph(n_nodes_in_subgraph, p)
        else:
            raise Exception('The subgraph generator you specified is not implemented.')
        return G

    def is_k_hops_away(self, start, end, n_hops):
        """
        Check whether the start node is k hops away from the end node.
        Args
            - start (int): start node
            - end (int): end node
            - n_hops (int): k hops
        Return
            - True if the start node is exactly k hops away from the end node, false otherwise
        """
        shortest_path_lengh = nx.shortest_path_length(self.graph, start, end)
        if shortest_path_lengh == n_hops:
            return True
        else:
            return False

    def is_k_hops_from_all_cc(self, cand, all_cc_start_nodes, k_hops):
        """
        Check whether the candidate node is k hops away from all CC start nodes.
        Args
            - cand (int): candidate node
            - all_cc_start_nodes (list): cc start nodes
            - k_hops (int): k hops
        Return
            - True if the candidate node is k hops away from all CC start nodes, false otherwise
        """
        for cc_start in all_cc_start_nodes:
            if not self.is_k_hops_away(cc_start, cand, k_hops):
                return False
        return True

    def _get_subgraphs_by_stapling(self, n_subgraphs, n_nodes_in_subgraph, n_connected_components, **kwargs):
        """
        Generate n subgraphs, staple each subgraph to the base graph by adding an edge between random node
        from the subgraph and a random node from the base graph.
        Args
            - n_subgraphs (int): number of subgraphs
            - n_nodes_in_subgraph (int): number of nodes in each subgraph
            - n_connected_components (int): number of connected components in each subgraph
        Return
            - subgraphs (list of lists): list of subgraphs, where each subgraph is a list of nodes
        """
        k_core_to_sample = kwargs.pop('k_core_to_sample', -1)
        k_hops = kwargs.pop('k_hops', -1)
        subgraphs = []
        original_node_ids = self.graph.nodes
        # NOTE(review): `cc_node_ids` is only assigned in the final `else`
        # branch below, so the k_hops and k_core branches extend
        # `curr_subgraph` with a stale (or, on the first iteration of the
        # k_core branch, undefined) value — confirm this path is still used.
        for s in range(n_subgraphs):
            curr_subgraph = []
            all_cc_start_nodes = []
            for c in range(n_connected_components):
                con_component = self.generate_subgraph(n_nodes_in_subgraph, **kwargs)
                graph_root_node = random.sample(original_node_ids, 1)[0]
                if c > 0 and k_hops != -1:
                    # make sure to sample the next node k hops away from the previously sampled root node
                    # and check to see that the selected start node is k hops away from all previous start nodes
                    n_hops_paths = nx.single_source_shortest_path_length(self.graph, cc_root_node, cutoff=k_hops)
                    candidate_nodes = [node for node,length in n_hops_paths.items()]
                    random.shuffle(candidate_nodes)
                    candidate_nodes = [cand for cand in candidate_nodes if self.is_k_hops_from_all_cc(cand, all_cc_start_nodes, k_hops)]
                    if len(candidate_nodes) == 0:
                        raise Exception('There are no nodes that are k hops away from all other CC start nodes.')
                    cc_root_node = random.sample(candidate_nodes, 1)[0]
                    all_cc_start_nodes.append(cc_root_node) # keep track of start nodes across CCs
                elif k_core_to_sample != -1:
                    k_core_dict = nx.core_number(self.graph)
                    nodes_with_core_number = [node for node, core_num in k_core_dict.items()if core_num == k_core_to_sample]
                    cc_root_node = random.sample(nodes_with_core_number, 1)[0]
                    all_cc_start_nodes.append(cc_root_node) # keep track of start nodes across CCs
                else: # if we're not trying to sample each CC k hops away OR if it's the first time we sample a CC,
                      # just randomly sample a start node from the graph
                    #randomly sample root node where the CC will be attached
                    cc_node_ids = list(range(len(self.graph.nodes), len(self.graph.nodes) + n_nodes_in_subgraph ))
                    cc_root_node = random.sample(cc_node_ids, 1)[0]
                    all_cc_start_nodes.append(cc_root_node) # keep track of start nodes across CCs
                #combine the generated subgraph & the graph
                joined_graph = nx.disjoint_union(self.graph, con_component)
                # add an edge between one node in the graph & subgraph
                joined_graph.add_edge(graph_root_node, cc_root_node)
                self.graph = joined_graph.copy()
                #add connected component to IDs for current subgraph
                curr_subgraph.extend(cc_node_ids)
            subgraphs.append(curr_subgraph)
        return subgraphs

    def _get_subgraphs_by_planting(self, n_subgraphs, n_nodes_in_subgraph, n_connected_components, remove_edges=False, **kwargs):
        """
        Randomly sample nodes from base graph that will be in each subgraph. Merge the edges from the generated
        subgraph with the edges from the base graph. Optionally, remove all other edges in the subgraphs
        Args
            - n_subgraphs (int): number of subgraphs
            - n_nodes_in_subgraph (int): number of nodes in each subgraph
            - n_connected_components (int): number of connected components in each subgraph
            - remove_edges (bool): true if should remove unmerged edges in subgraphs, false otherwise
        Return
            - subgraphs (list of lists): list of subgraphs, where each subgraph is a list of nodes
        """
        k_core_to_sample = kwargs.pop('k_core_to_sample', -1)
        subgraphs = []
        for s in range(n_subgraphs):
            curr_subgraph = []
            for c in range(n_connected_components):
                con_component = self.generate_subgraph(n_nodes_in_subgraph, **kwargs)
                #randomly sample which nodes from the base graph will be the subgraph
                if k_core_to_sample != -1:
                    k_core_dict = nx.core_number(self.graph)
                    nodes_with_core_number = [node for node, core_num in k_core_dict.items()if core_num == k_core_to_sample]
                    cc_node_ids = random.sample(nodes_with_core_number, n_nodes_in_subgraph)
                else:
                    cc_node_ids = random.sample(self.graph.nodes, n_nodes_in_subgraph)
                #relabel subgraph to have the same ids as the randomly sampled nodes
                cc_id_mapping = {curr_id:new_id for curr_id, new_id in zip(con_component.nodes, cc_node_ids)}
                nx.relabel_nodes(con_component, cc_id_mapping, copy=False)
                if remove_edges:
                    #remove the existing edges between nodes in the planted subgraph (except the ones to be added)
                    self.graph.remove_edges_from(self.graph.subgraph(cc_node_ids).edges)
                # combine the base graph & subgraph. Nodes with the same ID are merged
                joined_graph = nx.compose(self.graph, con_component) #NOTE: attributes from subgraph take precedent over attributes from self.graph
                self.graph = joined_graph.copy()
                curr_subgraph.extend(cc_node_ids)
            subgraphs.append(curr_subgraph)
        return subgraphs

    def _get_property(self, subgraph, subgraph_property):
        """
        Compute the value of a specified property.
        Args
            - subgraph (networkx object): subgraph
            - subgraph_property (str): one of 'density', 'cut_ratio',
              'coreness', 'cc'
        Return
            - Value of subgraph
        """
        if subgraph_property == 'density':
            return nx.density(subgraph)
        elif subgraph_property == 'cut_ratio':
            # Boundary edges normalized by the number of possible
            # subgraph-to-rest edge pairs.
            nodes_except_subgraph = set(self.graph.nodes).difference(set(subgraph.nodes))
            n_boundary_edges = len(list(nx.edge_boundary(self.graph, subgraph.nodes, nodes_except_subgraph)))
            n_nodes = len(list(self.graph.nodes))
            n_sugraph_nodes = len(list(subgraph.nodes))
            return n_boundary_edges / (n_sugraph_nodes * (n_nodes - n_sugraph_nodes))
        elif subgraph_property == 'coreness':
            all_cores = nx.core_number(subgraph)
            avg_coreness = np.average(list(all_cores.values()))
            return avg_coreness
        elif subgraph_property == 'cc':
            return nx.number_connected_components(self.graph.subgraph(subgraph))
        else:
            raise Exception('The subgraph property you specificed is not implemented.')

    def _modify_graph_for_desired_subgraph_properties(self, subgraphs, **kwargs):
        """
        Modify the graph to achieve the desired subgraph property, by adding or
        removing edges until each subgraph's property is within epsilon of a
        randomly chosen target value (bounded by config.MAX_TRIES attempts).
        Args
            - subgraphs (list of lists): list of subgraphs, where each subgraph is a list of nodes
        """
        desired_property = kwargs.get('desired_property', 'density')
        # Iterate through subgraphs
        for s in subgraphs:
            # subgraph is a live view: edge add/remove on self.graph below is
            # reflected in it on the next property evaluation.
            subgraph = self.graph.subgraph(s)
            # DENSITY
            if desired_property == 'density':
                # Randomly select a density value
                desired_prop_value = random.sample(config.DENSITY_RANGE, 1)[0]
                n_tries = 0
                while True:
                    curr_subg_property = self._get_property(subgraph, desired_property)
                    if abs(curr_subg_property - desired_prop_value) < config.DENSITY_EPSILON: break
                    if n_tries >= config.MAX_TRIES: break
                    if curr_subg_property > desired_prop_value: #remove edges to decrease density
                        sampled_edge = random.sample(subgraph.edges, 1)[0]
                        self.graph.remove_edge(*sampled_edge)
                    else: # add edges to increase density
                        sampled_nodes = random.sample(subgraph.nodes, 2)
                        self.graph.add_edge(*sampled_nodes)
                    n_tries += 1
            # CUT RATIO
            elif desired_property == 'cut_ratio':
                # Randomly select a cut ratio value
                desired_prop_value = random.sample(config.CUT_RATIO_RANGE, 1)[0]
                n_tries = 0
                while True:
                    curr_subg_property = self._get_property(subgraph, desired_property)
                    if abs(curr_subg_property - desired_prop_value) < config.CUT_RATIO_EPSILON: break
                    if n_tries >= config.MAX_TRIES: break
                    # get edges on boundary
                    nodes_except_subgraph = set(self.graph.nodes).difference(set(subgraph.nodes))
                    subgraph_boundary_edges = list(nx.edge_boundary(self.graph, subgraph.nodes, nodes_except_subgraph))
                    if curr_subg_property > desired_prop_value: # high cut ratio -> too many edges
                        edge_to_remove = random.sample(subgraph_boundary_edges, 1)[0]
                        self.graph.remove_edge(*edge_to_remove)
                    else: # low cut ratio -> too few edges -> add edge
                        sampled_subgraph_node = random.sample(subgraph.nodes, 1)[0]
                        sampled_rest_graph_node = random.sample(nodes_except_subgraph,1)[0]
                        self.graph.add_edge(sampled_subgraph_node, sampled_rest_graph_node)
                    n_tries += 1
            elif desired_property == 'coreness' or desired_property == 'cc':
                # These properties are controlled at generation time, not by
                # post-hoc edge edits.
                continue
            else:
                raise Exception('Other properties have not yet been implemented')

    def _relabel_nodes(self, subgraphs, **kwargs):
        """
        Relabel nodes in the graph and subgraphs to ensure that all nodes are
        indexed consecutively. Keeps only the largest connected component and
        drops removed nodes from the subgraph lists.
        Return
            - new_subgraphs (list of lists): subgraphs expressed in the new ids
              (note: callers must use this return value; `subgraphs` is not
              modified in place)
        """
        largest_cc = max(nx.connected_components(self.graph), key=len)
        removed_nodes = set(list(self.graph.nodes)).difference(set(largest_cc))
        print("Original graph: %d, Largest cc: %d, Removed nodes: %d" % (len(self.graph.nodes), len(largest_cc), len(removed_nodes)))
        self.graph = self.graph.subgraph(largest_cc)
        mapping = {k: v for k, v in zip(list(self.graph.nodes), range(len(self.graph.nodes)))}
        self.graph = nx.relabel_nodes(self.graph, mapping)
        new_subgraphs = []
        for s in subgraphs:
            new_s = [mapping[n] for n in s if n not in removed_nodes]
            new_subgraphs.append(new_s)
        return new_subgraphs

    def generate_subgraph_labels(self, **kwargs):
        """
        Generate subgraph labels by binning each subgraph's property value and
        converting bin indices to letters ('A', 'B', ...).
        Return
            - labels (list): subgraph labels
        """
        # Make sure base graph is connected
        if nx.is_connected(self.graph) == False:
            max_cc = max(nx.connected_components(self.graph), key=len)
            self.graph = self.graph.subgraph(max_cc)
        # Setup
        densities = []
        cut_ratios = []
        coreness = []
        cc = []
        desired_property = kwargs.get('desired_property', 'density')
        for subgraph_nodes in self.subgraphs:
            subgraph = self.graph.subgraph(subgraph_nodes).copy()
            if desired_property == 'density':
                value = self._get_property(subgraph, desired_property)
                densities.append(value)
            elif desired_property == 'cut_ratio':
                value = self._get_property(subgraph, desired_property)
                cut_ratios.append(value)
            elif desired_property == 'coreness':
                value = self._get_property(subgraph, desired_property)
                coreness.append(value)
            elif desired_property == 'cc':
                value = self._get_property(subgraph, desired_property)
                cc.append(value)
        if desired_property == 'density':
            bins = self.generate_bins(sorted(densities), len(config.DENSITY_RANGE))
            labels = np.digitize(densities, bins = bins)
            labels = self.convert_number_to_chr(labels)
            print(Counter(labels))
            return labels
        elif desired_property == 'cut_ratio':
            bins = self.generate_bins(sorted(cut_ratios), len(config.CUT_RATIO_RANGE))
            labels = np.digitize(cut_ratios, bins = bins)
            labels = self.convert_number_to_chr(labels)
            print(Counter(labels))
            return labels
        elif desired_property == 'coreness':
            n_bins = kwargs.pop('n_bins', 5)
            bins = self.generate_bins(sorted(coreness), n_bins)
            labels = np.digitize(coreness, bins = bins)
            labels = self.convert_number_to_chr(labels)
            print(Counter(labels))
            return labels
        elif desired_property == 'cc':
            print(Counter(cc))
            bins = [1, 5] # 1 CC vs. >1 CC
            labels = np.digitize(cc, bins = bins)
            labels = self.convert_number_to_chr(labels)
            print(Counter(labels))
            assert len(list(Counter(labels).keys())) == len(bins)
            return labels
        else:
            raise Exception('Other properties have not yet been implemented')

    def generate_bins(self, values, n_bins):
        """
        Generate bins for given subgraph values (equal-frequency / quantile
        bins over the sorted values).
        Args
            - values (list): sorted values for each subgraph
            - n_bins (int): number of pins to split the subgraph values into
        Return
            - bins (list): cutoffs values for each bin
        """
        bins = (len(values) / float(n_bins)) * np.arange(1, n_bins + 1)
        bins = np.unique(np.array([values[int(b) - 1] for b in bins]))
        # Drop the last cutoff so np.digitize places the max value in the top bin.
        bins = np.delete(bins, len(bins) - 1)
        print("Bins: ", bins, "Min: ", min(values), "Max: ", max(values))
        return bins

    def convert_number_to_chr(self, labels):
        """
        Convert label bins from int to str ('A', 'B', ... in order of first
        appearance in the sorted set of bin indices).
        Args
            - labels (list): subgraph labels
        Return
            - new_labels (list): converted subgraph labels as strings
        """
        types = {}
        alpha_int = 65 # A
        # Create new keys
        for t in set(labels):
            types[t] = chr(alpha_int)
            alpha_int += 1
        # Convert labels
        new_labels = []
        for l in labels:
            new_labels.append(types[l])
        return new_labels
return new_labels
def generate_mask(n_subgraphs):
    """
    Generate train/val/test masks for the subgraphs (80/10/10 split).
    Args
        - n_subgraphs (int): number of subgraphs
    Return
        - mask (list): 0 if subgraph is in train set, 1 if in val set, 2 if in test set
    """
    idx = set(range(n_subgraphs))
    # Fix: random.sample requires a sequence (sampling from a set was
    # deprecated in Python 3.9 and removed in 3.11), so materialize the
    # remaining indices before each draw.
    train_mask = list(random.sample(sorted(idx), int(len(idx) * 0.8)))
    idx = idx.difference(set(train_mask))
    val_mask = list(random.sample(sorted(idx), len(idx) // 2))
    idx = idx.difference(set(val_mask))
    test_mask = list(random.sample(sorted(idx), len(idx)))
    # Use sets for O(1) membership tests instead of scanning lists.
    train_set, val_set, test_set = set(train_mask), set(val_mask), set(test_mask)
    mask = []
    for i in range(n_subgraphs):
        if i in train_set: mask.append(0)
        elif i in val_set: mask.append(1)
        elif i in test_set: mask.append(2)
    return mask
def write_f(sub_f, sub_G, sub_G_label, mask):
    """
    Write subgraphs in SubGNN's tab-delimited format: each row holds the
    dash-joined node ids, the subgraph label, and the split name.
    Args
    - sub_f (str): output file path
    - sub_G (list of lists): each subgraph as a list of node ids
    - sub_G_label (list): subgraph labels
    - mask (list): 0 -> train, 1 -> val, 2 -> test
    """
    split_name = {0: "train", 1: "val", 2: "test"}
    with open(sub_f, "w") as fout:
        for nodes, label, m in zip(sub_G, sub_G_label, mask):
            if len(nodes) == 0:
                continue  # empty subgraphs are silently dropped
            if m not in split_name:
                continue
            node_str = "-".join(str(v) for v in nodes)
            fout.write("\t".join([node_str, str(label), split_name[m], "\n"]))
def main():
    """Build the synthetic benchmark: optionally generate the base graph with
    labelled subgraphs, then optionally train node embeddings. All knobs come
    from the config module."""
    if config.GENERATE_SYNTHETIC_G:
        graph_gen = SyntheticGraph(base_graph_type=config.BASE_GRAPH_TYPE,
                                   subgraph_type=config.SUBGRAPH_TYPE,
                                   n_subgraphs=config.N_SUBGRAPHS,
                                   n_connected_components=config.N_CONNECTED_COMPONENTS,
                                   n_subgraph_nodes=config.N_SUBGRAPH_NODES,
                                   features_type=config.FEATURES_TYPE,
                                   n=config.N,
                                   p=config.P,
                                   q=config.Q,
                                   m=config.M,
                                   n_bins=config.N_BINS,
                                   subgraph_generator=config.SUBGRAPH_GENERATOR,
                                   modify_graph_for_properties=config.MODIFY_GRAPH_FOR_PROPERTIES,
                                   desired_property=config.DESIRED_PROPERTY)
        # Persist the base graph edge list plus the subgraph/label/split file.
        nx.write_edgelist(graph_gen.graph, str(config.DATASET_DIR / "edge_list.txt"), data=False)
        subgraphs = graph_gen.subgraphs
        labels = graph_gen.subgraph_labels
        split_mask = generate_mask(len(labels))
        write_f(str(config.DATASET_DIR / "subgraphs.pth"), subgraphs, labels, split_mask)
    if config.GENERATE_NODE_EMB:
        train_node_emb.generate_emb()


if __name__ == "__main__":
    main()
| 36,300 | 42.63101 | 152 | py |
SubGNN | SubGNN-main/prepare_dataset/preprocess.py | # General
import numpy as np
import random
import pickle
from collections import Counter
# Pytorch
import torch
from torch_geometric.data import Data
from torch_geometric.utils import from_networkx, negative_sampling
from torch_geometric.utils.convert import to_networkx
# NetworkX
import networkx as nx
from networkx.relabel import convert_node_labels_to_integers, relabel_nodes
from networkx.generators.random_graphs import barabasi_albert_graph
# Sklearn
from sklearn.feature_extraction.text import CountVectorizer
import sys
sys.path.insert(0, '../') # add config to path
import config_prepare_dataset as config
import utils
def read_graphs(edge_f):
    """
    Read the base graph edge list and wrap it as a PyTorch Geometric Data object
    with one-hot node features.
    Args
    - edge_f (str): path to the edge list file
    Return
    - all_data (Data): Data object for the base graph
    """
    base_graph = nx.read_edgelist(edge_f, nodetype=int)
    one_hot_feats = np.eye(len(base_graph.nodes), dtype=int)
    print("Graph density", nx.density(base_graph))
    all_data = create_dataset(base_graph, one_hot_feats)
    print(all_data)
    # The base graph must be one connected component, and every node needs
    # exactly one feature row.
    assert nx.is_connected(base_graph)
    assert len(base_graph) == all_data.x.shape[0]
    return all_data
def create_dataset(G, feat_mat, split=False):
    """
    Wrap a NetworkX graph as a PyTorch Geometric Data object with a random
    80/10/10 edge-level train/val/test split.
    Args
    - G (nx.Graph): base graph
    - feat_mat (array): per-node feature matrix
    - split (bool): unused; kept for interface compatibility
    Return
    - new_G (Data): graph with features, edge labels, and split masks
    """
    edge_index = torch.tensor(list(G.edges)).t().contiguous()
    x = torch.tensor(feat_mat, dtype=torch.float)  # node feature matrix
    y = torch.ones(edge_index.shape[1])            # every edge gets label 1
    num_classes = len(torch.unique(y))

    # Shuffle edge positions, then carve out 80/10/10 slices.
    order = np.arange(len(y))
    np.random.shuffle(order)
    n = len(order)
    train_idx = order[: 8 * n // 10]
    val_idx = order[8 * n // 10: 9 * n // 10]
    test_idx = order[9 * n // 10:]

    train_mask = torch.zeros(len(y), dtype=torch.bool)
    train_mask[train_idx] = 1
    val_mask = torch.zeros(len(y), dtype=torch.bool)
    val_mask[val_idx] = 1
    test_mask = torch.zeros(len(y), dtype=torch.bool)
    test_mask[test_idx] = 1

    new_G = Data(x=x, y=y, num_classes=num_classes, edge_index=edge_index,
                 train_mask=train_mask, val_mask=val_mask, test_mask=test_mask)
    return new_G
def set_data(data, all_data, minibatch):
    """
    Build the Data object for one minibatch from a sampler's output.
    Args
    - data (tuple): (batch_size, n_id, adjs) as yielded by the sampler
    - all_data (Data): full dataset to slice features/masks from
    - minibatch: unused; kept for interface compatibility
    Return
    - batch (Data): minibatch graph with sliced features and masks
    """
    batch_size, n_id, adjs = data
    batch = Data(edge_index=adjs[0], n_id=n_id, e_id=adjs[1])
    # Node features come from the sampled node ids; masks from the edge ids.
    batch.x = all_data.x[batch.n_id]
    batch.train_mask = all_data.train_mask[batch.e_id]
    batch.val_mask = all_data.val_mask[batch.e_id]
    batch.y = torch.ones(len(batch.e_id))
    return batch
| 3,076 | 27.490741 | 152 | py |
SFL-Structural-Federated-Learning | SFL-Structural-Federated-Learning-main/main.py | import torch
import random
import copy
import numpy as np
import time
from BResidual import BResidual
from options import arg_parameter
from data_util import load_cifar10, load_mnist
from federated import Cifar10FedEngine
from aggregator import parameter_aggregate, read_out
from util import *
def main(args):
    """Run structural federated training: per-round local client updates,
    server-side aggregation, global read-out, and periodic validation.

    Args
    - args: parsed options; uses .device, .dataset, .clients, .client_frac,
      .com_round, .valid_freq, .logDir among others.
    """
    args.device = torch.device(args.device)

    print("Prepare data and model...")
    if args.dataset == "cifar10":
        train_batches, test_batches, A, overall_tbatches = load_cifar10(args)
        model = BResidual(3)  # 3 input channels (RGB)
    elif args.dataset == "mnist":
        train_batches, test_batches, A, overall_tbatches = load_mnist(args)
        model = BResidual(1)  # 1 input channel (grayscale)
    else:
        # BUGFIX: the original fall-through assigned a 5-name tuple unpack
        # from None, crashing with an unrelated TypeError. Fail fast instead.
        print("Unknown model type ... ")
        raise ValueError("Unsupported dataset: " + str(args.dataset))
    # NOTE(review): overall_tbatches is never used below -- confirm intent.

    print("Prepare parameter holders")
    w_server, w_local = model.get_state()
    w_server = [w_server] * args.clients
    w_local = [w_local] * args.clients
    global_model = copy.deepcopy(w_server)
    personalized_model = copy.deepcopy(w_server)
    server_state = None                   # SCAFFOLD server control state
    client_states = [None] * args.clients  # SCAFFOLD per-client states

    print2file(str(args), args.logDir, True)
    nParams = sum([p.nelement() for p in model.parameters()])
    print2file('Number of model parameters is ' + str(nParams), args.logDir, True)

    print("Start Training...")
    num_collaborator = max(int(args.client_frac * args.clients), 1)
    for com in range(1, args.com_round + 1):
        selected_user = np.random.choice(range(args.clients), num_collaborator, replace=False)
        train_time = []
        train_loss = []
        train_acc = []
        for c in selected_user:
            # Local training on client c
            engine = Cifar10FedEngine(args, copy.deepcopy(train_batches[c]), global_model[c], personalized_model[c],
                                      w_local[c], {}, c, 0, "Train", server_state, client_states[c])
            outputs = engine.run()
            w_server[c] = copy.deepcopy(outputs['params'][0])
            w_local[c] = copy.deepcopy(outputs['params'][1])
            train_time.append(outputs["time"])
            train_loss.append(outputs["loss"])
            train_acc.append(outputs["acc"])
            client_states[c] = outputs["c_state"]

        mtrain_time = np.mean(train_time)
        mtrain_loss = np.mean(train_loss)
        mtrain_acc = np.mean(train_acc)

        log = 'Communication Round: {:03d}, Train Loss: {:.4f},' \
              ' Train Accuracy: {:.4f}, Training Time: {:.4f}/com_round'
        # BUGFIX: arguments were previously passed as (time, loss, acc),
        # mislabelling every training metric in the log output.
        print2file(log.format(com, mtrain_loss, mtrain_acc, mtrain_time),
                   args.logDir, True)

        # Server aggregation
        t1 = time.time()
        personalized_model, client_states, server_state = \
            parameter_aggregate(args, A, w_server, global_model, server_state, client_states, selected_user)
        t2 = time.time()
        log = 'Communication Round: {:03d}, Aggregation Time: {:.4f} secs'
        print2file(log.format(com, (t2 - t1)), args.logDir, True)

        # Readout for global model
        global_model = read_out(personalized_model, args.device)

        # Validation
        if com % args.valid_freq == 0:
            single_vtime = []
            single_vloss = []
            single_vacc = []
            all_vtime = []
            all_vloss = []
            all_vacc = []
            for c in range(args.clients):
                batch_time = []
                batch_loss = []
                batch_acc = []
                for batch in test_batches:
                    tengine = Cifar10FedEngine(args, copy.deepcopy(batch), personalized_model[c], personalized_model[c],
                                               w_local[c], {}, c, 0, "Test", server_state, client_states[c])
                    outputs = tengine.run()
                    batch_time.append(outputs["time"])
                    batch_loss.append(outputs["loss"])
                    batch_acc.append(outputs["acc"])
                # "Single" metrics: client c evaluated on the c-th test split
                # (assumes len(test_batches) == args.clients -- TODO confirm).
                single_vtime.append(batch_time[c])
                single_vloss.append(batch_loss[c])
                single_vacc.append(batch_acc[c])
                # "All" metrics: client c averaged over every client's split.
                all_vtime.append(np.mean(batch_time))
                all_vloss.append(np.mean(batch_loss))
                all_vacc.append(np.mean(batch_acc))
            single_log = 'SingleValidation Round: {:03d}, Valid Loss: {:.4f}, ' \
                         'Valid Accuracy: {:.4f}, Valid SD: {:.4f}, Test Time: {:.4f}/epoch'
            print2file(single_log.format(com, np.mean(single_vloss), np.mean(single_vacc), np.std(single_vacc),
                                         np.mean(single_vtime)), args.logDir, True)
            all_log = 'AllValidation Round: {:03d}, Valid Loss: {:.4f}, ' \
                      'Valid Accuracy: {:.4f}, Valid SD: {:.4f}, Test Time: {:.4f}/epoch'
            print2file(all_log.format(com, np.mean(all_vloss), np.mean(all_vacc), np.std(all_vacc),
                                      np.mean(all_vtime)), args.logDir, True)
if __name__ == "__main__":
    # Let cuDNN auto-tune convolution algorithms for fixed-size inputs.
    torch.backends.cudnn.benchmark = True
    option = arg_parameter()  # parse CLI options
    # Presumably seeds the RNGs for reproducibility -- defined in util.
    initial_environment(option.seed)
    main(option)
print("Everything so far so good....") | 5,188 | 37.437037 | 120 | py |
SFL-Structural-Federated-Learning | SFL-Structural-Federated-Learning-main/BResidual.py | import torch.nn as nn
import torch
from collections import namedtuple
import numpy as np
def union(*dicts):
    """Merge dicts left-to-right; later keys override earlier ones."""
    # PEP 8 (E731): plain defs instead of lambda assignments.
    return {k: v for d in dicts for (k, v) in d.items()}


# Separator used when flattening nested layer names into flat graph keys.
sep = '_'

# Marks a path resolved relative to the current prefix in build_graph.
RelativePath = namedtuple('RelativePath', ('parts'))


def rel_path(*parts):
    """Wrap *parts* as a RelativePath."""
    return RelativePath(parts)
class BResidual(nn.Module):
    """Residual CNN assembled from a declarative layer graph.

    The architecture is described as a nested dict (see ``net``) which
    ``build_graph`` flattens into ``{name: (module, [input names])}``. Every
    module is registered as an attribute, and ``forward`` executes the nodes
    in insertion order, caching each activation under its node name.
    """

    def __init__(self, reg_channel):
        loss_nodes = {
            'loss': (nn.CrossEntropyLoss(reduction='none'), [('classifier',), ('target',)]),
            'correct': (Correct(), [('classifier',), ('target',)]),
        }
        full_spec = union(net(reg_channel), loss_nodes)
        # ``self.graph`` is a plain dict, so it can be attached before
        # nn.Module.__init__ runs; keep this ordering.
        self.graph = build_graph(full_spec)
        super().__init__()
        for node_name, (module, _) in self.graph.items():
            setattr(self, node_name, module)

    def forward(self, inputs):
        """Run every graph node in order and return the full activation cache."""
        self.cache = dict(inputs)
        for node_name, (_, input_names) in self.graph.items():
            node = getattr(self, node_name)
            self.cache[node_name] = node(*[self.cache[src] for src in input_names])
        return self.cache

    def half(self):
        # Intentionally a no-op: half precision is disabled for this model.
        return self

    def get_state(self, mode="full"):
        """Return (shared state_dict, private params); the private part is empty."""
        return self.state_dict(), []

    def set_state(self, w_server, w_local, mode="full"):
        """Load the server state dict; ``w_local`` is currently ignored."""
        self.load_state_dict(w_server)
def conv_bn(c_in, c_out, bn_weight_init=1.0, **kw):
    """Spec for a 3x3 convolution followed by batch norm and an in-place ReLU."""
    conv = nn.Conv2d(c_in, c_out, kernel_size=3, stride=1, padding=1, bias=False)
    bn = batch_norm(c_out, bn_weight_init=bn_weight_init, **kw)
    return {'conv': conv, 'bn': bn, 'relu': nn.ReLU(True)}
def residual(c, **kw):
    """Residual block spec: identity branch plus two conv_bn blocks, summed."""
    block = {
        'in': Identity(),
        'res1': conv_bn(c, c, **kw),
        'res2': conv_bn(c, c, **kw),
    }
    # 'add' merges the skip branch with the output of the second ReLU.
    block['add'] = (Add(), [rel_path('in'), rel_path('res2', 'relu')])
    return block
def basic_net(channels, weight, pool, **kw):
    """Backbone spec: prep conv, three pooled conv stages, then a linear head
    scaled by ``weight`` (see Mul)."""
    spec = {'prep': conv_bn(channels["reg"], channels['prep'], **kw)}
    for stage, prev in (('layer1', 'prep'), ('layer2', 'layer1'), ('layer3', 'layer2')):
        spec[stage] = dict(conv_bn(channels[prev], channels[stage], **kw), pool=pool)
    spec['pool'] = nn.MaxPool2d(2)
    spec['flatten'] = Flatten()
    spec['linear'] = nn.Linear(channels['layer3'], 10, bias=False)
    spec['classifier'] = Mul(weight)
    return spec
def net(reg_channel, channels=None, weight=0.2, pool=nn.MaxPool2d(2), extra_layers=(), res_layers=('layer1', 'layer2'), **kw):
    """Full network spec: a basic_net backbone with optional residual and
    extra conv sub-blocks attached to the named stages."""
    channels = channels or {'reg': reg_channel, 'prep': 64, 'layer1': 128, 'layer2': 256, 'layer3': 256, }
    spec = basic_net(channels, weight, pool, **kw)
    for stage in res_layers:
        spec[stage]['residual'] = residual(channels[stage], **kw)
    for stage in extra_layers:
        spec[stage]['extra'] = conv_bn(channels[stage], channels[stage], **kw)
    return spec
def build_graph(net):
    """Flatten a nested layer spec into ``{flat_name: (module, [input names])}``.

    Nodes without explicit inputs default to the previous node in traversal
    order (the first node reads from 'input'). Explicit inputs may be absolute
    key tuples, bare strings, or RelativePath instances resolved against the
    node's own prefix.
    """
    flat = dict(path_iter(net))
    keys = list(flat.keys())

    def resolve(path, pfx):
        # RelativePath -> prefix + parts; bare string -> one-element tuple.
        if isinstance(path, RelativePath):
            return tuple(pfx) + path.parts
        if isinstance(path, str):
            return (path,)
        return path

    graph = {}
    for idx, ((*pfx, name), val) in enumerate(flat.items()):
        if isinstance(val, tuple):
            module, inputs = val
        else:
            module = val
            inputs = [('input',)] if idx == 0 else [keys[idx - 1]]
        graph[sep.join((*pfx, name))] = (module, [sep.join(resolve(x, pfx)) for x in inputs])
    return graph
def path_iter(nested_dict, pfx=()):
    """Depth-first walk of a nested dict, yielding (key-path tuple, leaf value)."""
    for key, value in nested_dict.items():
        path = (*pfx, key)
        if isinstance(value, dict):
            yield from path_iter(value, path)
        else:
            yield (path, value)
def batch_norm(num_channels, bn_bias_init=None, bn_bias_freeze=False, bn_weight_init=None, bn_weight_freeze=False):
    """BatchNorm2d with optional constant initialisation and freezing of the
    affine bias/weight parameters."""
    bn = nn.BatchNorm2d(num_channels)
    if bn_bias_init is not None:
        bn.bias.data.fill_(bn_bias_init)
    if bn_bias_freeze:
        bn.bias.requires_grad = False
    if bn_weight_init is not None:
        bn.weight.data.fill_(bn_weight_init)
    if bn_weight_freeze:
        bn.weight.requires_grad = False
    return bn
class Identity(nn.Module):
    """Pass-through node; used as the skip branch of residual blocks."""

    def forward(self, x):
        return x
class Mul(nn.Module):
    """Scale inputs by a fixed scalar. Note this overrides ``__call__`` (not
    ``forward``), so nn.Module hooks are bypassed."""

    def __init__(self, weight):
        super().__init__()
        self.weight = weight

    def __call__(self, x):
        return x * self.weight
class Flatten(nn.Module):
    """Collapse an activation to (N, C); trailing dims must multiply to 1."""

    def forward(self, x):
        return x.view(x.size(0), x.size(1))
class Add(nn.Module):
    """Element-wise sum of two branches (residual merge)."""

    def forward(self, x, y):
        return x + y
class Correct(nn.Module):
    """Per-sample correctness: argmax over the class dimension equals target."""

    def forward(self, classifier, target):
        predictions = classifier.max(dim=1)[1]
        return predictions == target
#####################
## data preprocessing
#####################
# Per-channel statistics of the CIFAR-10 training images on the [0, 1] scale
# (i.e. np.mean / np.std of train_set.train_data divided by 255).
cifar10_mean = (0.4914, 0.4822, 0.4465)
cifar10_std = (0.2471, 0.2435, 0.2616)


def normalise(x, mean=cifar10_mean, std=cifar10_std):
    """Standardise [0, 255]-scale images channel-wise; returns a float32 copy."""
    x = np.array(x, np.float32)
    mean = np.array(mean, np.float32)
    std = np.array(std, np.float32)
    x -= mean * 255
    x *= 1.0 / (255 * std)
    return x
def pad(x, border=4):
    """Reflection-pad the spatial (H, W) axes of an NHWC image batch."""
    pad_widths = [(0, 0), (border, border), (border, border), (0, 0)]
    return np.pad(x, pad_widths, mode='reflect')
def transpose(x, source='NHWC', target='NCHW'):
    """Reorder array axes from the ``source`` layout to the ``target`` layout."""
    order = [source.index(axis) for axis in target]
    return x.transpose(order)
#####################
## data loading
#####################
class Batches():
    """DataLoader wrapper yielding ``{'input': x, 'target': y}`` dicts.

    Optionally moves tensors to ``device`` and, when ``set_random_choices``
    is on, refreshes the dataset's per-epoch random augmentation choices at
    the start of every iteration.
    """

    def __init__(self, dataset, batch_size, shuffle, device, set_random_choices=False, num_workers=0, drop_last=False):
        self.dataset = dataset
        self.batch_size = batch_size
        self.set_random_choices = set_random_choices
        self.device = device
        self.dataloader = torch.utils.data.DataLoader(
            dataset, batch_size=batch_size, num_workers=num_workers,
            pin_memory=True, shuffle=shuffle, drop_last=drop_last,
        )

    def __iter__(self):
        if self.set_random_choices:
            self.dataset.set_random_choices()
        if self.device is None:
            return ({'input': x, 'target': y.long()} for (x, y) in self.dataloader)
        return ({'input': x.to(self.device), 'target': y.to(self.device).long()}
                for (x, y) in self.dataloader)

    def __len__(self):
        return len(self.dataloader)
| 6,919 | 32.756098 | 171 | py |
SFL-Structural-Federated-Learning | SFL-Structural-Federated-Learning-main/GraphConstructor.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class GraphConstructor(nn.Module):
    """Learns a directed adjacency matrix over ``nnodes`` clients.

    Two node representations (learned embeddings, or static features) are
    projected and combined as ``tanh(alpha * (M1 @ M2^T - M2 @ M1^T))``, then
    passed through ``relu(tanh(.))`` to give a non-negative, asymmetric
    adjacency. ``eval`` additionally sparsifies each row to its top-k entries.
    """

    def __init__(self, nnodes, k, dim, device, alpha=3, static_feat=None):
        super(GraphConstructor, self).__init__()
        self.nnodes = nnodes
        if static_feat is not None:
            feat_dim = static_feat.shape[1]
            self.lin1 = nn.Linear(feat_dim, dim)
            self.lin2 = nn.Linear(feat_dim, dim)
        else:
            # Keep the creation order (emb1, emb2, lin1, lin2) stable so that
            # seeded initialisation stays reproducible.
            self.emb1 = nn.Embedding(nnodes, dim)
            self.emb2 = nn.Embedding(nnodes, dim)
            self.lin1 = nn.Linear(dim, dim)
            self.lin2 = nn.Linear(dim, dim)
        self.device = device
        self.k = k
        self.dim = dim
        self.alpha = alpha
        self.static_feat = static_feat

    def _node_vectors(self, idx):
        """Return the two projected node representations for ``idx``."""
        if self.static_feat is None:
            v1 = self.emb1(idx)
            v2 = self.emb2(idx)
        else:
            v1 = self.static_feat[idx, :]
            v2 = v1
        v1 = torch.tanh(self.alpha * self.lin1(v1))
        v2 = torch.tanh(self.alpha * self.lin2(v2))
        return v1, v2

    def forward(self, idx):
        v1, v2 = self._node_vectors(idx)
        scores = torch.mm(v1, v2.transpose(1, 0)) - torch.mm(v2, v1.transpose(1, 0))
        return F.relu(torch.tanh(self.alpha * scores))

    def eval(self, idx, full=False):
        """Adjacency for ``idx``; unless ``full``, keep only each row's top-k.

        NOTE(review): this shadows nn.Module.eval with a different signature.
        """
        v1, v2 = self._node_vectors(idx)
        scores = torch.mm(v1, v2.transpose(1, 0)) - torch.mm(v2, v1.transpose(1, 0))
        adj = F.relu(torch.tanh(self.alpha * scores))
        if not full:
            mask = torch.zeros(idx.size(0), idx.size(0)).to(self.device)
            top_vals, top_idx = adj.topk(self.k, 1)
            mask.scatter_(1, top_idx, top_vals.fill_(1))
            adj = adj * mask
        return adj
| 2,074 | 31.421875 | 103 | py |
SFL-Structural-Federated-Learning | SFL-Structural-Federated-Learning-main/aggregator.py | import copy
import torch
import os
import pickle as pk
from util import sd_matrixing
from data_util import normalize_adj
from GraphConstructor import GraphConstructor
from optimiser import FedProx
import numpy as np
from scipy import linalg
def parameter_aggregate(args, A, w_server, global_model, server_state, client_states, active_idx):
    """Aggregate client weights on the server according to ``args.agg``.

    Args
    - args: run configuration (uses .agg, .clients, .device)
    - A: client adjacency matrix used by the graph aggregators
    - w_server: per-client state dicts after local training
    - global_model: per-client global state dicts (used by 'att')
    - server_state / client_states: SCAFFOLD control state (may be None)
    - active_idx: indices of clients sampled this round
    Return
    - (personalized_model, new_client_states, new_server_state)
    """
    new_s_state = None
    new_c_state = [None] * args.clients
    scheme = args.agg
    if scheme == 'avg' or scheme == "prox" or scheme == "scaf":
        merged = average_dic(w_server, args.device)
        # deepcopy of N references to one dict keeps the entries aliased.
        personalized_model = copy.deepcopy([merged] * args.clients)
    elif scheme == "att":
        merged = att_dic(w_server, global_model[0], args.device)
        personalized_model = copy.deepcopy([merged] * args.clients)
    elif scheme == "graph" or scheme == "graph_v2" or scheme == "graph_v3":
        personalized_model = graph_dic(w_server, A, args)
    elif scheme == "scaffold":
        new_s_state, new_c_state = scaffold_update(server_state, client_states, active_idx, args)
        merged = average_dic(w_server, args.device)
        personalized_model = copy.deepcopy([merged] * args.clients)
    else:
        personalized_model = None
        exit('Unrecognized aggregation.')
    return personalized_model, new_c_state, new_s_state
def average_dic(model_dic, device, dp=0.001):
    """Element-wise average of client state dicts, plus Gaussian noise.

    Args
    - model_dic (list[dict]): per-client state dicts with identical keys
    - device: unused; kept for interface compatibility
    - dp (float): noise multiplier; 0 disables the added noise
    Return
    - dict: averaged (and noised) state dict
    """
    merged = copy.deepcopy(model_dic[0])
    for key in merged.keys():
        total = merged[key].data.clone().detach()
        for other in model_dic[1:]:
            total = total + other[key].data.clone().detach()
        noise = torch.mul(torch.randn(total.shape), dp)
        merged[key] = total.div(len(model_dic)) + noise
    return merged
def att_dic(w_clients, w_server, device, stepsize=1, metric=1, dp=0.001):
    """Attention-style aggregation: per-layer softmax over each client's
    distance to the server, then a step of size ``stepsize`` toward the
    clients, plus Gaussian noise scaled by ``dp``.

    Args
    - w_clients (list[dict]): client state dicts
    - w_server (dict): current server state dict
    - device: unused; kept for interface compatibility
    - stepsize (float): update step size
    - metric (int): norm order for the distance
    - dp (float): noise multiplier; 0 disables noise
    Return
    - dict: updated server state dict
    """
    w_next = copy.deepcopy(w_server)
    att = {}
    for key in w_server.keys():
        w_next[key] = torch.zeros_like(w_server[key]).cpu()
        scores = torch.zeros(len(w_clients)).cpu()
        for i, client in enumerate(w_clients):
            scores[i] = torch.norm((w_server[key] - client[key]).type(torch.float32), metric)
        att[key] = torch.nn.functional.softmax(scores, dim=0)
    for key in w_next.keys():
        delta = torch.zeros_like(w_server[key])
        for i, client in enumerate(w_clients):
            dtype = w_server[key].dtype
            delta += torch.mul(w_server[key] - client[key], att[key][i].type(dtype))
        w_next[key] = w_server[key] - torch.mul(delta, stepsize) \
            + torch.mul(torch.randn(w_server[key].shape), dp)
    return w_next
def graph_dic(models_dic, pre_A, args):
    """Graph-based aggregation: propagate flattened client parameters over a
    client adjacency matrix, then blend with the originals.

    For 'graph_v2'/'graph_v3' a fresh adjacency is learned from the pairwise
    parameter distances (v3 additionally mixes it with ``pre_A`` using
    ``args.adjbeta``); plain 'graph' reuses ``pre_A``. NOTE: ``models_dic``
    is modified in place and also returned.

    Args
    - models_dic (list[dict]): per-client state dicts
    - pre_A (tensor): adjacency matrix from the previous round
    - args: uses .agg, .subgraph_size, .clients, .layers, .serveralpha, .adjbeta
    Return
    - models_dic (list[dict]): state dicts rebuilt from the aggregated matrix
    """
    keys = []
    key_shapes = []
    param_metrix = []
    # Flatten every client's state dict into one row vector.
    for model in models_dic:
        param_metrix.append(sd_matrixing(model).clone().detach())
    param_metrix = torch.stack(param_metrix)
    # Record key order and shapes so the rows can be unflattened later.
    for key, param in models_dic[0].items():
        keys.append(key)
        key_shapes.append(list(param.data.shape))
    if args.agg == "graph_v2" or args.agg == "graph_v3":
        # Construct a fresh adjacency from pairwise parameter distances.
        subgraph_size = min(args.subgraph_size, args.clients)
        A = generate_adj(param_metrix, args, subgraph_size).cpu().detach().numpy()
        A = normalize_adj(A)
        A = torch.tensor(A)
        if args.agg == "graph_v3":
            # Exponential blend of the old and the newly learned adjacency.
            A = (1 - args.adjbeta) * pre_A + args.adjbeta * A
    else:
        A = pre_A
    # Propagate parameters over the graph for args.layers hops, then mix
    # with the original parameters via serveralpha.
    aggregated_param = torch.mm(A, param_metrix)
    for i in range(args.layers - 1):
        aggregated_param = torch.mm(A, aggregated_param)
    new_param_matrix = (args.serveralpha * aggregated_param) + ((1 - args.serveralpha) * param_metrix)
    # Unflatten each aggregated row back into a state dict (in place).
    for i in range(len(models_dic)):
        pointer = 0
        for k in range(len(keys)):
            num_p = 1
            for n in key_shapes[k]:
                num_p *= n
            models_dic[i][keys[k]] = new_param_matrix[i][pointer:pointer + num_p].reshape(key_shapes[k])
            pointer += num_p
    return models_dic
def scaffold_update(server_state, client_states, active_ids, args):
    """One SCAFFOLD server round: fold the sampled clients' control-variate
    deltas into the server control variate and reset per-client round state.

    Args
    - server_state (dict): {"global_round", "c": tuple of tensors}
    - client_states (list[dict]): per-client states with "c_i"/"c_i_delta"
    - active_ids (list[int]): clients sampled this round
    - args: uses .clients and .client_frac
    Return
    - (new_server_state, new_client_state)
    """
    sampled = [client_states[i] for i in active_ids]
    deltas = [state["c_i_delta"] for state in sampled]

    c_delta = []
    for layer, server_c in enumerate(server_state["c"]):
        # Cast to float for the mean, then back (handles int64 buffers too).
        stacked = torch.stack([d[layer].float() for d in deltas])
        c_delta.append(torch.mean(stacked, dim=0).to(server_c.dtype))
    c_delta = tuple(c_delta)

    c = []
    for server_c, delta in zip(server_state["c"], c_delta):
        c.append(server_c + delta * args.clients * args.client_frac / args.clients)
    c = tuple(c)

    new_server_state = {
        "global_round": server_state["global_round"] + 1,
        "c": c,
    }
    new_client_state = [{
        "global_round": new_server_state["global_round"],
        "model_delta": None,
        "c_i": state["c_i"],
        "c_i_delta": None,
        "c": server_state["c"],
    } for state in client_states]
    return new_server_state, new_client_state
def generate_adj(param_metrix, args, subgraph_size):
    """Fit a GraphConstructor so its adjacency approximates the normalised
    pairwise parameter-distance matrix between clients, then return the
    learned (top-k sparsified) adjacency on CPU.

    Args
    - param_metrix (tensor): one flattened parameter row per client
    - args: uses .device, .clients, .node_dim, .adjalpha, .lr, .weight_decay, .gc_epoch
    - subgraph_size (int): top-k kept per row by GraphConstructor.eval
    Return
    - tensor: learned adjacency matrix on CPU
    """
    n = len(param_metrix)
    dist = torch.zeros((n, n))
    for i in range(n):
        for j in range(n):
            dist[i][j] = torch.nn.functional.pairwise_distance(
                param_metrix[i].view(1, -1), param_metrix[j].view(1, -1), p=2).clone().detach()
    dist = torch.nn.functional.normalize(dist).to(args.device)

    gc = GraphConstructor(args.clients, subgraph_size, args.node_dim,
                          args.device, args.adjalpha).to(args.device)
    idx = torch.arange(args.clients).to(args.device)
    optimizer = torch.optim.SGD(gc.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    # Train the constructor to reproduce the normalised distance matrix.
    for _ in range(args.gc_epoch):
        optimizer.zero_grad()
        pred = torch.nn.functional.normalize(gc(idx))
        loss = torch.nn.functional.mse_loss(pred, dist)
        loss.backward()
        optimizer.step()

    return gc.eval(idx).to("cpu")
def read_out(personalized_models, device):
    """Average-pool the personalised models into a single global model,
    replicated once per client (noise disabled via dp=0)."""
    pooled = average_dic(personalized_models, device, 0)
    return [pooled] * len(personalized_models)
| 6,429 | 34.921788 | 121 | py |
SFL-Structural-Federated-Learning | SFL-Structural-Federated-Learning-main/optimiser.py | import numpy as np
import torch
from collections import namedtuple
from util import PiecewiseLinear
from torch.optim.optimizer import Optimizer, required
import torch.distributed as dist
class TorchOptimiser():
    """Wraps a torch optimizer whose hyper-parameters may be schedules: any
    callable value is re-evaluated against the current step count before
    every optimisation step."""

    def __init__(self, weights, optimizer, step_number=0, **opt_params):
        self.weights = weights
        self.step_number = step_number
        self.opt_params = opt_params
        self._opt = optimizer(weights, **self.param_values())

    def param_values(self):
        """Resolve schedules: call each callable param with the step number."""
        resolved = {}
        for name, value in self.opt_params.items():
            resolved[name] = value(self.step_number) if callable(value) else value
        return resolved

    def step(self):
        self.step_number += 1
        # Refresh scheduled hyper-parameters, then take the real step.
        self._opt.param_groups[0].update(**self.param_values())
        self._opt.step()

    def __repr__(self):
        return repr(self._opt)
def SGD(weights, lr=0, momentum=0, weight_decay=0, dampening=0, nesterov=False):
    """Schedule-aware SGD factory built on TorchOptimiser."""
    return TorchOptimiser(
        weights, torch.optim.SGD,
        lr=lr, momentum=momentum, weight_decay=weight_decay,
        dampening=dampening, nesterov=nesterov,
    )
class FedProx(Optimizer):
    """FedProx local optimizer: SGD with momentum/weight decay plus a proximal
    term ``mu * (w - w_init)`` pulling parameters toward their value at the
    start of the local round.

    Args
    - params: iterable of parameters to optimize
    - ratio (float): this worker's aggregation weight (used by ``average``)
    - gmf: global momentum factor (stored; not used in ``step``)
    - lr (float): learning rate (required, must be non-negative)
    - momentum / dampening / weight_decay / nesterov: standard SGD options
    - variance: stored in defaults; unused here
    - mu (float): proximal coefficient; 0 recovers plain SGD
    """

    def __init__(self, params, ratio, gmf, lr=required, momentum=0, dampening=0,
                 weight_decay=0, nesterov=False, variance=0, mu=0):
        self.gmf = gmf
        self.ratio = ratio
        self.itr = 0
        self.a_sum = 0
        self.mu = mu
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if weight_decay < 0.0:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))

        defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
                        weight_decay=weight_decay, nesterov=nesterov, variance=variance)
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError("Nesterov momentum requires a momentum and zero dampening")
        super(FedProx, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(FedProx, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('nesterov', False)

    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']

            for p in group['params']:
                if p.grad is None:
                    continue
                d_p = p.grad.data
                # BUGFIX/modernisation: the deprecated ``add_(Number, Tensor)``
                # overload is replaced by the explicit ``alpha=`` keyword form
                # throughout (same math, supported by current PyTorch).
                if weight_decay != 0:
                    d_p.add_(p.data, alpha=weight_decay)
                param_state = self.state[p]
                # Snapshot taken at the first local step; the proximal term
                # is measured against this anchor.
                if 'old_init' not in param_state:
                    param_state['old_init'] = torch.clone(p.data).detach()

                if momentum != 0:
                    if 'momentum_buffer' not in param_state:
                        buf = param_state['momentum_buffer'] = torch.clone(d_p).detach()
                    else:
                        buf = param_state['momentum_buffer']
                        buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
                    if nesterov:
                        d_p = d_p.add(buf, alpha=momentum)
                    else:
                        d_p = buf

                # apply proximal update: mu * (w - w_old)
                d_p.add_(p.data - param_state['old_init'], alpha=self.mu)
                p.data.add_(d_p, alpha=-group['lr'])

        return loss

    def average(self):
        """All-reduce the ratio-scaled parameters across workers, then reset
        the proximal anchors and zero the momentum buffers for the next round."""
        param_list = []
        for group in self.param_groups:
            for p in group['params']:
                p.data.mul_(self.ratio)
                param_list.append(p.data)

        communicate(param_list, dist.all_reduce)

        for group in self.param_groups:
            for p in group['params']:
                param_state = self.state[p]
                param_state['old_init'] = torch.clone(p.data).detach()
                # Reinitialize momentum buffer
                if 'momentum_buffer' in param_state:
                    param_state['momentum_buffer'].zero_()
# helper functions for fedprox
def communicate(tensors, communication_op):
    """
    Reference: https://github.com/facebookresearch/stochastic_gradient_push
    Apply ``communication_op`` (e.g. a partial around dist.all_reduce) to the
    tensors as one flattened buffer, then scatter the result back in place.
    Arguments:
        tensors (Iterable[Tensor]): list of dense tensors.
        communication_op: callable taking a ``tensor=`` keyword argument.
    """
    flat = flatten_tensors(tensors)
    communication_op(tensor=flat)
    for synced, original in zip(unflatten_tensors(flat, tensors), tensors):
        original.set_(synced)
def flatten_tensors(tensors):
    """
    Reference: https://github.com/facebookresearch/stochastic_gradient_push
    Concatenate dense tensors of the same dtype into one contiguous 1-D
    buffer; element-wise ops on the buffer are equivalent to operating on
    the inputs individually.
    Arguments:
        tensors (Iterable[Tensor]): dense tensors to flatten.
    Returns:
        A 1-D buffer (always a copy, never aliasing the inputs).
    """
    if len(tensors) == 1:
        return tensors[0].view(-1).clone()
    return torch.cat([t.view(-1) for t in tensors], dim=0)
def unflatten_tensors(flat, tensors):
    """
    Reference: https://github.com/facebookresearch/stochastic_gradient_push
    Split ``flat`` back into tensors shaped like ``tensors``; assumes ``flat``
    was produced by flatten_tensors on same-dtype dense tensors.
    Arguments:
        flat (Tensor): flattened buffer.
        tensors (Iterable[Tensor]): reference tensors providing the shapes.
    Returns:
        Tuple of views into ``flat``, shaped like the references.
    """
    views = []
    offset = 0
    for ref in tensors:
        count = ref.numel()
        views.append(flat.narrow(0, offset, count).view_as(ref))
        offset += count
    return tuple(views)
SFL-Structural-Federated-Learning | SFL-Structural-Federated-Learning-main/data_util.py | import torch
import numpy as np
import scipy.sparse as sp
from torchvision import datasets
from collections import namedtuple
from torchvision import datasets, transforms
import pickle as pk
def load_image(args):
    """Build CIFAR-10 train/test sets with torchvision transforms and compute
    the non-IID client split.

    NOTE(review): this function returns None -- the computed
    (train_user_groups, test_user_groups, A) tuple is discarded, and nothing
    in the visible code calls load_image. It looks unfinished or dead;
    confirm before relying on it (load_cifar10/load_mnist build the batches
    actually consumed downstream).
    """
    data_dir = "./data/" + str(args.dataset)
    data_mean = (0.4914, 0.4822, 0.4465) # equals np.mean(train_set.train_data, axis=(0,1,2))/255
    data_std = (0.2471, 0.2435, 0.2616) # equals np.std(train_set.train_data, axis=(0,1,2))/255
    # Augmentation + normalisation pipeline applied via torchvision.
    trans = [transforms.RandomCrop(32, padding=4),
             transforms.RandomHorizontalFlip(0.1),
             transforms.RandomVerticalFlip(0.1),
             transforms.ToTensor(),
             transforms.Normalize(data_mean, data_std)]
    apply_transform = transforms.Compose(trans)
    train_set = datasets.CIFAR10(data_dir, train=True, download=True, transform=apply_transform)
    test_set = datasets.CIFAR10(data_dir, train=False, download=True, transform=apply_transform)
    # topk is read by split_equal_noniid when sparsifying the adjacency.
    train_set.topk = 5
    train_set.targets = np.array(train_set.targets)
    test_set.targets = np.array(test_set.targets)
    # split
    train_user_groups, test_user_groups, A = split_equal_noniid(
        train_set, test_set, args.shards, args.edge_frac, args.clients)
def load_cifar10(args):
    """Load CIFAR-10, compute a non-IID client split, and build per-client
    train/test Batches plus an overall test loader.

    NOTE(review): Crop, FlipLR, Cutout, Transform, Batches, normalise, pad and
    transpose are not in this file's visible import block -- presumably
    imported elsewhere in the module header; TODO confirm.

    Args
    - args: uses .dataset, .shards, .edge_frac, .clients, .batch_size, .device
    Return
    - (train_batches, test_batches, A, overall_tbatches)
    """
    data_dir = "./data/" + str(args.dataset)
    train_set = datasets.CIFAR10(root=data_dir, train=True, download=True)
    test_set = datasets.CIFAR10(root=data_dir, train=False, download=True)
    data_mean = (0.4914, 0.4822, 0.4465) # equals np.mean(train_set.train_data, axis=(0,1,2))/255
    data_std = (0.2471, 0.2435, 0.2616) # equals np.std(train_set.train_data, axis=(0,1,2))/255
    # topk is read by split_equal_noniid when sparsifying the adjacency.
    train_set.topk = 5
    train_set.targets = np.array(train_set.targets)
    test_set.targets = np.array(test_set.targets)
    # Per-batch random augmentations applied by the Transform wrapper.
    train_transforms = [Crop(32, 32), FlipLR(), Cutout(8, 8)]
    # split
    train_user_groups, test_user_groups, A = split_equal_noniid(
        train_set, test_set, args.shards, args.edge_frac, args.clients)
    # Normalise/pad/transpose to NCHW, then pair each image with its target.
    train_set = list(zip(transpose(normalise(pad(train_set.data, 4), data_mean, data_std)), train_set.targets))
    test_set = list(zip(transpose(normalise(test_set.data, data_mean, data_std)), test_set.targets))
    train_batches = []
    test_batches = []
    # One Batches loader per client, indexed by that client's sample ids.
    for key, users in train_user_groups.items():
        train_batches.append(Batches(Transform([train_set[u.astype(int)] for u in users],
                                               train_transforms), args.batch_size, shuffle=True, device=args.device,
                                     set_random_choices=True, drop_last=True))
    for key, users in test_user_groups.items():
        test_batches.append(Batches([test_set[u.astype(int)] for u in users],
                                    args.batch_size, shuffle=False, device=args.device, drop_last=False))
    overall_tbatches = Batches(test_set, args.batch_size, shuffle=False,
                               device=args.device, drop_last=False)
    return train_batches, test_batches, A, overall_tbatches
# Image data related
def load_mnist(args):
    """Load MNIST, compute a non-IID client split, and build per-client
    train/test Batches plus an overall test loader.

    NOTE(review): ``apply_transform`` is handed to datasets.MNIST, but the
    batches below are built directly from ``.data``/``.targets`` (bypassing
    __getitem__), so the ToTensor/Normalize transform is never applied to the
    tensors that are actually batched -- confirm whether that is intended.

    Args
    - args: uses .dataset, .shards, .edge_frac, .clients, .batch_size, .device
    Return
    - (train_batches, test_batches, A, overall_tbatches)
    """
    data_dir = "./data/" + str(args.dataset)
    trans = [transforms.ToTensor(),
             transforms.Normalize(*((0.1307,), (0.3081,)))]
    apply_transform = transforms.Compose(trans)
    train_dataset = datasets.MNIST(data_dir, train=True, download=True, transform=apply_transform)
    test_dataset = datasets.MNIST(data_dir, train=False, download=True, transform=apply_transform)
    # topk is read by split_equal_noniid when sparsifying the adjacency.
    train_dataset.topk = 5
    # Add a channel dimension ((N, 28, 28) -> (N, 1, 28, 28)) and cast to float.
    train_dataset.data = torch.unsqueeze(train_dataset.data, 1)
    train_dataset.targets = np.array(train_dataset.targets)
    train_dataset.data = train_dataset.data.type(torch.FloatTensor)
    test_dataset.data = torch.unsqueeze(test_dataset.data, 1)
    test_dataset.targets = np.array(test_dataset.targets)
    test_dataset.data = test_dataset.data.type(torch.FloatTensor)
    train_user_groups, test_user_groups, A = split_equal_noniid(
        train_dataset, test_dataset, args.shards, args.edge_frac, args.clients)
    train_set = list(zip(train_dataset.data, train_dataset.targets))
    test_set = list(zip(test_dataset.data, test_dataset.targets))
    train_batches = []
    test_batches = []
    # One Batches loader per client, indexed by that client's sample ids.
    for key, users in train_user_groups.items():
        train_batches.append(Batches([train_set[u.astype(int)] for u in users], args.batch_size,
                                     shuffle=True, device=args.device, drop_last=True))
    for key, users in test_user_groups.items():
        test_batches.append(Batches([test_set[u.astype(int)] for u in users], args.batch_size,
                                    shuffle=False, device=args.device, drop_last=False))
    overall_tbatches = Batches(test_set, args.batch_size, shuffle=False,
                               device=args.device, drop_last=False)
    return train_batches, test_batches, A, overall_tbatches
def split_equal_noniid(train_dataset, test_dataset, shards, edge_frac, clients):
    """Partition train/test data into equal-size non-IID client shards.

    Each client receives `shards` randomly chosen shards of the label-sorted
    training indices, so every client sees only a few labels. Test indices
    are drawn to mirror each client's training label distribution. A client
    adjacency matrix A is built by linking clients that share dominant
    labels, keeping a fraction `edge_frac` of the candidate links.

    :param train_dataset: dataset with `.data`, `.targets` and `.topk` set
    :param test_dataset: dataset with `.data` and `.targets`
    :param shards: number of shards assigned to each client
    :param edge_frac: fraction of candidate client-client links to keep in A
    :param clients: number of clients
    :return: (train_dict_users, test_dict_users, row-normalized A tensor)
    """
    total_shards = shards * clients
    shard_size = int(len(train_dataset.data) / total_shards)
    idx_shard = [i for i in range(total_shards)]
    train_dict_users = {i: np.array([]) for i in range(clients)}
    idxs = np.arange(total_shards * shard_size)
    labels = train_dataset.targets
    dict_label_dist = {i: np.array([]) for i in range(clients)}
    # sort labels
    idxs_labels = np.vstack((idxs, labels))
    idxs_labels = idxs_labels[:, idxs_labels[1, :].argsort()]
    idxs = idxs_labels[0, :]
    label_count = np.bincount(idxs_labels[1])
    # generate adj
    A = np.zeros((clients, clients))
    num_label = len(set(labels))
    label_dist = [[] for _ in range(num_label)]
    # partitions for train data
    for i in range(clients):
        # Draw this client's shards without replacement and remove them
        # from the global pool.
        rand_set = np.random.choice(idx_shard, shards, replace=False)
        idx_shard = list(set(idx_shard) - set(rand_set))
        selected_labels = idxs_labels[1, rand_set * shard_size]
        label_type = np.array(list(set(selected_labels)))
        sample_size = [np.count_nonzero(selected_labels == j) for j in label_type]
        # NOTE(review): result of this expression is discarded — dead statement.
        int(shard_size * shards / len(label_type))
        dict_label_dist[i] = np.array((label_type, sample_size))
        for j, l in enumerate(label_type):
            # Index range of label `l` in the label-sorted index array.
            start_idx = sum(label_count[0:l])
            end_idx = start_idx + label_count[l]
            sample_array = idxs[start_idx: end_idx]
            train_dict_users[i] = np.concatenate(
                (train_dict_users[i], np.random.choice(
                    sample_array, sample_size[j] * shard_size, replace=False)), axis=0)
        # for cifar-100, control the sparsity of A
        label_size = np.array([np.count_nonzero(
            labels[train_dict_users[i].astype(int)] == j) for j in label_type])
        pram_label_idx = np.array(sorted(range(len(label_size)),
                                         key=lambda i: label_size[i])[min(-train_dataset.topk, shards):])
        # NOTE(review): the loop variable shadows the `label_type` array;
        # harmless here because the array is not reused afterwards.
        for label_type in label_type[pram_label_idx]:
            label_dist[label_type].append(i)
    # prepare A
    link_list = []
    for user_arr in label_dist:
        for user_a in user_arr:
            for user_b in user_arr:
                link_list.append([user_a, user_b])
    link_sample = list(range(len(link_list)))
    link_idx = np.random.choice(link_sample, int(edge_frac * len(link_list)), replace=False)
    for idx in link_idx:
        # A[link_list[idx][0], link_list[idx][1]] = A[link_list[idx][0], link_list[idx][1]] + 1
        A[link_list[idx][0], link_list[idx][1]] = 1
    # partition for test data
    total_shards = shards * clients
    shard_size = int(len(test_dataset.data) / total_shards)
    test_dict_users = {i: np.array([]) for i in range(clients)}
    idxs = np.arange(total_shards * shard_size)
    labels = test_dataset.targets
    # sort labels
    idxs_labels = np.vstack((idxs, labels))
    idxs_labels = idxs_labels[:, idxs_labels[1, :].argsort()]
    idxs = idxs_labels[0, :]
    label_count = np.bincount(idxs_labels[1])
    for i in range(clients):
        # Mirror the client's training label distribution in the test split.
        for j, l in enumerate(dict_label_dist[i][0]):
            start_idx = sum(label_count[0:l])
            end_idx = start_idx + label_count[l]
            sample_array = idxs[start_idx: end_idx]
            test_dict_users[i] = np.concatenate(
                (test_dict_users[i], np.random.choice(
                    sample_array, dict_label_dist[i][1][j] * shard_size, replace=False)), axis=0)
    return train_dict_users, test_dict_users, torch.tensor(normalize_adj(A), dtype=torch.float32)
class Batches():
    """Thin DataLoader wrapper yielding {'input', 'target'} dicts.

    Optionally re-samples the dataset's random augmentation choices at the
    start of every epoch and moves batches to a target device.
    """

    def __init__(self, dataset, batch_size, shuffle, device, set_random_choices=False, num_workers=0, drop_last=False):
        self.dataset = dataset
        self.batch_size = batch_size
        self.set_random_choices = set_random_choices
        self.device = device
        self.dataloader = torch.utils.data.DataLoader(
            dataset, batch_size=batch_size, num_workers=num_workers,
            pin_memory=True, shuffle=shuffle, drop_last=drop_last)

    def __iter__(self):
        if self.set_random_choices:
            # Draw fresh per-sample augmentation parameters for this epoch.
            self.dataset.set_random_choices()
        if self.device is None:
            return ({'input': x, 'target': y.long()} for (x, y) in self.dataloader)
        return ({'input': x.to(self.device), 'target': y.to(self.device).long()}
                for (x, y) in self.dataloader)

    def __len__(self):
        return len(self.dataloader)
#####################
## data augmentation
#####################
class Crop(namedtuple('Crop', ('h', 'w'))):
    """Random-crop transform: extract an h-by-w window anchored at (x0, y0)."""

    def __call__(self, x, x0, y0):
        return x[:, y0:y0 + self.h, x0:x0 + self.w]

    def options(self, x_shape):
        # Valid anchor positions for a CHW input of this shape.
        _, H, W = x_shape
        return {'x0': range(W + 1 - self.w), 'y0': range(H + 1 - self.h)}

    def output_shape(self, x_shape):
        C, _, _ = x_shape
        return (C, self.h, self.w)
class FlipLR(namedtuple('FlipLR', ())):
    """Horizontal-flip transform; `choice` selects flipped or original."""

    def __call__(self, x, choice):
        if not choice:
            return x
        # Copy so the flipped view owns contiguous memory.
        return x[:, :, ::-1].copy()

    def options(self, x_shape):
        return {'choice': [True, False]}
class Cutout(namedtuple('Cutout', ('h', 'w'))):
    """Cutout augmentation: zero an h-by-w window anchored at (x0, y0)."""

    def __call__(self, x, x0, y0):
        # Work on a copy so the caller's array is never mutated.
        patched = x.copy()
        patched[:, y0:y0 + self.h, x0:x0 + self.w].fill(0.0)
        return patched

    def options(self, x_shape):
        _, H, W = x_shape
        return {'x0': range(W + 1 - self.w), 'y0': range(H + 1 - self.h)}
class Transform:
    """Dataset wrapper applying a pipeline of random transforms.

    Per-sample transform parameters are drawn once per epoch by
    set_random_choices(); __getitem__ then applies each transform with the
    parameters chosen for that index.
    """

    def __init__(self, dataset, transforms):
        self.dataset, self.transforms = dataset, transforms
        self.choices = None

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        item, label = self.dataset[index]
        for opts, transform in zip(self.choices, self.transforms):
            kwargs = {name: values[index] for name, values in opts.items()}
            item = transform(item, **kwargs)
        return item, label

    def set_random_choices(self):
        """Sample one parameter set per dataset element for every transform."""
        self.choices = []
        shape = self.dataset[0][0].shape
        count = len(self)
        for transform in self.transforms:
            opts = transform.options(shape)
            # Track the shape through shape-changing transforms (e.g. Crop).
            if hasattr(transform, 'output_shape'):
                shape = transform.output_shape(shape)
            self.choices.append({name: np.random.choice(vals, size=count)
                                 for name, vals in opts.items()})
def normalise(x, mean, std):
    """Normalize 0-255 pixel data: subtract mean*255 and divide by std*255."""
    x, mean, std = (np.array(v, np.float32) for v in (x, mean, std))
    x -= mean * 255
    x *= 1.0 / (255 * std)
    return x
def pad(x, border=4):
    """Reflect-pad the H and W axes of an NHWC batch by `border` pixels."""
    widths = [(0, 0), (border, border), (border, border), (0, 0)]
    return np.pad(x, widths, mode='reflect')
def transpose(x, source='NHWC', target='NCHW'):
    """Reorder array axes from `source` layout to `target` layout."""
    order = [source.index(axis) for axis in target]
    return x.transpose(order)
def normalize_adj(mx):
    """Row-normalize a matrix: divide each row by its sum (zero rows stay zero)."""
    row_sums = np.array(mx.sum(1))
    inv = np.power(row_sums, -1).flatten()
    # 1/0 -> inf; map those back to zero so empty rows remain all-zero.
    inv[np.isinf(inv)] = 0.
    return sp.diags(inv).dot(mx)
SFL-Structural-Federated-Learning | SFL-Structural-Federated-Learning-main/federated.py | import threading
import datetime
import torch
import time
import numpy as np
from BResidual import BResidual
from optimiser import SGD
from util import sd_matrixing, PiecewiseLinear, trainable_params, StatsLogger
class Cifar10FedEngine:
    """Runs one client's local training or evaluation round in federated SFL.

    The engine wraps a BResidual model initialised from the supplied global
    and local parameters, trains (or evaluates) it on the client's
    dataloader, and returns the updated state plus loss/accuracy statistics.
    Regularisation in `criterion` pulls the local model towards both the
    server-side personalised parameters and the global average.
    """

    def __init__(self, args, dataloader, global_param, server_param, local_param,
                 outputs, cid, tid, mode, server_state, client_states):
        # args: experiment configuration (dataset, device, epochs, reg, ...)
        # mode: "Train" or "Test" — selects the branch taken in client_run.
        self.args = args
        self.dataloader = dataloader
        self.global_param = global_param
        self.server_param = server_param
        self.local_param = local_param
        self.server_state = server_state
        self.client_state = client_states
        self.client_id = cid
        self.outputs = outputs
        self.thread = tid
        self.mode = mode
        self.model = self.prepare_model()
        # self.threadLock = threading.Lock()
        # Scratch tensors used by criterion()/free_memory().
        self.m1, self.m2, self.m3, self.reg1, self.reg2 = None, None, None, None, None

    def prepare_model(self):
        """Build a BResidual for the configured dataset and load its state."""
        if self.args.dataset == "cifar10":
            model = BResidual(3)
        elif self.args.dataset == "mnist":
            model = BResidual(1)
        else:
            print("Unknown model type ... ")
            # NOTE(review): model stays None here, so set_state below raises.
            model = None
        model.set_state(self.global_param, self.local_param)
        return model

    def run(self):
        """Move the model to the device, run the round, then release memory."""
        self.model.to(self.args.device)
        output = self.client_run()
        self.free_memory()
        return output

    def client_run(self):
        """Execute the local train/test loop and package the results."""
        # Warm-up then decay learning-rate schedule, scaled per batch step.
        lr_schedule = PiecewiseLinear([0, 5, self.args.client_epochs], [0, 0.4, 0.001])
        lr = lambda step: lr_schedule(step / len(self.dataloader)) / self.args.batch_size
        opt = SGD(trainable_params(self.model), lr=lr, momentum=0.9, weight_decay=5e-4
                  * self.args.batch_size, nesterov=True)
        mean_loss = []
        mean_acc = []
        t1 = time.time()
        c_state = None
        if self.mode == "Train":
            # training process
            for epoch in range(self.args.client_epochs):
                stats = self.batch_run(True, opt.step)
                mean_loss.append(stats.mean('loss'))
                mean_acc.append(stats.mean('correct'))
                # log = "Train - Epoch: " + str(epoch) + ' train loss: ' + str(stats.mean('loss')) +\
                #       ' train acc: ' + str(stats.mean('correct'))
                # self.logger(log, True)
        elif self.mode == "Test":
            # validation process
            stats = self.batch_run(False)
            mean_loss.append(stats.mean('loss'))
            mean_acc.append(stats.mean('correct'))
            # log = 'Test - test loss: ' + str(stats.mean('loss')) + ' test acc: ' \
            #       + str(stats.mean('correct'))
            # self.logger(log)
        time_cost = time.time() - t1
        log = self.mode + ' - Thread: {:03d}, Client: {:03d}. Average Loss: {:.4f},' \
              ' Average Accuracy: {:.4f}, Total Time Cost: {:.4f}'
        self.logger(log.format(self.thread, self.client_id, np.mean(mean_loss), np.mean(mean_acc),
                               time_cost), True)
        # Return the model to the CPU before handing its state back.
        self.model.to("cpu")
        output = {"params": self.model.get_state(),
                  "time": time_cost,
                  "loss": np.mean(mean_loss),
                  "acc": np.mean(mean_acc),
                  "client_state": self.client_state,
                  "c_state": c_state}
        # self.outputs[self.thread] = output
        return output

    def batch_run(self, training, optimizer_step=None, stats=None):
        """Run one pass over the dataloader; backprop only when `training`."""
        stats = stats or StatsLogger(('loss', 'correct'))
        self.model.train(training)
        for batch in self.dataloader:
            output = self.model(batch)
            output['loss'] = self.criterion(output['loss'], self.mode)
            stats.append(output)
            if training:
                output['loss'].sum().backward()
                optimizer_step()
                self.model.zero_grad()
            # NOTE(review): .to("cpu") returns a copy; these calls do not
            # move the original batch tensors in place.
            batch["input"].to("cpu")
            batch["target"].to("cpu")
        return stats

    def criterion(self, loss, mode):
        """Optionally add proximal terms towards server and global params."""
        if self.args.agg == "avg":
            pass
        elif self.args.reg > 0 and mode != "PerTrain" and self.args.clients != 1:
            # Flatten the three parameter sets and penalise the distance of
            # the local model to the server-personalised and global models.
            self.m1 = sd_matrixing(self.model.get_state()[0]).reshape(1, -1).to(self.args.device)
            self.m2 = sd_matrixing(self.server_param).reshape(1, -1).to(self.args.device)
            self.m3 = sd_matrixing(self.global_param).reshape(1, -1).to(self.args.device)
            self.reg1 = torch.nn.functional.pairwise_distance(self.m1, self.m2, p=2)
            self.reg2 = torch.nn.functional.pairwise_distance(self.m1, self.m3, p=2)
            loss = loss + 0.3 * self.reg1 + 0.3 * self.reg2
        return loss

    def free_memory(self):
        """Push scratch tensors to the CPU and release cached GPU memory."""
        if self.m1 is not None:
            self.m1.to("cpu")
        if self.m2 is not None:
            self.m2.to("cpu")
        if self.m3 is not None:
            self.m3.to("cpu")
        if self.reg1 is not None:
            self.reg1.to("cpu")
        if self.reg2 is not None:
            self.reg2.to("cpu")
        torch.cuda.empty_cache()

    def logger(self, buf, p=False):
        """Append `buf` to the log file, optionally echoing to stdout."""
        if p:
            print(buf)
        # self.threadLock.acquire()
        with open(self.args.logDir, 'a+') as f:
            f.write(str(datetime.datetime.now()) + '\t' + buf + '\n')
        # self.threadLock.release()
| 5,404 | 35.033333 | 101 | py |
SFL-Structural-Federated-Learning | SFL-Structural-Federated-Learning-main/util.py | import datetime
import random
import os
import torch
import numpy as np
from collections import namedtuple
from functools import singledispatch
def print2file(buf, out_file, p=False):
    """Append a timestamped line to a log file, optionally echoing to stdout.

    Args:
        buf: message text to log.
        out_file: path of the log file (opened in append mode, created if absent).
        p: when True, also print `buf` to stdout.
    """
    if p:
        print(buf)
    # `with` guarantees the handle is closed even if the write raises,
    # unlike the original open/write/close sequence.
    with open(out_file, 'a+') as outfd:
        outfd.write(str(datetime.datetime.now()) + '\t' + buf + '\n')
def initial_environment(seed, cpu_num=5, deterministic=False):
    """Pin BLAS/OMP thread counts and seed every RNG for reproducibility.

    Seeds python's `random`, numpy, and torch (CPU + all CUDA devices);
    optionally forces deterministic cuDNN kernels.
    """
    for var in ('OMP_NUM_THREADS', 'OPENBLAS_NUM_THREADS', 'MKL_NUM_THREADS',
                'VECLIB_MAXIMUM_THREADS', 'NUMEXPR_NUM_THREADS'):
        os.environ[var] = str(cpu_num)
    torch.set_num_threads(cpu_num)
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    if deterministic:
        # Trade speed for bit-exact repeatability on GPU.
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
def sd_matrixing(state_dic):
    """Flatten a model state dict into a single 1-D CPU tensor.

    Parameters are concatenated in the dict's iteration order. Zero-dim
    tensors are promoted to one-element float32 vectors (the original code
    skipped that cast for a leading scalar — an inconsistency fixed here).
    Uses a single torch.cat instead of repeated concatenation, which was
    O(n^2) in total copied elements.

    Args:
        state_dic: mapping of parameter name -> tensor.

    Returns:
        A 1-D torch.Tensor, or None when `state_dic` is empty (matching the
        original behaviour).
    """
    pieces = []
    for param in state_dic.values():
        detached = param.clone().detach()
        if detached.dim() == 0:
            # Scalars carry no shape; promote to a length-1 float32 vector.
            pieces.append(detached.view(1).cpu().type(torch.float32))
        else:
            pieces.append(detached.flatten().cpu())
    if not pieces:
        return None
    return torch.cat(pieces, 0)
def trainable_params(model):
    """Return the model's parameters that require gradients, as a list."""
    # Comprehension replaces the manual append loop (same result, idiomatic).
    return [p for p in model.parameters() if p.requires_grad]
class PiecewiseLinear(namedtuple('PiecewiseLinear', ('knots', 'vals'))):
    """Piecewise-linear schedule: interpolate `vals` over `knots` at time t.

    Values are clamped to the end points outside the knot range.
    """

    def __call__(self, t):
        (value,) = np.interp([t], self.knots, self.vals)
        return value
class StatsLogger():
    """Accumulates per-batch outputs for a set of keys and reports means.

    Relies on the module-level singledispatch helpers `cat` and `to_numpy`
    to merge and convert the collected tensors.
    """

    def __init__(self, keys):
        # One list of detached batch tensors per tracked key.
        self._stats = {k: [] for k in keys}

    def append(self, output):
        """Store `output[k].detach()` for every tracked key k."""
        for k, v in self._stats.items():
            v.append(output[k].detach())

    def stats(self, key):
        """Concatenate everything collected under `key`."""
        return cat(*self._stats[key])

    def mean(self, key):
        """Mean of the collected values for `key` as a float64 scalar."""
        # `np.float` was removed in NumPy 1.24; the builtin `float` is the
        # documented replacement and np.mean treats it as float64 identically.
        return np.mean(to_numpy(self.stats(key)), dtype=float)
@singledispatch
def cat(*xs):
    """Concatenate values; dispatches on the type of the first argument."""
    raise NotImplementedError
@singledispatch
def to_numpy(x):
    """Convert a value to a numpy array; dispatches on the argument type."""
    raise NotImplementedError
@cat.register(torch.Tensor)
def _(*xs):
    # torch tensors are concatenated along dim 0.
    return torch.cat(xs)
@to_numpy.register(torch.Tensor)
def _(x):
return x.detach().cpu().numpy() | 2,561 | 24.366337 | 117 | py |
MGANet-DCC2020 | MGANet-DCC2020-master/codes/MGANet_test_LD37.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from numpy import *
# from scipy.misc import imresize
from skimage.measure import compare_ssim
import cv2
import glob
import time
import os
import argparse
import Net.MGANet as MGANet
import torch
import copy
def yuv_import(filename, dims, startfrm, numframe):
    """Read `numframe` consecutive frames from a planar YUV 4:2:0 file.

    Args:
        filename: path of the raw .yuv file.
        dims: (height, width) of the luma plane; assumed even (4:2:0 chroma).
        startfrm: index of the first frame to read.
        numframe: number of frames to read.

    Returns:
        (Y, U, V) float32 arrays shaped (numframe, 1, H, W) for luma and
        (numframe, 1, H//2, W//2) for each chroma plane.
    """
    height, width = dims
    d00, d01 = height // 2, width // 2
    luma_size = height * width
    chroma_size = d00 * d01
    frame_size = luma_size + 2 * chroma_size  # 4:2:0 -> 1.5 bytes per pixel
    # `with` closes the file even on error (the original leaked the handle),
    # and np.fromfile replaces the original byte-at-a-time ord(fp.read(1))
    # loops: identical values, orders of magnitude faster.
    with open(filename, 'rb') as fp:
        fp.seek(0, 2)
        totalfrm = fp.tell() // frame_size
        assert startfrm + numframe <= totalfrm
        fp.seek(frame_size * startfrm, 0)
        raw = np.fromfile(fp, dtype=np.uint8, count=frame_size * numframe)
    raw = raw.reshape(numframe, frame_size)
    Y = raw[:, :luma_size].reshape(numframe, 1, height, width).astype(np.float32)
    U = raw[:, luma_size:luma_size + chroma_size].reshape(numframe, 1, d00, d01).astype(np.float32)
    V = raw[:, luma_size + chroma_size:].reshape(numframe, 1, d00, d01).astype(np.float32)
    return Y, U, V
def get_w_h(filename):
    """Parse (height, width) from a name like 'Seq_416x240_50.yuv'.

    The token before the first 'x' (after the last '_') is the width, the
    token after it (before the next '_') is the height.
    """
    before_x, after_x = filename.split('x')[0], filename.split('x')[1]
    width = int(before_x.split('_')[-1])
    height = int(after_x.split('_')[0])
    return (height, width)
def get_data(one_filename, video_index, num_frame, startfrm_position):
    """Load compressed, mask, and label Y-planes for one test video.

    `one_filename` holds the data and mask directories (in that order);
    labels are always read from the hard-coded '../test_yuv/label/' path.
    Only the luma (Y) plane of each source is kept.

    Returns a list [compressed_Y, mask_Y, label_Y] of float32 arrays shaped
    (num_frame, 1, H, W).
    """
    one_filename_length = len(one_filename)
    data_Y = []
    # i == 0: compressed frames, i == 1: masks, i == 2: ground-truth labels.
    for i in range(one_filename_length+1):
        if i == 0:
            data_37_filename = np.sort(glob.glob(one_filename[i]+'/*.yuv'))
            data_37_filename_length = len(data_37_filename)
            for i_0 in range(video_index, video_index+1):
                file_name = data_37_filename[i_0]
                dims = get_w_h(filename=file_name)
                data_37_filename_Y, data_37_filename_U, data_37_filename_V = yuv_import(filename=file_name, dims=dims, startfrm=startfrm_position, numframe=num_frame)
                data_Y.append(data_37_filename_Y)
        if i == 1:
            mask_37_filename = np.sort(glob.glob(one_filename[i] + '/*.yuv'))
            mask_37_filename_length = len(mask_37_filename)
            for i_1 in range(video_index, video_index+1):
                file_name = mask_37_filename[i_1]
                dims = get_w_h(filename=file_name)
                mask_37_filename_Y, mask_37_filename_U, mask_37_filename_V = yuv_import(filename=file_name, dims=dims, startfrm=startfrm_position, numframe=num_frame)
                data_Y.append(mask_37_filename_Y)
        if i == 2:
            # NOTE(review): label directory is hard-coded, not taken from one_filename.
            label_37_filename = np.sort(glob.glob('../test_yuv/label/' + '*.yuv'))
            label_37_filename_length = len(label_37_filename)
            for i_2 in range(video_index, video_index+1):
                file_name = label_37_filename[i_2]
                dims = get_w_h(filename=file_name)
                label_37_filename_Y, label_37_filename_U, label_37_filename_V = yuv_import(filename=file_name, dims=dims, startfrm=startfrm_position, numframe=num_frame)
                data_Y.append(label_37_filename_Y)
    return data_Y
def test_batch_key(data_Y, start, batch_size=1):
data_pre = (data_Y[0][start-2:start-1,...])/255.0
data_cur = data_Y[0][start:start+1,...]/255.0
data_aft = data_Y[0][start+2:start+3,...]/255.0
mask = data_Y[1][start:start+1,...]/255.0
label = data_Y[2][start:start+1,...]
start+=1
return data_pre,data_cur,data_aft,mask,label,start
def test_batch(data_Y, start, batch_size=1):
data_pre = (data_Y[0][start-1:start,...])/255.0
data_cur = data_Y[0][start:start+1,...]/255.0
data_aft = data_Y[0][start+1:start+2,...]/255.0
mask = data_Y[1][start:start+1,...]/255.0
label = data_Y[2][start:start+1,...]
start+=1
return data_pre,data_cur,data_aft,mask,label,start
def PSNR(img1, img2):
    """Peak signal-to-noise ratio (dB) between two images, peak value 255.

    Returns 100 when the images are identical (PSNR would be infinite).
    """
    # Explicit import: the module relied on `math` leaking in via
    # `from numpy import *`, and the numpy.math alias is gone in NumPy 2.0.
    import math
    mse = np.mean((img1.astype(np.float32) - img2.astype(np.float32)) ** 2).astype(np.float32)
    if mse == 0:
        return 100
    PIXEL_MAX = 255.0
    return 20 * math.log10(PIXEL_MAX / math.sqrt(mse))
def image_test(one_filename, net_G, patch_size=[128,128], f_txt=None, opt=None):
    """Run MGANet over the LD37 test videos and report per-frame PSNR gains.

    For each video, feeds (previous, current, next, mask) frame tuples
    through `net_G` on the GPU, writes enhanced/mask/label/compressed PNGs
    under opt.result_path, and logs PSNR of the enhanced vs. the anchor
    (compressed) frame to `f_txt`.

    NOTE(review): `patch_size` is unused here; default mutable arg kept
    for interface compatibility.
    """
    ave_diff_psnr = 0.
    ave_psnr_pre_gt = 0.
    ave_psnr_data_gt = 0.
    video_num = opt.video_nums
    for video_index in range(video_num):
        data_37_filename = np.sort(glob.glob(one_filename[0]+'/*.yuv'))
        data_Y = get_data(one_filename, video_index=video_index, num_frame=92, startfrm_position=opt.startfrm_position)
        start = 1
        psnr_diff_sum = 0
        psnr_pre_gt_sum = 0
        psnr_data_gt_sum = 0
        nums = opt.frame_nums
        for itr in range(0, nums):
            # In the LD (low-delay) configuration every 4th frame is a key
            # frame whose neighbours are two frames away.
            if (start - 2) % 4 == 0:
                data_pre, data_cur, data_aft, mask, label, start = test_batch_key(data_Y=data_Y, start=start, batch_size=1)
            else:
                data_pre, data_cur, data_aft, mask, label, start = test_batch(data_Y=data_Y, start=start, batch_size=1)
            height = data_pre.shape[2]
            width = data_pre.shape[3]
            data_pre_value_patch = torch.from_numpy(data_pre).float().cuda()
            data_cur_value_patch = torch.from_numpy(data_cur).float().cuda()
            data_aft_value_patch = torch.from_numpy(data_aft).float().cuda()
            data_mask_value_patch = torch.from_numpy(mask).float().cuda()
            start_time = time.time()
            fake_image = net_G(data_pre_value_patch, data_cur_value_patch, data_aft_value_patch, data_mask_value_patch)
            end_time = time.time()
            # Back to 0-255 numpy for saving and PSNR computation.
            fake_image_numpy = fake_image.detach().cpu().numpy()
            fake_image_numpy = np.squeeze(fake_image_numpy)*255.0
            finally_image = np.squeeze(fake_image_numpy)
            mask_image = np.squeeze(mask)*255.
            os.makedirs(opt.result_path+'/result_enhanced_data/%02d'%(video_index+1), exist_ok=True)
            os.makedirs(opt.result_path+'/result_mask/%02d'%(video_index+1), exist_ok=True)
            os.makedirs(opt.result_path+'/result_label/%02d'%(video_index+1), exist_ok=True)
            os.makedirs(opt.result_path+'/result_compression_data/%02d'%(video_index+1), exist_ok=True)
            cv2.imwrite(opt.result_path+'/result_enhanced_data/%02d/%02d.png'%(video_index+1, itr+2), finally_image.astype(np.uint8))
            cv2.imwrite(opt.result_path+'/result_mask/%02d/%02d.png'%(video_index+1, itr+2), mask_image.astype(np.uint8))
            data_cur_image = (np.squeeze(data_cur)*255.0).astype(np.float32)
            label = np.squeeze(label).astype(np.float32)
            cv2.imwrite(opt.result_path+'/result_label/%02d/%02d.png'%(video_index+1, itr+2), label.astype(np.uint8))
            cv2.imwrite(opt.result_path+'/result_compression_data/%02d/%02d.png'%(video_index+1, itr+2), data_cur_image.astype(np.uint8))
            psnr_pre_gt = PSNR(finally_image, label)
            psnr_data_gt = PSNR(data_cur_image, label)
            psnr_diff = psnr_pre_gt - psnr_data_gt
            psnr_diff_sum += psnr_diff
            psnr_pre_gt_sum += psnr_pre_gt
            psnr_data_gt_sum += psnr_data_gt
            print('psnr_gain:%.05f'%(psnr_diff))
            print('psnr_predict:{:.04f}  psnr_anchor:{:.04f}  psnr_gain:{:.04f}'.format(psnr_pre_gt, psnr_data_gt, psnr_diff), file=f_txt)
        print('video_index:{:2d} psnr_predict_average:{:.04f}  psnr_anchor_average:{:.04f}  psnr_gain_average:{:.04f}'.format(video_index, psnr_pre_gt_sum/nums, psnr_data_gt_sum/nums, psnr_diff_sum/nums), file=f_txt)
        print('{}'.format(data_37_filename[video_index]), file=f_txt)
        f_txt.write('\r\n')
        ave_diff_psnr += psnr_diff_sum/nums
        ave_psnr_pre_gt += psnr_pre_gt_sum/nums
        ave_psnr_data_gt += psnr_data_gt_sum/nums
    print(' average_psnr_predict:{:.04f}  average_psnr_anchor:{:.04f}  average_psnr_gain:{:0.4f}'.format(ave_psnr_pre_gt / video_num, ave_psnr_data_gt / video_num, ave_diff_psnr / video_num))
    print(' average_psnr_predict:{:.04f}  average_psnr_anchor:{:.04f}  average_psnr_gain:{:0.4f}'.format(ave_psnr_pre_gt / video_num, ave_psnr_data_gt / video_num, ave_diff_psnr / video_num), file=f_txt)
if __name__ == "__main__":
    # Command-line entry point: load the LD37 checkpoint and evaluate it on
    # the LD37 test sequences, logging results to a text file.
    parser = argparse.ArgumentParser(description="MGANet_test")
    parser.add_argument('--net_G', default='../model/model_epoch_LD37.pth', help="add checkpoint")
    parser.add_argument("--gpu_id", default=0, type=int, help="gpu ids (default: 0)")
    parser.add_argument("--video_nums", default=1, type=int, help="video number (default: 0)")
    parser.add_argument("--frame_nums", default=19, type=int, help="frame number of one video to test (default: 90)")
    parser.add_argument("--startfrm_position", default=0, type=int, help="start frame position (default: 0)")
    parser.add_argument("--is_training", default=False, type=bool, help="train or test mode")
    parser.add_argument("--result_path", default='./result_LD37/', type=str, help="store results")
    opts = parser.parse_args()
    torch.cuda.set_device(opts.gpu_id)
    txt_name = './MGANet_test_data_LD37.txt'
    # Create the log file if it does not exist, then truncate it.
    if os.path.isfile(txt_name):
        f = open(txt_name, 'w+')
    else:
        os.mknod(txt_name)
        f = open(txt_name, 'w+')
    one_filename = np.sort(glob.glob('../test_yuv/LD37/' + '*'))
    print(one_filename)
    patch_size = [240,416]
    net_G = MGANet.Gen_Guided_UNet(batchNorm=False, input_size=patch_size, is_training=opts.is_training)
    net_G.eval()
    # map_location moves checkpoint tensors onto the selected GPU.
    net_G.load_state_dict(torch.load(opts.net_G, map_location=lambda storage, loc: storage.cuda(opts.gpu_id)))
    print('....')
    net_G.cuda()
    image_test(one_filename=one_filename, net_G=net_G, patch_size=patch_size, f_txt=f, opt=opts)
    f.close()
| 9,952 | 41.900862 | 211 | py |
MGANet-DCC2020 | MGANet-DCC2020-master/codes/MGANet_test_AI37.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from numpy import *
# from scipy.misc import imresize
from skimage.measure import compare_ssim
import cv2
import glob
import time
import os
import argparse
import Net.MGANet as MGANet
import torch
import copy
def yuv_import(filename, dims, startfrm, numframe):
    """Read `numframe` consecutive frames from a planar YUV 4:2:0 file.

    Args:
        filename: path of the raw .yuv file.
        dims: (height, width) of the luma plane; assumed even (4:2:0 chroma).
        startfrm: index of the first frame to read.
        numframe: number of frames to read.

    Returns:
        (Y, U, V) float32 arrays shaped (numframe, 1, H, W) for luma and
        (numframe, 1, H//2, W//2) for each chroma plane.
    """
    height, width = dims
    d00, d01 = height // 2, width // 2
    luma_size = height * width
    chroma_size = d00 * d01
    frame_size = luma_size + 2 * chroma_size  # 4:2:0 -> 1.5 bytes per pixel
    # `with` closes the file even on error (the original leaked the handle),
    # and np.fromfile replaces the original byte-at-a-time ord(fp.read(1))
    # loops: identical values, orders of magnitude faster.
    with open(filename, 'rb') as fp:
        fp.seek(0, 2)
        totalfrm = fp.tell() // frame_size
        assert startfrm + numframe <= totalfrm
        fp.seek(frame_size * startfrm, 0)
        raw = np.fromfile(fp, dtype=np.uint8, count=frame_size * numframe)
    raw = raw.reshape(numframe, frame_size)
    Y = raw[:, :luma_size].reshape(numframe, 1, height, width).astype(np.float32)
    U = raw[:, luma_size:luma_size + chroma_size].reshape(numframe, 1, d00, d01).astype(np.float32)
    V = raw[:, luma_size + chroma_size:].reshape(numframe, 1, d00, d01).astype(np.float32)
    return Y, U, V
def get_w_h(filename):
    """Parse (height, width) from a name like 'Seq_416x240_50.yuv'.

    The token before the first 'x' (after the last '_') is the width, the
    token after it (before the next '_') is the height.
    """
    before_x, after_x = filename.split('x')[0], filename.split('x')[1]
    width = int(before_x.split('_')[-1])
    height = int(after_x.split('_')[0])
    return (height, width)
def get_data(one_filename, video_index, num_frame, startfrm_position):
    """Load compressed, mask, and label Y-planes for one test video.

    `one_filename` holds the data and mask directories (in that order);
    labels are always read from the hard-coded '../test_yuv/label/' path.
    Only the luma (Y) plane of each source is kept.

    Returns a list [compressed_Y, mask_Y, label_Y] of float32 arrays shaped
    (num_frame, 1, H, W).
    """
    one_filename_length = len(one_filename)
    data_Y = []
    # i == 0: compressed frames, i == 1: masks, i == 2: ground-truth labels.
    for i in range(one_filename_length+1):
        if i == 0:
            data_37_filename = np.sort(glob.glob(one_filename[i]+'/*.yuv'))
            data_37_filename_length = len(data_37_filename)
            for i_0 in range(video_index, video_index+1):
                file_name = data_37_filename[i_0]
                dims = get_w_h(filename=file_name)
                data_37_filename_Y, data_37_filename_U, data_37_filename_V = yuv_import(filename=file_name, dims=dims, startfrm=startfrm_position, numframe=num_frame)
                data_Y.append(data_37_filename_Y)
        if i == 1:
            mask_37_filename = np.sort(glob.glob(one_filename[i] + '/*.yuv'))
            mask_37_filename_length = len(mask_37_filename)
            for i_1 in range(video_index, video_index+1):
                file_name = mask_37_filename[i_1]
                dims = get_w_h(filename=file_name)
                mask_37_filename_Y, mask_37_filename_U, mask_37_filename_V = yuv_import(filename=file_name, dims=dims, startfrm=startfrm_position, numframe=num_frame)
                data_Y.append(mask_37_filename_Y)
        if i == 2:
            # NOTE(review): label directory is hard-coded, not taken from one_filename.
            label_37_filename = np.sort(glob.glob('../test_yuv/label/' + '*.yuv'))
            label_37_filename_length = len(label_37_filename)
            for i_2 in range(video_index, video_index+1):
                file_name = label_37_filename[i_2]
                dims = get_w_h(filename=file_name)
                label_37_filename_Y, label_37_filename_U, label_37_filename_V = yuv_import(filename=file_name, dims=dims, startfrm=startfrm_position, numframe=num_frame)
                data_Y.append(label_37_filename_Y)
    return data_Y
def test_batch(data_Y, start, batch_size=1):
data_pre = (data_Y[0][start-1:start,...])/255.0
data_cur = data_Y[0][start:start+1,...]/255.0
data_aft = data_Y[0][start+1:start+2,...]/255.0
mask = data_Y[1][start:start+1,...]/255.0
label = data_Y[2][start:start+1,...]
start+=1
return data_pre,data_cur,data_aft,mask,label,start
def PSNR(img1, img2):
    """Peak signal-to-noise ratio (dB) between two images, peak value 255.

    Returns 100 when the images are identical (PSNR would be infinite).
    """
    # Explicit import: the module relied on `math` leaking in via
    # `from numpy import *`, and the numpy.math alias is gone in NumPy 2.0.
    import math
    mse = np.mean((img1.astype(np.float32) - img2.astype(np.float32)) ** 2).astype(np.float32)
    if mse == 0:
        return 100
    PIXEL_MAX = 255.0
    return 20 * math.log10(PIXEL_MAX / math.sqrt(mse))
def image_test(one_filename, net_G, patch_size=[128,128], f_txt=None, opt=None):
    """Run MGANet over the AI37 test videos and report per-frame PSNR gains.

    For each video, feeds (previous, current, next, mask) frame tuples
    through `net_G` on the GPU, writes enhanced/mask/label/compressed PNGs
    under opt.result_path, and logs PSNR of the enhanced vs. the anchor
    (compressed) frame to `f_txt`. All-intra (AI) configuration: neighbours
    are always the immediately adjacent frames.

    NOTE(review): `patch_size` is unused here; default mutable arg kept
    for interface compatibility.
    """
    ave_gain_psnr = 0.
    ave_psnr_predict = 0.
    ave_psnr_data = 0.
    video_num = opt.video_nums
    for video_index in range(video_num):
        data_37_filename = np.sort(glob.glob(one_filename[0]+'/*.yuv'))
        data_Y = get_data(one_filename, video_index=video_index, num_frame=opt.frame_nums+5, startfrm_position=opt.startfrm_position)
        start = 1
        psnr_gain_sum = 0
        psnr_pre_gt_sum = 0
        psnr_data_gt_sum = 0
        nums = opt.frame_nums
        for itr in range(0, nums):
            data_pre, data_cur, data_aft, mask, label, start = test_batch(data_Y=data_Y, start=start, batch_size=1)
            height = data_pre.shape[2]
            width = data_pre.shape[3]
            data_pre_value_patch = torch.from_numpy(data_pre).float().cuda()
            data_cur_value_patch = torch.from_numpy(data_cur).float().cuda()
            data_aft_value_patch = torch.from_numpy(data_aft).float().cuda()
            data_mask_value_patch = torch.from_numpy(mask).float().cuda()
            start_time = time.time()
            fake_image = net_G(data_pre_value_patch, data_cur_value_patch, data_aft_value_patch, data_mask_value_patch)
            end_time = time.time()
            # Back to 0-255 numpy for saving and PSNR computation.
            fake_image_numpy = fake_image.detach().cpu().numpy()
            fake_image_numpy = np.squeeze(fake_image_numpy)*255.0
            finally_image = np.squeeze(fake_image_numpy)
            mask_image = np.squeeze(mask)*255.
            os.makedirs(opt.result_path+'/result_enhanced_data/%02d'%(video_index+1), exist_ok=True)
            os.makedirs(opt.result_path+'/result_mask/%02d'%(video_index+1), exist_ok=True)
            os.makedirs(opt.result_path+'/result_label/%02d'%(video_index+1), exist_ok=True)
            os.makedirs(opt.result_path+'/result_compression_data/%02d'%(video_index+1), exist_ok=True)
            cv2.imwrite(opt.result_path+'/result_enhanced_data/%02d/%02d.png'%(video_index+1, itr+2), finally_image.astype(np.uint8))
            cv2.imwrite(opt.result_path+'/result_mask/%02d/%02d.png'%(video_index+1, itr+2), mask_image.astype(np.uint8))
            data_cur_image = (np.squeeze(data_cur)*255.0).astype(np.float32)
            label = np.squeeze(label).astype(np.float32)
            cv2.imwrite(opt.result_path+'/result_label/%02d/%02d.png'%(video_index+1, itr+2), label.astype(np.uint8))
            cv2.imwrite(opt.result_path+'/result_compression_data/%02d/%02d.png'%(video_index+1, itr+2), data_cur_image.astype(np.uint8))
            psnr_pre_gt = PSNR(finally_image, label)
            psnr_data_gt = PSNR(data_cur_image, label)
            psnr_gain = psnr_pre_gt - psnr_data_gt
            psnr_gain_sum += psnr_gain
            psnr_pre_gt_sum += psnr_pre_gt
            psnr_data_gt_sum += psnr_data_gt
            print('psnr_gain:%.05f'%(psnr_gain))
            print('psnr_predict:{:.04f}  psnr_anchor:{:.04f}  psnr_gain:{:.04f}'.format(psnr_pre_gt, psnr_data_gt, psnr_gain), file=f_txt)
            print( data_37_filename[video_index])
        print('video_index:{:2d} psnr_predict_average:{:.04f}  psnr_data_average:{:.04f}  psnr_gain_average:{:.04f}'.format(video_index, psnr_pre_gt_sum/nums, psnr_data_gt_sum/nums, psnr_gain_sum/nums), file=f_txt)
        print('{}'.format(data_37_filename[video_index]), file=f_txt)
        f_txt.write('\r\n')
        ave_gain_psnr += psnr_gain_sum/nums
        ave_psnr_predict += psnr_pre_gt_sum/nums
        ave_psnr_data += psnr_data_gt_sum/nums
    print(' average_psnr_predict:{:.04f}  average_psnr_anchor:{:.04f}  average_psnr_gain:{:0.4f}'.format(ave_psnr_predict/video_num, ave_psnr_data/video_num, ave_gain_psnr/video_num))
    print(' average_psnr_predict:{:.04f}  average_psnr_anchor:{:.04f}  average_psnr_gain:{:0.4f}'.format(ave_psnr_predict/video_num, ave_psnr_data/video_num, ave_gain_psnr/video_num), file=f_txt)
if __name__ == "__main__":
    # Command-line entry point: load the AI37 checkpoint and evaluate it on
    # the AI37 (all-intra) test sequences, logging results to a text file.
    parser = argparse.ArgumentParser(description="MGANet_test")
    parser.add_argument('--net_G', default='../model/model_epoch_AI37.pth', help="add checkpoint")
    parser.add_argument("--gpu_id", default=0, type=int, help="gpu ids (default: 0)")
    parser.add_argument("--video_nums", default=1, type=int, help="Videos number (default: 0)")
    parser.add_argument("--frame_nums", default=29, type=int, help="frame number of the video to test (default: 90)")
    parser.add_argument("--startfrm_position", default=9, type=int, help="start frame position in one video (default: 0)")
    parser.add_argument("--is_training", default=False, type=bool, help="train or test mode")
    parser.add_argument("--result_path", default='./result_AI37/', type=str, help="store results")
    opts = parser.parse_args()
    torch.cuda.set_device(opts.gpu_id)
    txt_name = './MGANet_test_data_AI37.txt'
    # Create the log file if it does not exist, then truncate it.
    if os.path.isfile(txt_name):
        f = open(txt_name, 'w+')
    else:
        os.mknod(txt_name)
        f = open(txt_name, 'w+')
    one_filename = np.sort(glob.glob('../test_yuv/AI37/' + '*'))
    print(one_filename)
    patch_size = [240,416]
    net_G = MGANet.Gen_Guided_UNet(batchNorm=False, input_size=patch_size, is_training=opts.is_training)
    net_G.eval()
    # map_location moves checkpoint tensors onto the selected GPU.
    net_G.load_state_dict(torch.load(opts.net_G, map_location=lambda storage, loc: storage.cuda(opts.gpu_id)))
    print('....')
    net_G.cuda()
    image_test(one_filename=one_filename, net_G=net_G, patch_size=patch_size, f_txt=f, opt=opts)
    f.close()
| 9,464 | 41.443946 | 209 | py |
MGANet-DCC2020 | MGANet-DCC2020-master/codes/LSTM/functional.py | from functools import partial
import torch
import torch.nn.functional as F
from torch.nn._functions.thnn import rnnFusedPointwise as fusedBackend
from .utils import _single, _pair, _triple
def RNNReLUCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None, linear_func=None):
    """Single vanilla-RNN step with ReLU nonlinearity.

    Copied from torch.nn._functions.rnn and modified.
    """
    if linear_func is None:
        linear_func = F.linear
    pre_activation = linear_func(input, w_ih, b_ih) + linear_func(hidden, w_hh, b_hh)
    return F.relu(pre_activation)
def RNNTanhCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None, linear_func=None):
    """Single vanilla-RNN step with tanh nonlinearity.

    Copied from torch.nn._functions.rnn and modified.
    """
    if linear_func is None:
        linear_func = F.linear
    # torch.tanh replaces F.tanh, deprecated since PyTorch 0.4.1 (identical math).
    hy = torch.tanh(linear_func(input, w_ih, b_ih) + linear_func(hidden, w_hh, b_hh))
    return hy
def LSTMCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None, linear_func=None):
    """One LSTM step; returns (h_t, c_t).

    Copied from torch.nn._functions.rnn and modified. `hidden` is a tuple
    (h_{t-1}, c_{t-1}); gate weights are stacked in i/f/g/o order along dim 0.
    """
    if linear_func is None:
        linear_func = F.linear
    if input.is_cuda and linear_func is F.linear:
        # Fused CUDA kernel fast path (legacy torch internals).
        igates = linear_func(input, w_ih)
        hgates = linear_func(hidden[0], w_hh)
        state = fusedBackend.LSTMFused.apply
        return state(igates, hgates, hidden[1]) if b_ih is None else state(igates, hgates, hidden[1], b_ih, b_hh)
    hx, cx = hidden
    gates = linear_func(input, w_ih, b_ih) + linear_func(hx, w_hh, b_hh)
    ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
    # torch.sigmoid/torch.tanh replace the deprecated F.sigmoid/F.tanh
    # (deprecated since PyTorch 0.4.1; identical math).
    ingate = torch.sigmoid(ingate)
    forgetgate = torch.sigmoid(forgetgate)
    cellgate = torch.tanh(cellgate)
    outgate = torch.sigmoid(outgate)
    cy = (forgetgate * cx) + (ingate * cellgate)
    hy = outgate * torch.tanh(cy)
    return hy, cy
def PeepholeLSTMCell(input, hidden, w_ih, w_hh, w_pi, w_pf, w_po,
                     b_ih=None, b_hh=None, linear_func=None):
    """One peephole-LSTM step; returns (h_t, c_t).

    Input/forget gates peek at c_{t-1} (w_pi, w_pf); the output gate peeks
    at the freshly computed c_t (w_po).
    """
    if linear_func is None:
        linear_func = F.linear
    hx, cx = hidden
    gates = linear_func(input, w_ih, b_ih) + linear_func(hx, w_hh, b_hh)
    ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
    # Out-of-place adds instead of the original in-place `+=` on chunk views
    # (safer under autograd, same values). Peepholes from the previous cell:
    ingate = ingate + linear_func(cx, w_pi)
    forgetgate = forgetgate + linear_func(cx, w_pf)
    # torch.sigmoid/torch.tanh replace the deprecated F.sigmoid/F.tanh.
    ingate = torch.sigmoid(ingate)
    forgetgate = torch.sigmoid(forgetgate)
    cellgate = torch.tanh(cellgate)
    cy = (forgetgate * cx) + (ingate * cellgate)
    # Output gate peeks at the *new* cell state.
    outgate = outgate + linear_func(cy, w_po)
    outgate = torch.sigmoid(outgate)
    hy = outgate * torch.tanh(cy)
    return hy, cy
def GRUCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None, linear_func=None):
    """Single GRU step.

    Copied from torch.nn._functions.rnn and modified: ``linear_func`` lets the
    caller substitute a convolution for the default fully-connected transform.

    Args:
        input: input at this timestep.
        hidden: previous hidden state.
        w_ih, w_hh: stacked gate weights (3*hidden rows: reset, update, new).
        b_ih, b_hh: optional stacked gate biases.
        linear_func: callable ``(x, w, b=None) -> y``; defaults to F.linear.

    Returns:
        The next hidden state.
    """
    if linear_func is None:
        linear_func = F.linear
    if input.is_cuda and linear_func is F.linear:
        # Fast fused CUDA kernel; only valid for the plain fully-connected case.
        gi = linear_func(input, w_ih)
        gh = linear_func(hidden, w_hh)
        state = fusedBackend.GRUFused.apply
        return state(gi, gh, hidden) if b_ih is None else state(gi, gh, hidden, b_ih, b_hh)
    gi = linear_func(input, w_ih, b_ih)
    gh = linear_func(hidden, w_hh, b_hh)
    i_r, i_i, i_n = gi.chunk(3, 1)
    h_r, h_i, h_n = gh.chunk(3, 1)
    # torch.sigmoid/torch.tanh: F.sigmoid and F.tanh are deprecated.
    resetgate = torch.sigmoid(i_r + h_r)
    inputgate = torch.sigmoid(i_i + h_i)
    newgate = torch.tanh(i_n + resetgate * h_n)
    hy = newgate + inputgate * (hidden - newgate)
    return hy
def StackedRNN(inners, num_layers, lstm=False, dropout=0, train=True):
    """Stack per-direction recurrence functions into a multi-layer RNN runner.

    Copied from torch.nn._functions.rnn and modified.

    Args:
        inners: tuple of per-direction recurrence functions (1 entry, or 2 for
            a bidirectional network: forward and reverse).
        num_layers: number of stacked layers.
        lstm: True when each hidden state is an (h, c) tuple (LSTM variants).
        dropout: dropout probability applied between layers (not after the last).
        train: whether dropout is active.

    Returns:
        ``forward(input, hidden, weight, batch_sizes) -> (next_hidden, output)``.
    """
    num_directions = len(inners)
    total_layers = num_layers * num_directions

    def forward(input, hidden, weight, batch_sizes):
        assert(len(weight) == total_layers)
        next_hidden = []
        # Channel axis along which the direction outputs are concatenated;
        # derived from the rank difference between input and the first weight.
        ch_dim = input.dim() - weight[0][0].dim() + 1

        if lstm:
            # Regroup ((h_all), (c_all)) into a per-layer list of (h, c) pairs.
            hidden = list(zip(*hidden))

        for i in range(num_layers):
            all_output = []
            for j, inner in enumerate(inners):
                l = i * num_directions + j
                hy, output = inner(input, hidden[l], weight[l], batch_sizes)
                next_hidden.append(hy)
                all_output.append(output)
            # Both directions' outputs become the next layer's input.
            input = torch.cat(all_output, ch_dim)
            if dropout != 0 and i < num_layers - 1:
                input = F.dropout(input, p=dropout, training=train, inplace=False)

        if lstm:
            # Re-stack per-layer (h, c) pairs into two (total_layers, ...) tensors.
            next_h, next_c = zip(*next_hidden)
            next_hidden = (
                torch.cat(next_h, 0).view(total_layers, *next_h[0].size()),
                torch.cat(next_c, 0).view(total_layers, *next_c[0].size())
            )
        else:
            next_hidden = torch.cat(next_hidden, 0).view(
                total_layers, *next_hidden[0].size())
        return next_hidden, input

    return forward
def Recurrent(inner, reverse=False):
    """Unroll a single-step cell over a fixed-length (non-packed) sequence.

    Copied from torch.nn._functions.rnn without behavioral change.

    Returns ``forward(input, hidden, weight, batch_sizes) -> (hidden, output)``
    where ``output`` stacks the per-step hidden states in time order.
    """
    def forward(input, hidden, weight, batch_sizes):
        seq_len = input.size(0)
        time_steps = range(seq_len - 1, -1, -1) if reverse else range(seq_len)
        collected = []
        for t in time_steps:
            hidden = inner(input[t], hidden, *weight)
            # hack to handle LSTM: an (h, c) tuple contributes only h to the output
            collected.append(hidden[0] if isinstance(hidden, tuple) else hidden)
        if reverse:
            collected = collected[::-1]
        stacked = torch.cat(collected, 0).view(seq_len, *collected[0].size())
        return hidden, stacked

    return forward
def variable_recurrent_factory(inner, reverse=False):
    """Return the packed-sequence recurrence runner for *inner*.

    Copied from torch.nn._functions.rnn without behavioral change: picks the
    reverse-time variant when ``reverse`` is True.
    """
    factory = VariableRecurrentReverse if reverse else VariableRecurrent
    return factory(inner)
def VariableRecurrent(inner):
    """Forward-time recurrence over a packed variable-length sequence.

    Copied from torch.nn._functions.rnn without modification.  ``batch_sizes``
    is the PackedSequence batch-size vector (non-increasing); as the effective
    batch shrinks, the hidden rows of finished sequences are set aside and
    re-assembled at the end.
    """
    def forward(input, hidden, weight, batch_sizes):
        output = []
        input_offset = 0
        last_batch_size = batch_sizes[0]
        hiddens = []
        # Non-LSTM cells carry a single tensor; wrap it so the loop below can
        # treat every hidden state uniformly as a tuple.
        flat_hidden = not isinstance(hidden, tuple)
        if flat_hidden:
            hidden = (hidden,)
        for batch_size in batch_sizes:
            step_input = input[input_offset:input_offset + batch_size]
            input_offset += batch_size

            dec = last_batch_size - batch_size
            if dec > 0:
                # `dec` sequences just ended: stash their final hidden rows and
                # shrink the working hidden state to the still-active rows.
                hiddens.append(tuple(h[-dec:] for h in hidden))
                hidden = tuple(h[:-dec] for h in hidden)
            last_batch_size = batch_size

            if flat_hidden:
                hidden = (inner(step_input, hidden[0], *weight),)
            else:
                hidden = inner(step_input, hidden, *weight)

            output.append(hidden[0])
        hiddens.append(hidden)
        # Stashed in shrinking order; reverse so concatenation restores the
        # original batch order of the full hidden state.
        hiddens.reverse()

        hidden = tuple(torch.cat(h, 0) for h in zip(*hiddens))
        assert hidden[0].size(0) == batch_sizes[0]
        if flat_hidden:
            hidden = hidden[0]
        output = torch.cat(output, 0)

        return hidden, output

    return forward
def VariableRecurrentReverse(inner):
    """Reverse-time recurrence over a packed variable-length sequence.

    Copied from torch.nn._functions.rnn without modification.  Walks the
    packed data from the last timestep backwards; rows of sequences that
    "start" (when seen in reverse) are re-introduced from the initial hidden
    state as the effective batch grows.
    """
    def forward(input, hidden, weight, batch_sizes):
        output = []
        input_offset = input.size(0)
        last_batch_size = batch_sizes[-1]
        initial_hidden = hidden
        # Non-LSTM cells carry a single tensor; wrap for uniform handling.
        flat_hidden = not isinstance(hidden, tuple)
        if flat_hidden:
            hidden = (hidden,)
            initial_hidden = (initial_hidden,)
        # Start from the rows active at the (chronologically) last timestep.
        hidden = tuple(h[:batch_sizes[-1]] for h in hidden)
        for i in reversed(range(len(batch_sizes))):
            batch_size = batch_sizes[i]
            inc = batch_size - last_batch_size
            if inc > 0:
                # `inc` longer-finished sequences re-enter: take their rows
                # from the initial hidden state.
                hidden = tuple(torch.cat((h, ih[last_batch_size:batch_size]), 0)
                               for h, ih in zip(hidden, initial_hidden))
            last_batch_size = batch_size
            step_input = input[input_offset - batch_size:input_offset]
            input_offset -= batch_size

            if flat_hidden:
                hidden = (inner(step_input, hidden[0], *weight),)
            else:
                hidden = inner(step_input, hidden, *weight)
            output.append(hidden[0])

        # Collected back-to-front; flip into chronological order.
        output.reverse()
        output = torch.cat(output, 0)
        if flat_hidden:
            hidden = hidden[0]
        return hidden, output

    return forward
def ConvNdWithSamePadding(convndim=2, stride=1, dilation=1, groups=1):
    """Return a ``linear_func``-compatible wrapper around conv{1,2,3}d that
    zero-pads the input so the spatial output shape equals the input shape.

    The returned callable has the signature ``forward(input, w, b=None)`` and
    is passed as ``linear_func`` to the RNN cell functions above.
    """
    # Resolve the tuple-expansion helper once at factory time (fail fast and
    # avoid re-checking convndim on every timestep).
    if convndim == 1:
        ntuple = _single
    elif convndim == 2:
        ntuple = _pair
    elif convndim == 3:
        ntuple = _triple
    else:
        raise ValueError('convndim must be 1, 2, or 3, but got {}'.format(convndim))

    def forward(input, w, b=None):
        if input.dim() != convndim + 2:
            # Fixed message typo: 'bot got' -> 'but got'.
            raise RuntimeError('Input dim must be {}, but got {}'.format(convndim + 2, input.dim()))
        if w.dim() != convndim + 2:
            raise RuntimeError('w must be {}, but got {}'.format(convndim + 2, w.dim()))

        insize = input.shape[2:]
        kernel_size = w.shape[2:]
        _stride = ntuple(stride)
        _dilation = ntuple(dilation)

        # Asymmetric per-side padding, listed last-dimension-first as F.pad
        # expects; (i + 1 - h) alternates the extra pixel between the two sides.
        ps = [(i + 1 - h + s * (h - 1) + d * (k - 1)) // 2
              for h, k, s, d in list(zip(insize, kernel_size, _stride, _dilation))[::-1] for i in range(2)]
        # Padding to make the output shape to have the same shape as the input
        input = F.pad(input, ps, 'constant', 0)
        return getattr(F, 'conv{}d'.format(convndim))(
            input, w, b, stride=_stride, padding=ntuple(0), dilation=_dilation, groups=groups)
    return forward
def _conv_cell_helper(mode, convndim=2, stride=1, dilation=1, groups=1):
    """Bind a same-padding N-d convolution into the pointwise cell for *mode*."""
    linear_func = ConvNdWithSamePadding(convndim=convndim, stride=stride, dilation=dilation, groups=groups)
    cell_by_mode = {
        'RNN_RELU': RNNReLUCell,
        'RNN_TANH': RNNTanhCell,
        'LSTM': LSTMCell,
        'GRU': GRUCell,
        'PeepholeLSTM': PeepholeLSTMCell,
    }
    try:
        base_cell = cell_by_mode[mode]
    except KeyError:
        raise Exception('Unknown mode: {}'.format(mode))
    return partial(base_cell, linear_func=linear_func)
def AutogradConvRNN(
        mode, num_layers=1, batch_first=False,
        dropout=0, train=True, bidirectional=False, variable_length=False,
        convndim=2, stride=1, dilation=1, groups=1):
    """Assemble the full autograd-based convolutional RNN runner.

    Copied from torch.nn._functions.rnn and modified: the pointwise cell is a
    convolutional cell built by ``_conv_cell_helper``.

    Args:
        mode: one of 'RNN_RELU', 'RNN_TANH', 'LSTM', 'PeepholeLSTM', 'GRU'.
        variable_length: True when the input is a packed sequence.
        convndim / stride / dilation / groups: convolution configuration.

    Returns:
        ``forward(input, weight, hidden, batch_sizes) -> (output, next_hidden)``.
    """
    cell = _conv_cell_helper(mode, convndim=convndim, stride=stride, dilation=dilation, groups=groups)

    # Packed sequences need the variable-length runner; fixed-length input
    # uses the plain unrolled recurrence.
    rec_factory = variable_recurrent_factory if variable_length else Recurrent

    if bidirectional:
        layer = (rec_factory(cell), rec_factory(cell, reverse=True))
    else:
        layer = (rec_factory(cell),)

    func = StackedRNN(layer, num_layers, (mode in ('LSTM', 'PeepholeLSTM')), dropout=dropout, train=train)

    def forward(input, weight, hidden, batch_sizes):
        # Internally the recurrence is time-major; transpose only for padded
        # (non-packed) batch-first input.
        if batch_first and batch_sizes is None:
            input = input.transpose(0, 1)

        nexth, output = func(input, hidden, weight, batch_sizes)

        if batch_first and batch_sizes is None:
            output = output.transpose(0, 1)

        return output, nexth

    return forward
| 11,086 | 33.755486 | 113 | py |
MGANet-DCC2020 | MGANet-DCC2020-master/codes/LSTM/utils.py | import collections
from itertools import repeat
""" Copied from torch.nn.modules.utils """
def _ntuple(n):
def parse(x):
if isinstance(x, collections.Iterable):
return x
return tuple(repeat(x, n))
return parse
_single = _ntuple(1)  # scalar -> 1-tuple (Conv1d-style args)
_pair = _ntuple(2)  # scalar -> 2-tuple (Conv2d-style args)
_triple = _ntuple(3)  # scalar -> 3-tuple (Conv3d-style args)
_quadruple = _ntuple(4)  # scalar -> 4-tuple
| 337 | 15.9 | 47 | py |
MGANet-DCC2020 | MGANet-DCC2020-master/codes/LSTM/module.py | import math
from typing import Union, Sequence
import torch
from torch.nn import Parameter
from torch.nn.utils.rnn import PackedSequence
from .functional import AutogradConvRNN, _conv_cell_helper
from .utils import _single, _pair, _triple
class ConvNdRNNBase(torch.nn.Module):
    """Base class for multi-layer N-dimensional convolutional RNNs.

    Mirrors torch.nn.RNNBase, but every affine transform is an N-d
    convolution with 'same' padding, so hidden states keep the spatial
    layout of the input.

    Fix vs. the original: ``__setstate__`` rebuilt ``_all_weights`` with
    ``weights[:len(weights) // 2]`` when ``bias`` is False, which keeps only
    3 of the 5 non-bias weight names for PeepholeLSTM; it now drops exactly
    the two bias names instead.
    """

    def __init__(self,
                 mode: str,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: Union[int, Sequence[int]],
                 num_layers: int=1,
                 bias: bool=True,
                 batch_first: bool=False,
                 dropout: float=0.,
                 bidirectional: bool=False,
                 convndim: int=2,
                 stride: Union[int, Sequence[int]]=1,
                 dilation: Union[int, Sequence[int]]=1,
                 groups: int=1):
        super().__init__()
        self.mode = mode
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_layers = num_layers
        self.bias = bias
        self.batch_first = batch_first
        self.dropout = dropout
        self.bidirectional = bidirectional
        self.convndim = convndim

        if convndim == 1:
            ntuple = _single
        elif convndim == 2:
            ntuple = _pair
        elif convndim == 3:
            ntuple = _triple
        else:
            raise ValueError('convndim must be 1, 2, or 3, but got {}'.format(convndim))

        self.kernel_size = ntuple(kernel_size)
        self.stride = ntuple(stride)
        self.dilation = ntuple(dilation)
        self.groups = groups

        num_directions = 2 if bidirectional else 1

        # LSTM variants stack 4 gates, GRU stacks 3, plain RNN has 1.
        if mode in ('LSTM', 'PeepholeLSTM'):
            gate_size = 4 * out_channels
        elif mode == 'GRU':
            gate_size = 3 * out_channels
        else:
            gate_size = out_channels

        self._all_weights = []
        for layer in range(num_layers):
            for direction in range(num_directions):
                # Layer 0 sees the input; deeper layers see both directions'
                # outputs concatenated along the channel axis.
                layer_input_size = in_channels if layer == 0 else out_channels * num_directions
                w_ih = Parameter(torch.Tensor(gate_size, layer_input_size // groups, *self.kernel_size))
                w_hh = Parameter(torch.Tensor(gate_size, out_channels // groups, *self.kernel_size))
                b_ih = Parameter(torch.Tensor(gate_size))
                b_hh = Parameter(torch.Tensor(gate_size))
                if mode == 'PeepholeLSTM':
                    w_pi = Parameter(torch.Tensor(out_channels, out_channels // groups, *self.kernel_size))
                    w_pf = Parameter(torch.Tensor(out_channels, out_channels // groups, *self.kernel_size))
                    w_po = Parameter(torch.Tensor(out_channels, out_channels // groups, *self.kernel_size))
                    layer_params = (w_ih, w_hh, w_pi, w_pf, w_po, b_ih, b_hh)
                    param_names = ['weight_ih_l{}{}', 'weight_hh_l{}{}',
                                   'weight_pi_l{}{}', 'weight_pf_l{}{}', 'weight_po_l{}{}']
                else:
                    layer_params = (w_ih, w_hh, b_ih, b_hh)
                    param_names = ['weight_ih_l{}{}', 'weight_hh_l{}{}']
                if bias:
                    param_names += ['bias_ih_l{}{}', 'bias_hh_l{}{}']

                suffix = '_reverse' if direction == 1 else ''
                param_names = [x.format(layer, suffix) for x in param_names]

                # When bias is False the bias tensors are simply never
                # registered (zip stops at the shorter name list).
                for name, param in zip(param_names, layer_params):
                    setattr(self, name, param)
                self._all_weights.append(param_names)

        self.reset_parameters()

    def reset_parameters(self):
        """Uniform init in [-1/sqrt(out_channels), 1/sqrt(out_channels)]."""
        stdv = 1.0 / math.sqrt(self.out_channels)
        for weight in self.parameters():
            weight.data.uniform_(-stdv, stdv)

    def check_forward_args(self, input, hidden, batch_sizes):
        """Validate input rank/channels and hidden-state shapes; raise RuntimeError on mismatch."""
        is_input_packed = batch_sizes is not None
        expected_input_dim = (2 if is_input_packed else 3) + self.convndim
        if input.dim() != expected_input_dim:
            raise RuntimeError(
                'input must have {} dimensions, got {}'.format(
                    expected_input_dim, input.dim()))
        ch_dim = 1 if is_input_packed else 2
        if self.in_channels != input.size(ch_dim):
            raise RuntimeError(
                'input.size({}) must be equal to in_channels . Expected {}, got {}'.format(
                    ch_dim, self.in_channels, input.size(ch_dim)))

        if is_input_packed:
            mini_batch = int(batch_sizes[0])
        else:
            mini_batch = input.size(0) if self.batch_first else input.size(1)

        num_directions = 2 if self.bidirectional else 1
        expected_hidden_size = (self.num_layers * num_directions,
                                mini_batch, self.out_channels) + input.shape[ch_dim + 1:]

        def check_hidden_size(hx, expected_hidden_size, msg='Expected hidden size {}, got {}'):
            if tuple(hx.size()) != expected_hidden_size:
                raise RuntimeError(msg.format(expected_hidden_size, tuple(hx.size())))

        if self.mode in ('LSTM', 'PeepholeLSTM'):
            check_hidden_size(hidden[0], expected_hidden_size,
                              'Expected hidden[0] size {}, got {}')
            check_hidden_size(hidden[1], expected_hidden_size,
                              'Expected hidden[1] size {}, got {}')
        else:
            check_hidden_size(hidden, expected_hidden_size)

    def forward(self, input, hx=None):
        """Run the stacked recurrence; accepts padded tensors or PackedSequence."""
        is_packed = isinstance(input, PackedSequence)
        if is_packed:
            input, batch_sizes = input
            max_batch_size = batch_sizes[0]
            insize = input.shape[2:]
        else:
            batch_sizes = None
            max_batch_size = input.size(0) if self.batch_first else input.size(1)
            insize = input.shape[3:]

        if hx is None:
            # Default zero hidden state matching the input's spatial size.
            num_directions = 2 if self.bidirectional else 1
            hx = input.new_zeros(self.num_layers * num_directions, max_batch_size, self.out_channels,
                                 *insize, requires_grad=False)
            if self.mode in ('LSTM', 'PeepholeLSTM'):
                hx = (hx, hx)

        self.check_forward_args(input, hx, batch_sizes)
        func = AutogradConvRNN(
            self.mode,
            num_layers=self.num_layers,
            batch_first=self.batch_first,
            dropout=self.dropout,
            train=self.training,
            bidirectional=self.bidirectional,
            variable_length=batch_sizes is not None,
            convndim=self.convndim,
            stride=self.stride,
            dilation=self.dilation,
            groups=self.groups
        )
        output, hidden = func(input, self.all_weights, hx, batch_sizes)
        if is_packed:
            output = PackedSequence(output, batch_sizes)
        return output, hidden

    def extra_repr(self):
        s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}'
             ', stride={stride}')
        if self.dilation != (1,) * len(self.dilation):
            s += ', dilation={dilation}'
        if self.groups != 1:
            s += ', groups={groups}'
        if self.num_layers != 1:
            s += ', num_layers={num_layers}'
        if self.bias is not True:
            s += ', bias={bias}'
        if self.batch_first is not False:
            s += ', batch_first={batch_first}'
        if self.dropout != 0:
            s += ', dropout={dropout}'
        if self.bidirectional is not False:
            s += ', bidirectional={bidirectional}'
        return s.format(**self.__dict__)

    def __setstate__(self, d):
        """Rebuild the `_all_weights` name lists after unpickling."""
        super(ConvNdRNNBase, self).__setstate__(d)
        if 'all_weights' in d:
            self._all_weights = d['all_weights']
        if isinstance(self._all_weights[0][0], str):
            return
        num_layers = self.num_layers
        num_directions = 2 if self.bidirectional else 1
        self._all_weights = []
        for layer in range(num_layers):
            for direction in range(num_directions):
                suffix = '_reverse' if direction == 1 else ''
                if self.mode == 'PeepholeLSTM':
                    weights = ['weight_ih_l{}{}', 'weight_hh_l{}{}',
                               'weight_pi_l{}{}', 'weight_pf_l{}{}', 'weight_po_l{}{}',
                               'bias_ih_l{}{}', 'bias_hh_l{}{}']
                else:
                    weights = ['weight_ih_l{}{}', 'weight_hh_l{}{}',
                               'bias_ih_l{}{}', 'bias_hh_l{}{}']
                weights = [x.format(layer, suffix) for x in weights]
                if self.bias:
                    self._all_weights += [weights]
                else:
                    # Drop exactly the two trailing bias names.  The previous
                    # `weights[:len(weights) // 2]` was wrong for PeepholeLSTM,
                    # whose list holds 5 weight names + 2 bias names.
                    self._all_weights += [weights[:-2]]

    @property
    def all_weights(self):
        """Nested list of parameter tensors, one inner list per layer/direction."""
        return [[getattr(self, weight) for weight in weights] for weights in self._all_weights]
class Conv1dRNN(ConvNdRNNBase):
    """Multi-layer 1-D convolutional vanilla RNN ('tanh' or 'relu' nonlinearity)."""
    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: Union[int, Sequence[int]],
                 nonlinearity: str='tanh',
                 num_layers: int=1,
                 bias: bool=True,
                 batch_first: bool=False,
                 dropout: float=0.,
                 bidirectional: bool=False,
                 stride: Union[int, Sequence[int]]=1,
                 dilation: Union[int, Sequence[int]]=1,
                 groups: int=1):
        if nonlinearity == 'tanh':
            mode = 'RNN_TANH'
        elif nonlinearity == 'relu':
            mode = 'RNN_RELU'
        else:
            raise ValueError("Unknown nonlinearity '{}'".format(nonlinearity))
        super().__init__(
            mode=mode,
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            num_layers=num_layers,
            bias=bias,
            batch_first=batch_first,
            dropout=dropout,
            bidirectional=bidirectional,
            convndim=1,
            stride=stride,
            dilation=dilation,
            groups=groups)
class Conv1dPeepholeLSTM(ConvNdRNNBase):
    """Multi-layer 1-D convolutional LSTM with peephole connections."""
    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: Union[int, Sequence[int]],
                 num_layers: int=1,
                 bias: bool=True,
                 batch_first: bool=False,
                 dropout: float=0.,
                 bidirectional: bool=False,
                 stride: Union[int, Sequence[int]]=1,
                 dilation: Union[int, Sequence[int]]=1,
                 groups: int=1):
        super().__init__(
            mode='PeepholeLSTM',
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            num_layers=num_layers,
            bias=bias,
            batch_first=batch_first,
            dropout=dropout,
            bidirectional=bidirectional,
            convndim=1,
            stride=stride,
            dilation=dilation,
            groups=groups)
class Conv1dLSTM(ConvNdRNNBase):
    """Multi-layer 1-D convolutional LSTM."""
    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: Union[int, Sequence[int]],
                 num_layers: int=1,
                 bias: bool=True,
                 batch_first: bool=False,
                 dropout: float=0.,
                 bidirectional: bool=False,
                 stride: Union[int, Sequence[int]]=1,
                 dilation: Union[int, Sequence[int]]=1,
                 groups: int=1):
        super().__init__(
            mode='LSTM',
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            num_layers=num_layers,
            bias=bias,
            batch_first=batch_first,
            dropout=dropout,
            bidirectional=bidirectional,
            convndim=1,
            stride=stride,
            dilation=dilation,
            groups=groups)
class Conv1dGRU(ConvNdRNNBase):
    """Multi-layer 1-D convolutional GRU."""
    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: Union[int, Sequence[int]],
                 num_layers: int=1,
                 bias: bool=True,
                 batch_first: bool=False,
                 dropout: float=0.,
                 bidirectional: bool=False,
                 stride: Union[int, Sequence[int]]=1,
                 dilation: Union[int, Sequence[int]]=1,
                 groups: int=1):
        super().__init__(
            mode='GRU',
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            num_layers=num_layers,
            bias=bias,
            batch_first=batch_first,
            dropout=dropout,
            bidirectional=bidirectional,
            convndim=1,
            stride=stride,
            dilation=dilation,
            groups=groups)
class Conv2dRNN(ConvNdRNNBase):
    """Multi-layer 2-D convolutional vanilla RNN ('tanh' or 'relu' nonlinearity)."""
    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: Union[int, Sequence[int]],
                 nonlinearity: str='tanh',
                 num_layers: int=1,
                 bias: bool=True,
                 batch_first: bool=False,
                 dropout: float=0.,
                 bidirectional: bool=False,
                 stride: Union[int, Sequence[int]]=1,
                 dilation: Union[int, Sequence[int]]=1,
                 groups: int=1):
        if nonlinearity == 'tanh':
            mode = 'RNN_TANH'
        elif nonlinearity == 'relu':
            mode = 'RNN_RELU'
        else:
            raise ValueError("Unknown nonlinearity '{}'".format(nonlinearity))
        super().__init__(
            mode=mode,
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            num_layers=num_layers,
            bias=bias,
            batch_first=batch_first,
            dropout=dropout,
            bidirectional=bidirectional,
            convndim=2,
            stride=stride,
            dilation=dilation,
            groups=groups)
class Conv2dLSTM(ConvNdRNNBase):
    """Multi-layer 2-D convolutional LSTM."""
    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: Union[int, Sequence[int]],
                 num_layers: int=1,
                 bias: bool=True,
                 batch_first: bool=False,
                 dropout: float=0.,
                 bidirectional: bool=False,
                 stride: Union[int, Sequence[int]]=1,
                 dilation: Union[int, Sequence[int]]=1,
                 groups: int=1):
        super().__init__(
            mode='LSTM',
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            num_layers=num_layers,
            bias=bias,
            batch_first=batch_first,
            dropout=dropout,
            bidirectional=bidirectional,
            convndim=2,
            stride=stride,
            dilation=dilation,
            groups=groups)
class Conv2dPeepholeLSTM(ConvNdRNNBase):
    """Multi-layer 2-D convolutional LSTM with peephole connections."""
    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: Union[int, Sequence[int]],
                 num_layers: int=1,
                 bias: bool=True,
                 batch_first: bool=False,
                 dropout: float=0.,
                 bidirectional: bool=False,
                 stride: Union[int, Sequence[int]]=1,
                 dilation: Union[int, Sequence[int]]=1,
                 groups: int=1):
        super().__init__(
            mode='PeepholeLSTM',
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            num_layers=num_layers,
            bias=bias,
            batch_first=batch_first,
            dropout=dropout,
            bidirectional=bidirectional,
            convndim=2,
            stride=stride,
            dilation=dilation,
            groups=groups)
class Conv2dGRU(ConvNdRNNBase):
    """Multi-layer 2-D convolutional GRU."""
    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: Union[int, Sequence[int]],
                 num_layers: int=1,
                 bias: bool=True,
                 batch_first: bool=False,
                 dropout: float=0.,
                 bidirectional: bool=False,
                 stride: Union[int, Sequence[int]]=1,
                 dilation: Union[int, Sequence[int]]=1,
                 groups: int=1):
        super().__init__(
            mode='GRU',
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            num_layers=num_layers,
            bias=bias,
            batch_first=batch_first,
            dropout=dropout,
            bidirectional=bidirectional,
            convndim=2,
            stride=stride,
            dilation=dilation,
            groups=groups)
class Conv3dRNN(ConvNdRNNBase):
    """Multi-layer 3-D convolutional vanilla RNN ('tanh' or 'relu' nonlinearity)."""
    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: Union[int, Sequence[int]],
                 nonlinearity: str='tanh',
                 num_layers: int=1,
                 bias: bool=True,
                 batch_first: bool=False,
                 dropout: float=0.,
                 bidirectional: bool=False,
                 stride: Union[int, Sequence[int]]=1,
                 dilation: Union[int, Sequence[int]]=1,
                 groups: int=1):
        if nonlinearity == 'tanh':
            mode = 'RNN_TANH'
        elif nonlinearity == 'relu':
            mode = 'RNN_RELU'
        else:
            raise ValueError("Unknown nonlinearity '{}'".format(nonlinearity))
        super().__init__(
            mode=mode,
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            num_layers=num_layers,
            bias=bias,
            batch_first=batch_first,
            dropout=dropout,
            bidirectional=bidirectional,
            convndim=3,
            stride=stride,
            dilation=dilation,
            groups=groups)
class Conv3dLSTM(ConvNdRNNBase):
    """Multi-layer 3-D convolutional LSTM."""
    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: Union[int, Sequence[int]],
                 num_layers: int=1,
                 bias: bool=True,
                 batch_first: bool=False,
                 dropout: float=0.,
                 bidirectional: bool=False,
                 stride: Union[int, Sequence[int]]=1,
                 dilation: Union[int, Sequence[int]]=1,
                 groups: int=1):
        super().__init__(
            mode='LSTM',
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            num_layers=num_layers,
            bias=bias,
            batch_first=batch_first,
            dropout=dropout,
            bidirectional=bidirectional,
            convndim=3,
            stride=stride,
            dilation=dilation,
            groups=groups)
class Conv3dPeepholeLSTM(ConvNdRNNBase):
    """Multi-layer 3-D convolutional LSTM with peephole connections."""
    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: Union[int, Sequence[int]],
                 num_layers: int=1,
                 bias: bool=True,
                 batch_first: bool=False,
                 dropout: float=0.,
                 bidirectional: bool=False,
                 stride: Union[int, Sequence[int]]=1,
                 dilation: Union[int, Sequence[int]]=1,
                 groups: int=1):
        super().__init__(
            mode='PeepholeLSTM',
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            num_layers=num_layers,
            bias=bias,
            batch_first=batch_first,
            dropout=dropout,
            bidirectional=bidirectional,
            convndim=3,
            stride=stride,
            dilation=dilation,
            groups=groups)
class Conv3dGRU(ConvNdRNNBase):
    """Multi-layer 3-D convolutional GRU."""
    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: Union[int, Sequence[int]],
                 num_layers: int=1,
                 bias: bool=True,
                 batch_first: bool=False,
                 dropout: float=0.,
                 bidirectional: bool=False,
                 stride: Union[int, Sequence[int]]=1,
                 dilation: Union[int, Sequence[int]]=1,
                 groups: int=1):
        super().__init__(
            mode='GRU',
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            num_layers=num_layers,
            bias=bias,
            batch_first=batch_first,
            dropout=dropout,
            bidirectional=bidirectional,
            convndim=3,
            stride=stride,
            dilation=dilation,
            groups=groups)
class ConvRNNCellBase(torch.nn.Module):
    """Base class for single-step N-dimensional convolutional RNN cells.

    Fix vs. the original: ``extra_repr`` referenced ``self.bidirectional``,
    which is never set on cells, so calling ``repr()`` on any cell raised
    AttributeError; the check has been removed.
    """

    def __init__(self,
                 mode: str,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: Union[int, Sequence[int]],
                 bias: bool=True,
                 convndim: int=2,
                 stride: Union[int, Sequence[int]]=1,
                 dilation: Union[int, Sequence[int]]=1,
                 groups: int=1
                 ):
        super().__init__()
        self.mode = mode
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.bias = bias
        self.convndim = convndim

        if convndim == 1:
            ntuple = _single
        elif convndim == 2:
            ntuple = _pair
        elif convndim == 3:
            ntuple = _triple
        else:
            raise ValueError('convndim must be 1, 2, or 3, but got {}'.format(convndim))

        self.kernel_size = ntuple(kernel_size)
        self.stride = ntuple(stride)
        self.dilation = ntuple(dilation)
        self.groups = groups

        # LSTM variants stack 4 gates, GRU stacks 3, plain RNN has 1.
        if mode in ('LSTM', 'PeepholeLSTM'):
            gate_size = 4 * out_channels
        elif mode == 'GRU':
            gate_size = 3 * out_channels
        else:
            gate_size = out_channels

        self.weight_ih = Parameter(torch.Tensor(gate_size, in_channels // groups, *self.kernel_size))
        self.weight_hh = Parameter(torch.Tensor(gate_size, out_channels // groups, *self.kernel_size))

        if bias:
            self.bias_ih = Parameter(torch.Tensor(gate_size))
            self.bias_hh = Parameter(torch.Tensor(gate_size))
        else:
            self.register_parameter('bias_ih', None)
            self.register_parameter('bias_hh', None)

        if mode == 'PeepholeLSTM':
            self.weight_pi = Parameter(torch.Tensor(out_channels, out_channels // groups, *self.kernel_size))
            self.weight_pf = Parameter(torch.Tensor(out_channels, out_channels // groups, *self.kernel_size))
            self.weight_po = Parameter(torch.Tensor(out_channels, out_channels // groups, *self.kernel_size))

        self.reset_parameters()

    def extra_repr(self):
        s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}'
             ', stride={stride}')
        if self.dilation != (1,) * len(self.dilation):
            s += ', dilation={dilation}'
        if self.groups != 1:
            s += ', groups={groups}'
        if self.bias is not True:
            s += ', bias={bias}'
        # Note: the original also checked self.bidirectional here, but cells
        # never define that attribute, so repr() always raised AttributeError.
        return s.format(**self.__dict__)

    def check_forward_input(self, input):
        """Raise RuntimeError when the input channel count does not match."""
        if input.size(1) != self.in_channels:
            raise RuntimeError(
                "input has inconsistent channels: got {}, expected {}".format(
                    input.size(1), self.in_channels))

    def check_forward_hidden(self, input, hx, hidden_label=''):
        """Raise RuntimeError when the hidden state's batch or channels mismatch."""
        if input.size(0) != hx.size(0):
            raise RuntimeError(
                "Input batch size {} doesn't match hidden{} batch size {}".format(
                    input.size(0), hidden_label, hx.size(0)))

        if hx.size(1) != self.out_channels:
            raise RuntimeError(
                "hidden{} has inconsistent hidden_size: got {}, expected {}".format(
                    hidden_label, hx.size(1), self.out_channels))

    def reset_parameters(self):
        """Uniform init in [-1/sqrt(out_channels), 1/sqrt(out_channels)]."""
        stdv = 1.0 / math.sqrt(self.out_channels)
        for weight in self.parameters():
            weight.data.uniform_(-stdv, stdv)

    def forward(self, input, hx=None):
        """Run one recurrence step; hx defaults to zeros of the input's spatial size."""
        self.check_forward_input(input)
        if hx is None:
            batch_size = input.size(0)
            insize = input.shape[2:]
            hx = input.new_zeros(batch_size, self.out_channels, *insize, requires_grad=False)
            if self.mode in ('LSTM', 'PeepholeLSTM'):
                hx = (hx, hx)
        if self.mode in ('LSTM', 'PeepholeLSTM'):
            self.check_forward_hidden(input, hx[0])
            self.check_forward_hidden(input, hx[1])
        else:
            self.check_forward_hidden(input, hx)

        cell = _conv_cell_helper(
            self.mode,
            convndim=self.convndim,
            stride=self.stride,
            dilation=self.dilation,
            groups=self.groups)
        if self.mode == 'PeepholeLSTM':
            return cell(
                input, hx,
                self.weight_ih, self.weight_hh, self.weight_pi, self.weight_pf, self.weight_po,
                self.bias_ih, self.bias_hh
            )
        else:
            return cell(
                input, hx,
                self.weight_ih, self.weight_hh,
                self.bias_ih, self.bias_hh,
            )
class Conv1dRNNCell(ConvRNNCellBase):
    """Single-step 1-D convolutional vanilla RNN cell ('tanh' or 'relu')."""
    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: Union[int, Sequence[int]],
                 nonlinearity: str='tanh',
                 bias: bool=True,
                 stride: Union[int, Sequence[int]]=1,
                 dilation: Union[int, Sequence[int]]=1,
                 groups: int=1
                 ):
        if nonlinearity == 'tanh':
            mode = 'RNN_TANH'
        elif nonlinearity == 'relu':
            mode = 'RNN_RELU'
        else:
            raise ValueError("Unknown nonlinearity '{}'".format(nonlinearity))
        super().__init__(
            mode=mode,
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            bias=bias,
            convndim=1,
            stride=stride,
            dilation=dilation,
            groups=groups
        )
class Conv1dLSTMCell(ConvRNNCellBase):
    """Single-step 1-D convolutional LSTM cell."""
    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: Union[int, Sequence[int]],
                 bias: bool=True,
                 stride: Union[int, Sequence[int]]=1,
                 dilation: Union[int, Sequence[int]]=1,
                 groups: int=1
                 ):
        super().__init__(
            mode='LSTM',
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            bias=bias,
            convndim=1,
            stride=stride,
            dilation=dilation,
            groups=groups
        )
class Conv1dPeepholeLSTMCell(ConvRNNCellBase):
    """Single-step 1-D convolutional peephole LSTM cell."""
    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: Union[int, Sequence[int]],
                 bias: bool=True,
                 stride: Union[int, Sequence[int]]=1,
                 dilation: Union[int, Sequence[int]]=1,
                 groups: int=1
                 ):
        super().__init__(
            mode='PeepholeLSTM',
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            bias=bias,
            convndim=1,
            stride=stride,
            dilation=dilation,
            groups=groups
        )
class Conv1dGRUCell(ConvRNNCellBase):
    """Single-step 1-D convolutional GRU cell."""
    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: Union[int, Sequence[int]],
                 bias: bool=True,
                 stride: Union[int, Sequence[int]]=1,
                 dilation: Union[int, Sequence[int]]=1,
                 groups: int=1
                 ):
        super().__init__(
            mode='GRU',
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            bias=bias,
            convndim=1,
            stride=stride,
            dilation=dilation,
            groups=groups
        )
class Conv2dRNNCell(ConvRNNCellBase):
    """Single-step 2-D convolutional vanilla RNN cell ('tanh' or 'relu')."""
    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: Union[int, Sequence[int]],
                 nonlinearity: str='tanh',
                 bias: bool=True,
                 stride: Union[int, Sequence[int]]=1,
                 dilation: Union[int, Sequence[int]]=1,
                 groups: int=1
                 ):
        if nonlinearity == 'tanh':
            mode = 'RNN_TANH'
        elif nonlinearity == 'relu':
            mode = 'RNN_RELU'
        else:
            raise ValueError("Unknown nonlinearity '{}'".format(nonlinearity))
        super().__init__(
            mode=mode,
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            bias=bias,
            convndim=2,
            stride=stride,
            dilation=dilation,
            groups=groups
        )
class Conv2dLSTMCell(ConvRNNCellBase):
    """Single-step 2-D convolutional LSTM cell."""
    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: Union[int, Sequence[int]],
                 bias: bool=True,
                 stride: Union[int, Sequence[int]]=1,
                 dilation: Union[int, Sequence[int]]=1,
                 groups: int=1
                 ):
        super().__init__(
            mode='LSTM',
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            bias=bias,
            convndim=2,
            stride=stride,
            dilation=dilation,
            groups=groups
        )
class Conv2dPeepholeLSTMCell(ConvRNNCellBase):
    """Single-step 2-D convolutional peephole LSTM cell."""
    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: Union[int, Sequence[int]],
                 bias: bool=True,
                 stride: Union[int, Sequence[int]]=1,
                 dilation: Union[int, Sequence[int]]=1,
                 groups: int=1
                 ):
        super().__init__(
            mode='PeepholeLSTM',
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            bias=bias,
            convndim=2,
            stride=stride,
            dilation=dilation,
            groups=groups
        )
class Conv2dGRUCell(ConvRNNCellBase):
    """Single-step 2-D convolutional GRU cell."""
    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: Union[int, Sequence[int]],
                 bias: bool=True,
                 stride: Union[int, Sequence[int]]=1,
                 dilation: Union[int, Sequence[int]]=1,
                 groups: int=1
                 ):
        super().__init__(
            mode='GRU',
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            bias=bias,
            convndim=2,
            stride=stride,
            dilation=dilation,
            groups=groups
        )
class Conv3dRNNCell(ConvRNNCellBase):
    """Single-step 3-D convolutional vanilla RNN cell ('tanh' or 'relu')."""
    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: Union[int, Sequence[int]],
                 nonlinearity: str='tanh',
                 bias: bool=True,
                 stride: Union[int, Sequence[int]]=1,
                 dilation: Union[int, Sequence[int]]=1,
                 groups: int=1
                 ):
        if nonlinearity == 'tanh':
            mode = 'RNN_TANH'
        elif nonlinearity == 'relu':
            mode = 'RNN_RELU'
        else:
            raise ValueError("Unknown nonlinearity '{}'".format(nonlinearity))
        super().__init__(
            mode=mode,
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            bias=bias,
            convndim=3,
            stride=stride,
            dilation=dilation,
            groups=groups
        )
class Conv3dLSTMCell(ConvRNNCellBase):
    """Single-step 3-D convolutional LSTM cell."""
    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: Union[int, Sequence[int]],
                 bias: bool=True,
                 stride: Union[int, Sequence[int]]=1,
                 dilation: Union[int, Sequence[int]]=1,
                 groups: int=1
                 ):
        super().__init__(
            mode='LSTM',
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            bias=bias,
            convndim=3,
            stride=stride,
            dilation=dilation,
            groups=groups
        )
class Conv3dPeepholeLSTMCell(ConvRNNCellBase):
    """Single-step 3-D convolutional peephole LSTM cell."""
    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: Union[int, Sequence[int]],
                 bias: bool=True,
                 stride: Union[int, Sequence[int]]=1,
                 dilation: Union[int, Sequence[int]]=1,
                 groups: int=1
                 ):
        super().__init__(
            mode='PeepholeLSTM',
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            bias=bias,
            convndim=3,
            stride=stride,
            dilation=dilation,
            groups=groups
        )
class Conv3dGRUCell(ConvRNNCellBase):
    """Single-step 3-D convolutional GRU cell."""
    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: Union[int, Sequence[int]],
                 bias: bool=True,
                 stride: Union[int, Sequence[int]]=1,
                 dilation: Union[int, Sequence[int]]=1,
                 groups: int=1
                 ):
        super().__init__(
            mode='GRU',
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            bias=bias,
            convndim=3,
            stride=stride,
            dilation=dilation,
            groups=groups
        )
| 35,171 | 33.789318 | 109 | py |
MGANet-DCC2020 | MGANet-DCC2020-master/codes/LSTM/BiConvLSTM.py | import torch.nn as nn
from torch.autograd import Variable
import torch
# Select GPU 0 only when CUDA is actually available, so importing this module
# no longer crashes on CPU-only machines.
if torch.cuda.is_available():
    torch.cuda.set_device(0)
class BiConvLSTMCell(nn.Module):
    """One step of a convolutional LSTM.

    All four gate pre-activations are produced by a single 'same'-padded
    convolution over the channel-concatenated (input, hidden) pair.
    """

    def __init__(self, input_size, input_dim, hidden_dim, kernel_size, bias):
        """
        Initialize ConvLSTM cell.

        Parameters
        ----------
        input_size: (int, int)
            Height and width of input tensor as (height, width).
        input_dim: int
            Number of channels of input tensor.
        hidden_dim: int
            Number of channels of hidden state.
        kernel_size: (int, int)
            Size of the convolutional kernel.
        bias: bool
            Whether or not to add the bias.
        """
        super(BiConvLSTMCell, self).__init__()

        self.height, self.width = input_size
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.kernel_size = kernel_size
        # Half-kernel padding keeps height and width unchanged.
        self.padding = kernel_size[0] // 2, kernel_size[1] // 2
        self.bias = bias

        # One convolution emits all four gate pre-activations (i, f, o, g).
        self.conv = nn.Conv2d(in_channels=self.input_dim + self.hidden_dim,
                              out_channels=4 * self.hidden_dim,
                              kernel_size=self.kernel_size,
                              padding=self.padding,
                              bias=self.bias)

        # TODO: we may want this to be different than the conv we use inside each cell
        self.conv_concat = nn.Conv2d(in_channels=self.input_dim + self.hidden_dim,
                                     out_channels=self.hidden_dim,
                                     kernel_size=self.kernel_size,
                                     padding=self.padding,
                                     bias=self.bias)

    def forward(self, input_tensor, cur_state):
        prev_h, prev_c = cur_state

        stacked = torch.cat([input_tensor, prev_h], dim=1)  # concatenate along channel axis
        gate_activations = self.conv(stacked)
        raw_i, raw_f, raw_o, raw_g = torch.split(gate_activations, self.hidden_dim, dim=1)

        in_gate = torch.sigmoid(raw_i)
        forget_gate = torch.sigmoid(raw_f)
        out_gate = torch.sigmoid(raw_o)
        candidate = torch.tanh(raw_g)

        next_c = forget_gate * prev_c + in_gate * candidate
        next_h = out_gate * torch.tanh(next_c)

        return next_h, next_c
class BiConvLSTM(nn.Module):
    """Bidirectional convolutional LSTM over a (B, T, C, H, W) sequence.

    Each layer runs its BiConvLSTMCell forward and backward over time and
    fuses the two directions per time step with the cell's `conv_concat`.
    Returns the last layer's (B, T, hidden_dim[-1], H, W) output, or a list
    of every layer's output when return_all_layers is True.
    """

    def __init__(self, input_size, input_dim, hidden_dim, kernel_size, num_layers,
                 bias=True, return_all_layers=False):
        super(BiConvLSTM, self).__init__()
        self._check_kernel_size_consistency(kernel_size)
        # Make sure that both `kernel_size` and `hidden_dim` are lists having len == num_layers
        kernel_size = self._extend_for_multilayer(kernel_size, num_layers)
        hidden_dim = self._extend_for_multilayer(hidden_dim, num_layers)
        if not len(kernel_size) == len(hidden_dim) == num_layers:
            raise ValueError('Inconsistent list length.')
        self.height, self.width = input_size
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.kernel_size = kernel_size
        self.num_layers = num_layers
        self.bias = bias
        self.return_all_layers = return_all_layers
        cell_list = []
        for i in range(0, self.num_layers):
            # Layer i>0 consumes the previous layer's hidden channels.
            cur_input_dim = self.input_dim if i == 0 else self.hidden_dim[i - 1]
            cell_list.append(BiConvLSTMCell(input_size=(self.height, self.width),
                                            input_dim=cur_input_dim,
                                            hidden_dim=self.hidden_dim[i],
                                            kernel_size=self.kernel_size[i],
                                            bias=self.bias))
        self.cell_list = nn.ModuleList(cell_list)

    def forward(self, input_tensor):
        """input_tensor: (B, T, C, H, W) sequence of feature maps."""
        hidden_state = self._init_hidden(batch_size=input_tensor.size(0), cuda=input_tensor.is_cuda)
        layer_output_list = []
        seq_len = input_tensor.size(1)
        cur_layer_input = input_tensor
        for layer_idx in range(self.num_layers):
            backward_states = []
            forward_states = []
            output_inner = []
            # Backward pass over time (reversed index).
            hb, cb = hidden_state[layer_idx]
            for t in range(seq_len):
                hb, cb = self.cell_list[layer_idx](input_tensor=cur_layer_input[:, seq_len - t - 1, :, :, :], cur_state=[hb, cb])
                backward_states.append(hb)
            # Forward pass over time (fresh zero state).
            hf, cf = hidden_state[layer_idx]
            for t in range(seq_len):
                hf, cf = self.cell_list[layer_idx](input_tensor=cur_layer_input[:, t, :, :, :], cur_state=[hf, cf])
                forward_states.append(hf)
            # Fuse forward/backward states that belong to the same time step.
            for t in range(seq_len):
                h = self.cell_list[layer_idx].conv_concat(torch.cat((forward_states[t], backward_states[seq_len - t - 1]), dim=1))
                output_inner.append(h)
            layer_output = torch.stack(output_inner, dim=1)
            cur_layer_input = layer_output
            layer_output_list.append(layer_output)
        if not self.return_all_layers:
            return layer_output_list[-1]
        return layer_output_list

    def _init_hidden(self, batch_size, cuda):
        """Zero (h, c) pair per layer on the appropriate device.

        BUGFIX: the original ignored `cuda` and called .cuda() in BOTH
        branches, crashing on CPU-only machines. (Variable wrappers were
        also dropped; they are a deprecated no-op in modern PyTorch.)
        """
        device = torch.device('cuda') if cuda else torch.device('cpu')
        init_states = []
        for i in range(self.num_layers):
            shape = (batch_size, self.hidden_dim[i], self.height, self.width)
            init_states.append((torch.zeros(*shape, device=device),
                                torch.zeros(*shape, device=device)))
        return init_states

    @staticmethod
    def _check_kernel_size_consistency(kernel_size):
        if not (isinstance(kernel_size, tuple) or
                (isinstance(kernel_size, list) and all([isinstance(elem, tuple) for elem in kernel_size]))):
            raise ValueError('`kernel_size` must be tuple or list of tuples')

    @staticmethod
    def _extend_for_multilayer(param, num_layers):
        # Broadcast a scalar hyper-parameter to one entry per layer.
        if not isinstance(param, list):
            param = [param] * num_layers
        return param
return param | 6,521 | 39.259259 | 130 | py |
MGANet-DCC2020 | MGANet-DCC2020-master/codes/Net/net_view.py | from graphviz import Digraph
from torch.autograd import Variable
import torch
def make_dot(var, params=None):
    """Produce a Graphviz representation of a PyTorch autograd graph.

    Blue nodes are Variables that require grad, orange are tensors saved
    for backward inside torch.autograd.Function nodes.

    Args:
        var: output Variable/Tensor whose ``.grad_fn`` roots the graph.
        params: optional dict of (name, Variable) used to label parameter
            nodes with their names.

    Returns:
        graphviz.Digraph of the backward graph.
    """
    if params is not None:
        # BUGFIX: dict views are not indexable in Python 3; the original
        # `params.values()[0]` raised TypeError.
        assert isinstance(next(iter(params.values())), Variable)
        param_map = {id(v): k for k, v in params.items()}
    node_attr = dict(style='filled',
                     shape='box',
                     align='left',
                     fontsize='12',
                     ranksep='0.1',
                     height='0.2')
    dot = Digraph(node_attr=node_attr, graph_attr=dict(size="12,12"))
    seen = set()

    def size_to_str(size):
        return '('+(', ').join(['%d' % v for v in size])+')'

    def add_nodes(var):
        # Depth-first walk over grad_fn graph; `seen` breaks cycles/shared nodes.
        if var not in seen:
            if torch.is_tensor(var):
                dot.node(str(id(var)), size_to_str(var.size()), fillcolor='orange')
            elif hasattr(var, 'variable'):
                # AccumulateGrad node: a leaf Variable (parameter).
                u = var.variable
                name = param_map[id(u)] if params is not None else ''
                node_name = '%s\n %s' % (name, size_to_str(u.size()))
                dot.node(str(id(var)), node_name, fillcolor='lightblue')
            else:
                dot.node(str(id(var)), str(type(var).__name__))
            seen.add(var)
            if hasattr(var, 'next_functions'):
                for u in var.next_functions:
                    if u[0] is not None:
                        dot.edge(str(id(u[0])), str(id(var)))
                        add_nodes(u[0])
            if hasattr(var, 'saved_tensors'):
                for t in var.saved_tensors:
                    dot.edge(str(id(t)), str(id(var)))
                    add_nodes(t)
    add_nodes(var.grad_fn)
    return dot
| 2,016 | 37.788462 | 83 | py |
MGANet-DCC2020 | MGANet-DCC2020-master/codes/Net/multiscaleloss.py | import torch
import torch.nn as nn
def EPE(input_image, target_image, L_model=None):
    """Loss between predicted and target image, delegated to `L_model`.

    Args:
        input_image: predicted image tensor.
        target_image: ground-truth image tensor (same shape).
        L_model: callable loss (e.g. an nn.MSELoss instance); required.

    Returns:
        The scalar loss ``L_model(input_image, target_image)``.

    Raises:
        ValueError: if L_model is None (instead of the original opaque
            "'NoneType' object is not callable" TypeError).
    """
    if L_model is None:
        raise ValueError("EPE requires a loss callable via L_model")
    return L_model(input_image, target_image)
def sparse_max_pool(input, size):
    """Adaptive max pooling that treats positive and negative values separately.

    Positive entries are max-pooled directly; negative entries are max-pooled
    by magnitude and subtracted, so extremes of both signs survive pooling.
    """
    pos_mask = (input > 0).float()
    neg_mask = (input < 0).float()
    pooled_pos = nn.functional.adaptive_max_pool2d(input * pos_mask, size)
    pooled_neg = nn.functional.adaptive_max_pool2d(-input * neg_mask, size)
    return pooled_pos - pooled_neg
def multiscaleEPE(network_output, target_image, weights=None, L_model=None):
    """Weighted sum of EPE losses over multi-scale network outputs.

    Each output in `network_output` is compared against the target
    downsampled (adaptive average pooling) to that output's spatial size.
    """
    def one_scale(output, target, L_model):
        _, _, h, w = output.size()
        target_scaled = nn.functional.adaptive_avg_pool2d(target, (h, w))
        return EPE(output, target_scaled, L_model)

    if type(network_output) not in [tuple, list]:
        network_output = [network_output]
    if weights is None:
        # Default weighting for a 5-level pyramid, coarsest scale first.
        weights = [1.0/32, 1.0/16.0, 1.0/8.0, 1.0/4.0, 1.0/2.0]
    assert(len(weights) == len(network_output))
    total = 0
    for scale_output, weight in zip(network_output, weights):
        total += weight * one_scale(scale_output, target_image, L_model)
    return total
MGANet-DCC2020 | MGANet-DCC2020-master/codes/Net/MGANet.py | import torch
import torch.nn as nn
from torch.nn.init import kaiming_normal
from LSTM.BiConvLSTM import BiConvLSTM
def conv(batchNorm, in_planes, out_planes, kernel_size=3, stride=1):
    """Conv2d (+ optional BatchNorm) followed by LeakyReLU(0.05).

    With batchNorm the conv carries no bias (BN supplies the shift);
    without it the conv is biased. Padding keeps the spatial size for
    odd kernels at stride 1.
    """
    pad = (kernel_size - 1) // 2
    layers = [nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
                        stride=stride, padding=pad, bias=not batchNorm)]
    if batchNorm:
        layers.append(nn.BatchNorm2d(out_planes))
    layers.append(nn.LeakyReLU(0.05, inplace=True))
    return nn.Sequential(*layers)
def conv_no_lrelu(batchNorm, in_planes, out_planes, kernel_size=3, stride=1):
    """Conv2d (+ optional BatchNorm) block with no trailing activation.

    Same bias/padding convention as `conv`, used for output heads.
    """
    pad = (kernel_size - 1) // 2
    layers = [nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
                        stride=stride, padding=pad, bias=not batchNorm)]
    if batchNorm:
        layers.append(nn.BatchNorm2d(out_planes))
    return nn.Sequential(*layers)
def predict_image(in_planes):
    """1-channel 3x3 prediction head (no bias); spatial size preserved."""
    head = nn.Conv2d(in_planes, 1, kernel_size=3, stride=1, padding=1, bias=False)
    return head
def deconv(in_planes, out_planes):
    """Transposed conv that doubles spatial size, followed by LeakyReLU(0.05)."""
    upconv = nn.ConvTranspose2d(in_planes, out_planes, kernel_size=4, stride=2,
                                padding=1, bias=False)
    return nn.Sequential(upconv, nn.LeakyReLU(0.05, inplace=True))
def crop_like(input, target):
    """Crop `input`'s spatial dims down to `target`'s (identity if equal)."""
    if input.size()[2:] == target.size()[2:]:
        return input
    h, w = target.size(2), target.size(3)
    return input[:, :, :h, :w]
class Gen_Guided_UNet(nn.Module):
    """Mask-guided U-Net for frame enhancement.

    Three temporally adjacent frames pass through per-frame conv stems and a
    bidirectional ConvLSTM; the center frame's fused features and a mask
    branch feed a shared 4-level encoder/decoder with side predictions at
    every scale. Training mode returns all side outputs plus the final image;
    inference returns only the final image.
    """
    expansion = 1
    def __init__(self,batchNorm=True,input_size=[240,416],is_training=True):
        super(Gen_Guided_UNet,self).__init__()
        self.batchNorm = batchNorm
        self.is_training = is_training
        # Per-frame stems (single-channel inputs — presumably luma; confirm with caller).
        self.pre_conv1 = conv(self.batchNorm, 1, 64, kernel_size=3, stride=1)
        self.pre_conv1_1 = conv(self.batchNorm, 64, 64, kernel_size=3, stride=1)
        self.pre_conv2 = conv(self.batchNorm, 1, 64, kernel_size=3, stride=1)
        self.pre_conv2_1 = conv(self.batchNorm, 64, 64, kernel_size=3, stride=1)
        self.pre_conv3 = conv(self.batchNorm, 1, 64, kernel_size=3, stride=1)
        self.pre_conv3_1 = conv(self.batchNorm, 64, 64, kernel_size=3, stride=1)
        # Temporal fusion of the 3-frame feature sequence.
        self.biconvlstm = BiConvLSTM(input_size=(input_size[0], input_size[1]), input_dim=64, hidden_dim=64,kernel_size=(3, 3), num_layers=1)
        # 1x1 conv mixing [stem features ‖ LSTM features] (64+64 channels).
        self.LSTM_out = conv(self.batchNorm,128,64, kernel_size=1, stride=1)
        # Mask guidance branch.
        self.conv1_mask = conv(self.batchNorm, 1, 64, kernel_size=3, stride=1)
        self.conv2_mask = conv(self.batchNorm, 64, 64, kernel_size=3, stride=1)
        # Encoder (shared weights between image and mask paths): 4 stride-2 stages.
        self.conv1 = conv(self.batchNorm, 64, 128, kernel_size=7, stride=2)#64
        self.conv1_1 = conv(self.batchNorm, 128,128) # 128*128 ->64*64
        self.conv2 = conv(self.batchNorm, 128,256, kernel_size=3, stride=2)#64 ->32
        self.conv2_1 = conv(self.batchNorm, 256,256) # 128*128 ->64*64
        self.conv3 = conv(self.batchNorm, 256,512, kernel_size=3, stride=2)#32->16
        self.conv3_1 = conv(self.batchNorm, 512,512)
        self.conv4 = conv(self.batchNorm, 512,1024, kernel_size=3, stride=2)#16->8
        self.conv4_1 = conv(self.batchNorm, 1024,1024)
        # Decoder; +1 channel at each level is the upsampled side prediction.
        self.deconv4 = deconv(1024,512)
        self.deconv3 = deconv(1025,256)
        self.deconv2 = deconv(513,128)
        self.deconv1 = deconv(257,64)
        # Side prediction heads at each decoder scale.
        self.predict_image4 = predict_image(1024)
        self.predict_image3 = predict_image(1025)
        self.predict_image2 = predict_image(513)
        self.predict_image1 = predict_image(257)
        # Learned x2 upsamplers carrying a side prediction up one scale.
        self.upsampled_image4_to_3 = nn.ConvTranspose2d(1,1, 4, 2, 1, bias=False)#8_16
        self.upsampled_image3_to_2 = nn.ConvTranspose2d(1,1, 4, 2, 1, bias=False)#16-32
        self.upsampled_image2_to_1 = nn.ConvTranspose2d(1,1, 4, 2, 1, bias=False)#32-64
        self.upsampled_image1_to_finally = nn.ConvTranspose2d(1, 1, 4, 2, 1, bias=False) # 64-128
        # Full-resolution refinement head.
        self.output1 = conv(self.batchNorm,129,64,kernel_size=3,stride=1)
        self.output2 = conv(self.batchNorm, 64, 64, kernel_size=3, stride=1)
        self.output3 = conv_no_lrelu(self.batchNorm,64,1,kernel_size=3,stride=1)
        # Weight init for all convs / transposed convs.
        # NOTE(review): `kaiming_normal` is the long-deprecated alias of
        # `kaiming_normal_`; newer torch releases drop it — confirm the
        # pinned torch version supports it.
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
                kaiming_normal(m.weight.data,a=0.05)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def forward(self, data1,data2,data3,mask):
        """data1/2/3: previous/current/next frames (B,1,H,W); mask: guidance map."""
        CNN_seq = []
        pre_conv1 = self.pre_conv1(data1)
        pre_conv1_1 = self.pre_conv1_1(pre_conv1)
        CNN_seq.append(pre_conv1_1)
        pre_conv2 = self.pre_conv2(data2)
        pre_conv2_1 = self.pre_conv2_1(pre_conv2)
        CNN_seq.append(pre_conv2_1)
        pre_conv3 = self.pre_conv3(data3)
        pre_conv3_1 = self.pre_conv3_1(pre_conv3)
        CNN_seq.append(pre_conv3_1)
        # Stack frames on a time axis and fuse with the Bi-ConvLSTM.
        CNN_seq_out = torch.stack(CNN_seq, dim=1)
        CNN_seq_feature_maps = self.biconvlstm(CNN_seq_out)
        # Keep only the center frame's (index 1) stem + LSTM features.
        # CNN_concat_input = CNN_seq_out[:, 1, ...]+CNN_seq_feature_maps[:, 1, ...]
        CNN_concat_input = torch.cat([CNN_seq_out[:, 1, ...],CNN_seq_feature_maps[:, 1, ...]],dim=1)
        LSTM_out = self.LSTM_out(CNN_concat_input)#128*128*64
        conv1_mask = self.conv1_mask(mask)
        conv2_mask = self.conv2_mask(conv1_mask)#128*128*64
        # Encode image features and mask features with the SAME encoder weights.
        out_conv1 = self.conv1_1(self.conv1(LSTM_out))
        out_conv2 = self.conv2_1(self.conv2(out_conv1))
        out_conv3 = self.conv3_1(self.conv3(out_conv2))
        out_conv4 = self.conv4_1(self.conv4(out_conv3))
        out_conv1_mask = self.conv1_1(self.conv1(conv2_mask))
        out_conv2_mask = self.conv2_1(self.conv2(out_conv1_mask))
        out_conv3_mask = self.conv3_1(self.conv3(out_conv2_mask))
        out_conv4_mask = self.conv4_1(self.conv4(out_conv3_mask))
        # Decoder: at each scale, add the mask features, predict a side image,
        # and pass [skip + upsampled features + upsampled side image] upward.
        sum4 = out_conv4+out_conv4_mask
        image_4 = self.predict_image4(sum4)
        image_4_up = crop_like(self.upsampled_image4_to_3(image_4), out_conv3)
        out_deconv3 = crop_like(self.deconv4(sum4), out_conv3)
        sum3 = out_conv3 + out_conv3_mask
        concat3 = torch.cat((sum3,out_deconv3,image_4_up),dim=1)
        image_3 = self.predict_image3(concat3)
        image_3_up = crop_like(self.upsampled_image3_to_2(image_3), out_conv2)
        out_deconv2 = crop_like(self.deconv3(concat3), out_conv2)
        sum2 = out_conv2+out_conv2_mask
        concat2 = torch.cat((sum2,out_deconv2,image_3_up),dim=1)
        image_2 = self.predict_image2(concat2)
        image_2_up = crop_like(self.upsampled_image2_to_1(image_2), out_conv1)
        out_deconv2 = crop_like(self.deconv2(concat2), out_conv1)
        sum1 = out_conv1 + out_conv1_mask
        concat1 = torch.cat((sum1,out_deconv2,image_2_up),dim=1)
        image_1 = self.predict_image1(concat1)
        image_1_up = crop_like(self.upsampled_image1_to_finally(image_1), LSTM_out)
        out_deconv1 = crop_like(self.deconv1(concat1), LSTM_out)
        sum0 = LSTM_out + conv2_mask
        concat0 = torch.cat([sum0,out_deconv1,image_1_up],dim=1)
        image_out = self.output1(concat0)
        image_out2 = self.output2(image_out)
        image_finally = self.output3(image_out2)
        # Clamp to valid intensity range [0, 1].
        image_finally = torch.clamp(image_finally,0.,1.)
        if self.is_training:
            # Side outputs (coarse→fine) for deep multi-scale supervision.
            return image_4,image_3,image_2,image_1,image_finally
        else:
            return image_finally
| 7,816 | 41.483696 | 142 | py |
MGANet-DCC2020 | MGANet-DCC2020-master/codes/dataloader/read_h5.py | import numpy as np
import cv2
import torch.multiprocessing as mp
# Select the 'spawn' start method before any worker processes exist
# (required when CUDA tensors cross process boundaries).
mp.set_start_method('spawn')
# Quick inspection script: list dataset names and shapes in the training HDF5 file.
f = h5py.File('../../train_b8_LD37.h5','r')
for key in f.keys():
    print(f[key].name,f[key].shape)
    # for i in range(1,100):
    #     cv2.imshow('1.jpg',f[key][i,0,...])
    #     cv2.waitKey(0)
MGANet-DCC2020 | MGANet-DCC2020-master/codes/dataloader/h5_dataset_T.py | import torch.utils.data as data
import torch
import numpy as np
from torchvision import transforms, datasets
import h5py
def data_augmentation(image, mode):
    """Apply one of the 8 dihedral-group transforms to a 2-D image array.

    Args:
        image: numpy array (first two axes are spatial).
        mode: int in 0..7 selecting the rotation/flip combination:
            0: identity                     4: rotate 180
            1: flip up-down                 5: rotate 180 + flip up-down
            2: rotate 90 (counter-clockwise) 6: rotate 270
            3: rotate 90 + flip up-down     7: rotate 270 + flip up-down

    Returns:
        The transformed array (a view where numpy allows it).

    Raises:
        ValueError: for a mode outside 0..7 (the original silently
            returned None, hiding caller bugs).
    """
    if mode == 0:
        # original
        return image
    if mode == 1:
        # flip up and down
        return np.flipud(image)
    if mode == 2:
        # rotate counterwise 90 degree
        return np.rot90(image)
    if mode == 3:
        # rotate 90 degree and flip up and down
        return np.flipud(np.rot90(image))
    if mode == 4:
        # rotate 180 degree
        return np.rot90(image, k=2)
    if mode == 5:
        # rotate 180 degree and flip
        return np.flipud(np.rot90(image, k=2))
    if mode == 6:
        # rotate 270 degree
        return np.rot90(image, k=3)
    if mode == 7:
        # rotate 270 degree and flip
        return np.flipud(np.rot90(image, k=3))
    raise ValueError("mode must be an int in 0..7, got %r" % (mode,))
class DatasetFromHdf5(data.Dataset):
    """Dataset over one HDF5 file holding aligned 'data_pre/cur/aft', 'mask'
    and 'label' arrays of shape (N, C, W, H); items are returned transposed
    to (C, H, W) as float tensors."""

    def __init__(self, file_path):
        super(DatasetFromHdf5, self).__init__()
        # NOTE(review): the file handle is kept open for the dataset's
        # lifetime; h5py handles are not fork-safe — confirm DataLoader
        # worker settings.
        f = h5py.File(file_path,'r')
        self.data_pre = f.get('data_pre')   # frame before the target
        self.data_cur = f.get('data_cur')   # target frame (degraded)
        self.data_aft = f.get('data_aft')   # frame after the target
        self.data_mask = f.get('mask')      # guidance mask
        self.label = f.get('label')         # ground truth
        # NOTE(review): data_transform is defined but never applied in
        # __getitem__ — presumably leftover; confirm before relying on
        # augmentation here.
        self.data_transform = transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor()
        ])

    def __getitem__(self, index):
        # transpose(0,2,1) swaps the last two axes: stored (C, W, H) -> (C, H, W).
        return torch.from_numpy(self.data_pre[index, :, :, :].transpose(0,2,1)).float(), \
               torch.from_numpy(self.data_cur[index, :, :, :].transpose(0,2,1)).float(),\
               torch.from_numpy(self.data_aft[index, :, :, :].transpose(0,2,1)).float(),\
               torch.from_numpy(self.data_mask[index, :, :, :].transpose(0,2,1)).float(),\
               torch.from_numpy(self.label[index, :, :, :].transpose(0,2,1)).float()

    def __len__(self):
        # Sanity check: labels and inputs must be aligned one-to-one.
        assert self.label.shape[0]==self.data_aft.shape[0]
        return self.label.shape[0]
class DatasetFromHdf5_2_data(data.Dataset):
    """Dataset interleaving two HDF5 files: even indices read file 1,
    odd indices read file 2 (both at index//2).

    NOTE(review): __len__ returns len1 + len2, so index//2 can exceed the
    shorter file's length unless both files hold the same number of
    samples — confirm that assumption holds for the training data.
    """

    def __init__(self,data_root_1,data_root_2,transforms=None):
        super(DatasetFromHdf5_2_data, self).__init__()
        f1 = h5py.File(data_root_1,'r')
        f2 = h5py.File(data_root_2,'r')
        self.data_pre = f1.get('data_pre')
        self.data_cur = f1.get('data_cur')
        self.data_aft = f1.get('data_aft')
        self.data_mask = f1.get('mask')
        self.label = f1.get('label')
        self.data_pre_2 = f2.get('data_pre')
        self.data_cur_2 = f2.get('data_cur')
        self.data_aft_2 = f2.get('data_aft')
        self.data_mask_2 = f2.get('mask')
        self.label_2 = f2.get('label')

    def __getitem__(self, index):
        # Alternate between the two files; transpose(0,2,1) yields (C, H, W).
        if index%2==0:
            index = index//2
            return torch.from_numpy(self.data_pre[index, :, :, :].transpose(0,2,1)), torch.from_numpy(self.data_cur[index, :, :, :].transpose(0,2,1)),\
                   torch.from_numpy(self.data_aft[index, :, :, :].transpose(0,2,1)),torch.from_numpy(self.data_mask[index, :, :, :].transpose(0,2,1)),torch.from_numpy(self.label[index, :, :, :].transpose(0,2,1))
        else:
            index = index // 2
            return torch.from_numpy(self.data_pre_2[index, :, :, :].transpose(0,2,1)), torch.from_numpy(self.data_cur_2[index, :, :, :].transpose(0,2,1)), \
                   torch.from_numpy(self.data_aft_2[index, :, :, :].transpose(0,2,1)), torch.from_numpy(self.data_mask_2[index, :, :, :].transpose(0,2,1)), torch.from_numpy(self.label_2[index, :, :, :].transpose(0,2,1))

    def __len__(self):
        return self.data_pre.shape[0]+self.data_pre_2.shape[0]
| 3,692 | 38.287234 | 219 | py |
gwsky | gwsky-master/gwsky/utils.py | import numpy as np
import healpy as hp
from scipy.special import sph_harm
from functools import reduce
import quaternionic
import spherical
import matplotlib.pyplot as plt
from healpy.projaxes import HpxMollweideAxes
from typing import Tuple, Optional, List, Dict
from .typing import SHModes, Value
def ra_dec_to_theta_phi(ra: Value, dec: Value) -> Tuple[Value, Value]:
    """Convert equatorial (ra, dec) to spherical (theta=colatitude, phi=ra)."""
    colatitude = np.pi / 2 - dec
    return colatitude, ra
def theta_phi_to_ra_dec(theta: Value, phi: Value) -> Tuple[Value, Value]:
    """Convert spherical (theta=colatitude, phi) to equatorial (ra=phi, dec)."""
    declination = np.pi / 2 - theta
    return phi, declination
def catalog_delta_map(theta: np.ndarray, phi: np.ndarray,
                      nside: int = 64, ra_dec: bool = False) -> np.ndarray:
    """Build the HEALPix overdensity map delta = n/n_mean - 1 of a point catalog.

    theta/phi are per-point angles (interpreted as ra/dec when ra_dec=True);
    nside sets the HEALPix resolution.
    """
    if ra_dec:
        theta, phi = ra_dec_to_theta_phi(theta, phi)
    hp_map = np.zeros(hp.nside2npix(nside))
    # Histogram points into pixels.
    points_ipix = hp.ang2pix(nside=nside, theta=theta, phi=phi)
    ipix, counts = np.unique(points_ipix, return_counts=True)
    hp_map[ipix] += counts
    # Expected count per pixel for a uniform catalog.
    map_mean = theta.shape[0] / hp.nside2npix(nside)
    return hp_map/map_mean - 1
def spherical_harmonic_modes(theta: np.ndarray, phi: np.ndarray, l: int, m: int,
                             weights: Optional[np.ndarray] = None, ra_dec: bool = False) -> complex:
    """Estimate the (l, m) spherical-harmonic coefficient of a point catalog.

    Sums conj(Y_lm) over the (optionally weighted) points and divides by the
    mean point density per steradian. Angles are converted from (ra, dec)
    when ra_dec=True.
    """
    if ra_dec:
        theta, phi = ra_dec_to_theta_phi(theta, phi)
    if weights is None:
        weights = np.ones(theta.shape)
    # Mean number of points per steradian normalizes the discrete sum.
    density = theta.shape[0] / (4 * np.pi)
    # SciPy's argument order is sph_harm(m, l, azimuth, polar).
    weighted_sum = np.sum(sph_harm(m, l, phi, theta) * weights)
    return weighted_sum.conjugate() / density
def sh_normal_coeff(l, m):
    """Normalization factor sqrt((2l+1)/(4*pi) * (l-|m|)!/(l+|m|)!)."""
    m_abs = abs(m)
    # (l+|m|)! / (l-|m|)! as a running product.
    ratio = 1
    for k in range(l - m_abs + 1, l + m_abs + 1):
        ratio *= k
    return ((2 * l + 1) / (4 * np.pi) / ratio) ** 0.5
def rotation_matrix_from_vec(orig_vec, dest_vec) -> np.ndarray:
    """Rotation matrix taking unit vector orig_vec onto dest_vec.

    Rodrigues-style construction; see https://math.stackexchange.com/a/476311.
    Singular for exactly antiparallel inputs (division by 1 + cos = 0).
    """
    axis = np.cross(orig_vec, dest_vec)
    cos_angle = np.inner(orig_vec, dest_vec)
    skew = np.array(
        [[0.0, -axis[2], axis[1]],
         [axis[2], 0.0, -axis[0]],
         [-axis[1], axis[0], 0.0]])
    return np.eye(3) + skew + (skew @ skew) / (1.0 + cos_angle)
def dipole_modes(amplitude: float, theta: float, phi: float) -> SHModes:
    """Spherical-harmonic l=1 modes of a dipole of given amplitude pointing
    along (theta, phi), returned as a dict {(1, m): coefficient}."""
    # Coefficient of Y_10 for a z-aligned dipole of the given amplitude.
    a10 = amplitude / sh_normal_coeff(1, 0)
    dipole_mode = spherical.Modes(
        np.array([0, 0, a10, 0], dtype=complex),
        spin_weight=0)
    # Inverse of the rotation matrix that takes the Y_10 (z-axis) dipole
    # to a dipole field pointing along the given (theta, phi) direction.
    rot_mat = rotation_matrix_from_vec(
        orig_vec=hp.dir2vec(theta, phi),
        dest_vec=np.array([0, 0, 1]))
    rotation = quaternionic.array.from_rotation_matrix(rot_mat)
    wigner = spherical.Wigner(ell_max=2)
    # wigner.rotate returns modes corresponding to the INVERSE coordinate
    # rotation: for a field f with SH coefficients a_lm, applying the
    # coordinate rotation R yields f'(r) = f(R^{-1} r), whose coefficients
    # are `wigner.rotate(modes=a_lm, R=1/R)`.
    rot_mode_sph: spherical.Modes = wigner.rotate(modes=dipole_mode, R=rotation)
    rot_mode = {(1, m): rot_mode_sph[spherical.LM_index(1, m)] for m in range(-1, 2)}
    return rot_mode
def plot_hp_map(hp_map: np.ndarray, detectors: Optional[List[Dict]] = None,
                fig: Optional[plt.Figure] = None, label: str = '',
                grid_on: bool = True, grid_kwargs: Optional[Dict] = None,
                detector_kwargs: Optional[Dict] = None, **kwargs):
    """Plot a HEALPix map in Mollweide projection with optional detector markers.

    Args:
        hp_map: HEALPix pixel array.
        detectors: list of dicts with 'longitude', 'latitude' (degrees) and
            'marker' keys, plotted on the sky map.
        fig: optional existing figure to draw into.
        label: colorbar x-label.
        grid_on: draw a graticule.
        grid_kwargs: overrides for the graticule spacing (dpar/dmer).
        detector_kwargs: overrides for the detector marker style.
        **kwargs: forwarded to hp.mollview.

    Returns:
        The matplotlib figure.
    """
    if detectors is None:
        detectors = []
    plot_kwargs = {'flip': 'geo'}
    plot_kwargs.update(kwargs)
    hp.mollview(hp_map, fig=fig, **plot_kwargs)
    fig = plt.gcf()
    skymap_ax: HpxMollweideAxes = fig.get_axes()[0]
    det_kwargs = {'color': 'orange', 'markersize': 10}
    if detector_kwargs is not None:
        det_kwargs.update(detector_kwargs)
    for detector in detectors:
        skymap_ax.projplot(
            detector['longitude'], detector['latitude'], lonlat=True,
            marker=detector['marker'], **det_kwargs)
    if grid_on:
        grid_kwargs_real = {'dpar': 30, 'dmer': 30}
        # BUGFIX: the original called .update(grid_kwargs) unconditionally,
        # raising TypeError when grid_kwargs kept its default of None.
        if grid_kwargs is not None:
            grid_kwargs_real.update(grid_kwargs)
        skymap_ax.graticule(**grid_kwargs_real)
    cb_ax: plt.Axes = fig.get_axes()[1]
    cb_ax.set_xlabel(label)
    return fig
wgenpatex | wgenpatex-main/model.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Generator's convolutional blocks 2D
class Conv_block2D(nn.Module):
    """Generator conv block: two valid 3x3 convs then a 1x1 conv, each
    followed by BatchNorm and LeakyReLU (spatial size shrinks by 4)."""

    def __init__(self, n_ch_in, n_ch_out, m=0.1):
        super(Conv_block2D, self).__init__()
        self.conv1 = nn.Conv2d(n_ch_in, n_ch_out, 3, padding=0, bias=True)
        self.bn1 = nn.BatchNorm2d(n_ch_out, momentum=m)
        self.conv2 = nn.Conv2d(n_ch_out, n_ch_out, 3, padding=0, bias=True)
        self.bn2 = nn.BatchNorm2d(n_ch_out, momentum=m)
        self.conv3 = nn.Conv2d(n_ch_out, n_ch_out, 1, padding=0, bias=True)
        self.bn3 = nn.BatchNorm2d(n_ch_out, momentum=m)

    def forward(self, x):
        stages = ((self.conv1, self.bn1),
                  (self.conv2, self.bn2),
                  (self.conv3, self.bn3))
        for conv, bn in stages:
            x = F.leaky_relu(bn(conv(x)))
        return x
# Up-sampling block
class Upsample(nn.Module):
    """Functional replacement for the deprecated nn.Upsample module."""

    def __init__(self, scale_factor, mode="nearest"):
        super(Upsample, self).__init__()
        self.scale_factor = scale_factor
        self.mode = mode

    def forward(self, x):
        return F.interpolate(x, scale_factor=self.scale_factor, mode=self.mode)
# Up-sampling + batch normalization block
class Up_Bn2D(nn.Module):
    """Nearest-neighbour x2 upsampling followed by batch normalization."""

    def __init__(self, n_ch):
        super(Up_Bn2D, self).__init__()
        self.up = Upsample(scale_factor=2, mode='nearest')
        self.bn = nn.BatchNorm2d(n_ch)

    def forward(self, x):
        upsampled = self.up(x)
        return self.bn(upsampled)
# The whole network
class generator(nn.Module):
    """Multi-scale texture generator: a coarse-to-fine stack of conv blocks
    where each scale consumes its own noise input concatenated with the
    upsampled features of the previous scale."""
    expansion = 1

    def __init__(self, nlayers=5, ch_in=3, ch_step=2, device=DEVICE):
        # nlayers: number of x2 upsampling scales; ch_in: noise channels per
        # scale; ch_step: channel growth added per scale.
        super(generator, self).__init__()
        self.ch_in = ch_in
        self.nlayers = nlayers
        self.first_conv = Conv_block2D(ch_in,ch_step).to(device)
        self.cb1 = nn.ModuleList()   # per-scale noise feature extractors
        self.cb2 = nn.ModuleList()   # per-scale fusion blocks
        self.up = nn.ModuleList()    # per-scale upsample + BN
        for n in range(0, nlayers):
            self.up.append(Up_Bn2D((n+1)*ch_step).to(device))
            self.cb1.append(Conv_block2D(ch_in,ch_step).to(device))
            self.cb2.append(Conv_block2D((n+2)*ch_step,(n+2)*ch_step).to(device))
        # 1x1 projection to a 3-channel RGB image.
        self.last_conv = nn.Conv2d((nlayers+1)*ch_step, 3, 1, padding=0, bias=False).to(device)

    def forward(self, z):
        # z: list of nlayers+1 noise tensors, coarsest first (see
        # sample_fake_img for the expected sizes).
        nlayers=self.nlayers
        y = self.first_conv(z[0])
        for n in range(0,nlayers):
            y = self.up[n](y)                          # upsample previous scale
            y = torch.cat((y, self.cb1[n](z[n+1])), 1) # inject fresh noise features
            y = self.cb2[n](y)                         # fuse
        y = self.last_conv(y)
        return y
# Function to generate an output sample
def sample_fake_img(G, size, n_samples=1):
    """Sample n_samples images from generator G at the spatial size given by
    `size` (an (N, C, H, W) shape tuple); returns G's output tensor."""
    # dimension of the first input noise
    # NOTE(review): np.ceil is applied BEFORE the division — possibly meant
    # ceil(size / 2**nlayers); confirm against how optim sizes are chosen.
    strow = int(np.ceil(size[2])/2**G.nlayers)
    stcol = int(np.ceil(size[3])/2**G.nlayers)
    # input noise and forward pass
    # Each scale k gets its own noise map; the extra border (8 and +4 for
    # k>0) absorbs the valid-convolution shrinkage of the Conv_block2D stacks.
    ztab = [torch.rand(n_samples, G.ch_in, 8+2**k*strow+4*int(k!=0), 8+2**k*stcol+4*int(k!=0), device=DEVICE, dtype=torch.float) for k in range(0, G.nlayers+1)]
    Z = [Variable(z) for z in ztab]
    return G(Z)
| 3,143 | 33.173913 | 160 | py |
wgenpatex | wgenpatex-main/wgenpatex.py | import torch
from torch import nn
from torch.autograd.variable import Variable
import matplotlib.pyplot as plt
import numpy as np
import math
import time
import model
from os import mkdir
from os.path import isdir
# Run on GPU when available; every tensor/module below is created on DEVICE.
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(DEVICE)  # log the selected device once at import time
def imread(img_name):
    """
    loads an image as torch.tensor on the selected device

    Returns a (1, C, H, W) float tensor with values in [0, 1]: 8-bit inputs
    are rescaled, grayscale images get a singleton channel, and any alpha
    channel is dropped.
    """
    np_img = plt.imread(img_name)
    tens_img = torch.tensor(np_img, dtype=torch.float, device=DEVICE)
    if torch.max(tens_img) > 1:
        # 8-bit image: rescale to [0, 1].
        tens_img/=255
    if len(tens_img.shape) < 3:
        # Grayscale: add a channel axis.
        tens_img = tens_img.unsqueeze(2)
    if tens_img.shape[2] > 3:
        # Drop the alpha channel.
        tens_img = tens_img[:,:,:3]
    # HWC -> CHW, then add a batch axis.
    tens_img = tens_img.permute(2,0,1)
    return tens_img.unsqueeze(0)
def imshow(tens_img):
    """
    shows a tensor image

    Accepts a (1, C, H, W) tensor; values are clipped to [0, 1] and
    single-channel images are shown with a gray colormap.
    """
    np_img = np.clip(tens_img.squeeze(0).permute(1,2,0).data.cpu().numpy(), 0,1)
    if np_img.shape[2] < 3:
        np_img = np_img[:,:,0]
        ax = plt.imshow(np_img)
        ax.set_cmap('gray')
    else:
        ax = plt.imshow(np_img)
    plt.axis('off')
    return plt.show()
def imsave(save_name, tens_img):
    """
    save a tensor image

    Accepts a (1, C, H, W) tensor; values are clipped to [0, 1] and
    single-channel images are written as 2-D arrays.
    """
    np_img = np.clip(tens_img.squeeze(0).permute(1,2,0).data.cpu().numpy(), 0,1)
    if np_img.shape[2] < 3:
        np_img = np_img[:,:,0]
    plt.imsave(save_name, np_img)
    return
class gaussian_downsample(nn.Module):
    """
    Downsampling module with Gaussian filtering

    A depthwise (groups=3) conv with a fixed, non-trainable Gaussian kernel,
    applied at the given stride; optional periodic (wrap-around) padding.
    """
    def __init__(self, kernel_size, sigma, stride, pad=False):
        super(gaussian_downsample, self).__init__()
        self.gauss = nn.Conv2d(3, 3, kernel_size, stride=stride, groups=3, bias=False)
        gaussian_weights = self.init_weights(kernel_size, sigma)
        self.gauss.weight.data = gaussian_weights.to(DEVICE)
        # The Gaussian filter is fixed, never trained.
        self.gauss.weight.requires_grad_(False)
        self.pad = pad
        self.padsize = kernel_size-1

    def forward(self, x):
        if self.pad:
            # Periodic padding: wrap the first rows/cols to the bottom/right.
            x = torch.cat((x, x[:,:,:self.padsize,:]), 2)
            x = torch.cat((x, x[:,:,:,:self.padsize]), 3)
        return self.gauss(x)

    def init_weights(self, kernel_size, sigma):
        """Build a normalized 2-D Gaussian kernel, replicated per channel."""
        x_cord = torch.arange(kernel_size)
        x_grid = x_cord.repeat(kernel_size).view(kernel_size, kernel_size)
        y_grid = x_grid.t()
        xy_grid = torch.stack([x_grid, y_grid], dim=-1)
        mean = (kernel_size - 1)/2.
        variance = sigma**2.
        gaussian_kernel = (1./(2.*math.pi*variance))*torch.exp(-torch.sum((xy_grid - mean)**2., dim=-1)/(2*variance))
        # Normalize so the kernel sums to 1 (preserves mean intensity).
        gaussian_kernel = gaussian_kernel / torch.sum(gaussian_kernel)
        return gaussian_kernel.view(1, 1, kernel_size, kernel_size).repeat(3, 1, 1, 1)
class semidual(nn.Module):
    """
    Computes the semi-dual loss between inputy and inputx for the dual variable psi

    Semi-dual OT formulation with squared-Euclidean cost:
    E_x[min_j (||x - y_j||^2 - psi_j)] + mean(psi), where psi holds one
    learnable dual potential per target point y_j. The KeOps path computes
    the argmin lazily without materializing the full cost matrix.
    """
    def __init__(self, inputy, device=DEVICE, usekeops=False):
        super(semidual, self).__init__()
        # One dual potential per target sample, optimized externally.
        self.psi = nn.Parameter(torch.zeros(inputy.shape[0], device=device))
        self.yt = inputy.transpose(1,0)
        self.usekeops = usekeops
        # Precomputed ||y_j||^2 row for the cost expansion.
        self.y2 = torch.sum(self.yt **2,0,keepdim=True)

    def forward(self, inputx):
        if self.usekeops:
            from pykeops.torch import LazyTensor
            y = self.yt.transpose(1,0)
            x_i = LazyTensor(inputx.unsqueeze(1).contiguous())
            y_j = LazyTensor(y.unsqueeze(0).contiguous())
            v_j = LazyTensor(self.psi.unsqueeze(0).unsqueeze(2).contiguous())
            sx2_i = LazyTensor(torch.sum(inputx**2,1,keepdim=True).unsqueeze(2).contiguous())
            sy2_j = LazyTensor(self.y2.unsqueeze(2).contiguous())
            # rmv[i, j] = ||x_i - y_j||^2 - psi_j, reduced by argmin over j.
            rmv = sx2_i + sy2_j - 2*(x_i*y_j).sum(-1) - v_j
            amin = rmv.argmin(dim=1).view(-1)
            loss = torch.mean(torch.sum((inputx-y[amin,:])**2,1)-self.psi[amin]) + torch.mean(self.psi)
        else:
            # Dense cost matrix via ||x||^2 + ||y||^2 - 2 x.y expansion.
            cxy = torch.sum(inputx**2,1,keepdim=True) + self.y2 - 2*torch.matmul(inputx,self.yt)
            loss = torch.mean(torch.min(cxy - self.psi.unsqueeze(0),1)[0]) + torch.mean(self.psi)
        return loss
class gaussian_layer(nn.Module):
    """
    Gaussian layer for the dowsampling pyramid

    Wraps gaussian_downsample and caches its last output in `down_img`
    so pyramid consumers can read per-scale images after a single pass.
    """
    def __init__(self, gaussian_kernel_size, gaussian_std, stride = 2, pad=False):
        super(gaussian_layer, self).__init__()
        self.downsample = gaussian_downsample(gaussian_kernel_size, gaussian_std, stride, pad=pad)

    def forward(self, input):
        # Cache the downsampled image for later access via .down_img.
        self.down_img = self.downsample(input)
        return self.down_img
class identity(nn.Module):
    """Pass-through pyramid layer: returns its input unchanged while caching
    it in `down_img`, matching gaussian_layer's interface."""

    def __init__(self):
        super(identity, self).__init__()

    def forward(self, x):
        # Cache so pyramid consumers can read .down_img at this scale too.
        self.down_img = x
        return self.down_img
def create_gaussian_pyramid(gaussian_kernel_size, gaussian_std, n_scales, stride = 2, pad=False):
    """
    Create a dowsampling Gaussian pyramid

    Scale 0 is the identity; each of the remaining n_scales-1 modules applies
    Gaussian filtering and striding on top of the previous scale's output.
    """
    pyramid = nn.Sequential(identity())
    for scale in range(1, n_scales):
        pyramid.add_module(
            "Gaussian_downsampling_{}".format(scale),
            gaussian_layer(gaussian_kernel_size, gaussian_std, stride, pad=pad))
    return pyramid
class patch_extractor(nn.Module):
    """
    Module for creating custom patch extractor

    Extracts flattened square patches from a (1, C, H, W) image; optionally
    pads periodically (wrap-around) and/or keeps a random subset of patches.
    """
    def __init__(self, patch_size, pad=False):
        super(patch_extractor, self).__init__()
        self.im2pat = nn.Unfold(kernel_size=patch_size)
        self.pad = pad
        self.padsize = patch_size - 1

    def forward(self, input, batch_size=0):
        img = input
        if self.pad:
            # Periodic padding: wrap the first rows/cols to the bottom/right.
            img = torch.cat((img, img[:, :, :self.padsize, :]), 2)
            img = torch.cat((img, img[:, :, :, :self.padsize]), 3)
        # (1, C*k*k, L) -> (L, C*k*k): one flattened patch per row.
        patches = self.im2pat(img).squeeze(0).transpose(1, 0)
        if batch_size > 0:
            keep = torch.randperm(patches.size(0))[:batch_size]
            patches = patches[keep, :]
        return patches
def optim_synthesis(args):
    """
    Perform the texture synthesis of an examplar image

    Alternating optimization: for each outer iteration, (1) the semi-dual
    OT potentials psi are refined per scale with averaged SGD on patches of
    the current synthesis, then (2) the synthesized image takes one Adam
    step on the weighted multi-scale OT loss. Returns the final image.
    """
    target_img_name = args.target_image_path
    patch_size = args.patch_size
    n_iter_max = args.n_iter_max
    n_iter_psi = args.n_iter_psi
    n_patches_in = args.n_patches_in
    n_patches_out = args.n_patches_out
    n_scales = args.scales
    usekeops = args.keops
    visu = args.visu
    save = args.save
    # fixed parameters
    monitoring_step=50
    saving_folder='tmp/'
    # parameters for Gaussian downsampling
    gaussian_kernel_size = 4
    gaussian_std = 1
    stride = 2
    # load image
    target_img = imread(target_img_name)
    # synthesized size (defaults to the examplar's size)
    if args.size is None:
        nrow = target_img.shape[2]
        ncol = target_img.shape[3]
    else:
        nrow = args.size[0]
        ncol = args.size[1]
    if save:
        if not isdir(saving_folder):
            mkdir(saving_folder)
        imsave(saving_folder+'original.png', target_img)
    # Create Gaussian Pyramid downsamplers
    # Target pyramid uses no padding; the synthesis pyramid pads
    # periodically so patches wrap around the image borders.
    target_downsampler = create_gaussian_pyramid(gaussian_kernel_size, gaussian_std, n_scales, stride, pad=False)
    input_downsampler = create_gaussian_pyramid(gaussian_kernel_size, gaussian_std, n_scales, stride, pad=True)
    target_downsampler(target_img) # evaluate on the target image
    # create patch extractors
    target_im2pat = patch_extractor(patch_size, pad=False)
    input_im2pat = patch_extractor(patch_size, pad=True)
    # create semi-dual module at each scale
    semidual_loss = []
    for s in range(n_scales):
        real_data = target_im2pat(target_downsampler[s].down_img, n_patches_out) # exctract at most n_patches_out patches from the downsampled target images
        layer = semidual(real_data, device=DEVICE, usekeops=usekeops)
        semidual_loss.append(layer)
        if visu:
            imshow(target_downsampler[s].down_img)
    # Weights on scales
    prop = torch.ones(n_scales, device=DEVICE)/n_scales # all scales with same weight
    # initialize the generated image (uniform noise, optimized directly)
    fake_img = torch.rand(1, 3, nrow,ncol, device=DEVICE, requires_grad=True)
    # intialize optimizer for image
    optim_img = torch.optim.Adam([fake_img], lr=0.01)
    # initialize the loss vector
    total_loss = np.zeros(n_iter_max)
    # Main loop
    t = time.time()
    for it in range(n_iter_max):
        # 1. update psi (inner maximization of the semi-dual, image frozen)
        input_downsampler(fake_img.detach()) # evaluate on the current fake image
        for s in range(n_scales):
            optim_psi = torch.optim.ASGD([semidual_loss[s].psi], lr=1, alpha=0.5, t0=1)
            for i in range(n_iter_psi):
                fake_data = input_im2pat(input_downsampler[s].down_img, n_patches_in)
                optim_psi.zero_grad()
                loss = -semidual_loss[s](fake_data)  # maximize => minimize negative
                loss.backward()
                optim_psi.step()
            # Use ASGD's internal Polyak average 'ax' as the final psi.
            semidual_loss[s].psi.data = optim_psi.state[semidual_loss[s].psi]['ax']
        # 2. perform gradient step on the image
        optim_img.zero_grad()
        tloss = 0
        for s in range(n_scales):
            input_downsampler(fake_img)
            fake_data = input_im2pat(input_downsampler[s].down_img, n_patches_in)
            loss = prop[s]*semidual_loss[s](fake_data)
            loss.backward()
            tloss += loss.item()
        optim_img.step()
        # save loss
        total_loss[it] = tloss
        # save some results
        if it % monitoring_step == 0:
            print('iteration '+str(it)+' - elapsed '+str(int(time.time()-t))+'s - loss = '+str(tloss))
            if visu:
                imshow(fake_img)
            if save:
                imsave(saving_folder+'it'+str(it)+'.png', fake_img)
    print('DONE - total time is '+str(int(time.time()-t))+'s')
    if visu:
        plt.plot(total_loss)
        plt.show()
        if save:
            plt.savefig(saving_folder+'loss_multiscale.png')
            plt.close()
    if save:
        np.save(saving_folder+'loss.npy', total_loss)
    return fake_img
def learn_model(args):
    """Learn a multiscale generative texture model from a single target image.

    A generator network ``G`` is trained so that patch distributions of its
    samples match those of the target image at every scale of a Gaussian
    pyramid. The match is measured per scale with a semi-dual optimal-transport
    loss (``semidual`` module); its dual variable ``psi`` is re-optimized with
    averaged SGD (ASGD) before each generator update.

    Args:
        args: namespace providing ``target_image_path``, ``patch_size``,
            ``n_iter_max``, ``n_iter_psi``, ``n_patches_in``, ``n_patches_out``,
            ``scales``, ``keops``, ``visu`` and ``save``.

    Returns:
        The trained generator ``G``.
    """
    target_img_name = args.target_image_path
    patch_size = args.patch_size
    n_iter_max = args.n_iter_max
    n_iter_psi = args.n_iter_psi
    n_patches_in = args.n_patches_in
    n_patches_out = args.n_patches_out
    n_scales = args.scales
    usekeops = args.keops
    visu = args.visu
    save = args.save
    # fixed parameters
    monitoring_step = 100
    saving_folder = 'tmp/'
    # parameters for Gaussian downsampling
    gaussian_kernel_size = 4
    gaussian_std = 1
    stride = 2
    # load image
    target_img = imread(target_img_name)
    if save:
        if not isdir(saving_folder):
            mkdir(saving_folder)
        imsave(saving_folder+'original.png', target_img)
    # Create Gaussian pyramid downsamplers: the target one is evaluated once,
    # the input one is re-evaluated on every generated sample.
    target_downsampler = create_gaussian_pyramid(gaussian_kernel_size, gaussian_std, n_scales, stride, pad=False)
    input_downsampler = create_gaussian_pyramid(gaussian_kernel_size, gaussian_std, n_scales, stride, pad=False)
    target_downsampler(target_img)  # evaluate on the target image
    # create patch extractors
    target_im2pat = patch_extractor(patch_size, pad=False)
    input_im2pat = patch_extractor(patch_size, pad=False)
    # create one semi-dual OT module per scale
    semidual_loss = []
    for s in range(n_scales):
        # extract at most n_patches_out patches from the downsampled target image
        real_data = target_im2pat(target_downsampler[s].down_img, n_patches_out)
        layer = semidual(real_data, device=DEVICE, usekeops=usekeops)
        semidual_loss.append(layer)
        if visu:
            imshow(target_downsampler[s].down_img)
    # Weights on scales
    prop = torch.ones(n_scales, device=DEVICE)/n_scales  # all scales with same weight
    # initialize generator
    G = model.generator(n_scales)
    fake_img = model.sample_fake_img(G, target_img.shape, n_samples=1)
    # initialize optimizer for the generator parameters
    optim_G = torch.optim.Adam(G.parameters(), lr=0.01)
    # initialize the loss vector
    total_loss = np.zeros(n_iter_max)
    # Main loop
    t = time.time()
    for it in range(n_iter_max):
        # 1. update the dual variables psi (generator frozen via .detach())
        fake_img = model.sample_fake_img(G, target_img.shape, n_samples=1)
        input_downsampler(fake_img.detach())
        for s in range(n_scales):
            optim_psi = torch.optim.ASGD([semidual_loss[s].psi], lr=1, alpha=0.5, t0=1)
            for i in range(n_iter_psi):
                # fresh random subset of patches at each inner step
                fake_data = input_im2pat(input_downsampler[s].down_img, n_patches_in)
                optim_psi.zero_grad()
                loss = -semidual_loss[s](fake_data)  # maximize the semi-dual objective
                loss.backward()
                optim_psi.step()
            # keep the ASGD *averaged* iterate ('ax' state) as the final psi
            semidual_loss[s].psi.data = optim_psi.state[semidual_loss[s].psi]['ax']
        # 2. perform a gradient step on the generator
        optim_G.zero_grad()
        tloss = 0
        input_downsampler(fake_img)
        for s in range(n_scales):
            fake_data = input_im2pat(input_downsampler[s].down_img, n_patches_in)
            loss = prop[s]*semidual_loss[s](fake_data)
            tloss += loss
        tloss.backward()
        optim_G.step()
        # save loss
        total_loss[it] = tloss.item()
        # save some results
        if it % monitoring_step == 0:
            print('iteration '+str(it)+' - elapsed '+str(int(time.time()-t))+'s - loss = '+str(tloss.item()))
            if visu:
                imshow(fake_img)
            if save:
                imsave(saving_folder+'it'+str(it)+'.png', fake_img)
    print('DONE - total time is '+str(int(time.time()-t))+'s')
    # Plot/save the loss curve. NOTE(fix): the curve is plotted whenever either
    # flag is set (previously save-without-visu wrote an *empty* figure), and
    # savefig runs *before* show() (savefig after show typically writes a blank
    # canvas with interactive backends). The figure is closed in all cases.
    if visu or save:
        plt.plot(total_loss)
        if save:
            plt.savefig(saving_folder+'loss.png')
        if visu:
            plt.show()
            plt.pause(0.01)
        plt.close()
    if save:
        np.save(saving_folder+'loss.npy', total_loss)
    return G
| 14,073 | 34.185 | 157 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.