| repo (string) | file (string) | code (string) | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type (string) |
|---|---|---|---|---|---|---|
BraVL | BraVL-master/BraVL_fMRI/brain_image_text/networks/MLP_Image.py |
import torch
import torch.nn as nn
class EncoderImage(nn.Module):
def __init__(self, flags):
super(EncoderImage, self).__init__()
self.flags = flags;
self.hidden_dim = 2048;
modules = []
modules.append(nn.Sequential(nn.Linear(flags.m2_dim, self.hidden_dim), nn.ReLU(True)))
modules.extend([nn.Sequential(nn.Linear(self.hidden_dim, self.hidden_dim), nn.ReLU(True))
for _ in range(flags.num_hidden_layers - 1)])
self.enc = nn.Sequential(*modules)
self.relu = nn.ReLU();
self.hidden_mu = nn.Linear(in_features=self.hidden_dim, out_features=flags.class_dim, bias=True)
self.hidden_logvar = nn.Linear(in_features=self.hidden_dim, out_features=flags.class_dim, bias=True)
def forward(self, x):
h = self.enc(x);
h = h.view(h.size(0), -1);
latent_space_mu = self.hidden_mu(h);
latent_space_logvar = self.hidden_logvar(h);
latent_space_mu = latent_space_mu.view(latent_space_mu.size(0), -1);
latent_space_logvar = latent_space_logvar.view(latent_space_logvar.size(0), -1);
return None, None, latent_space_mu, latent_space_logvar;
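# Note: the leading (None, None) in the encoder's return mirrors the
# (style_mu, style_logvar) slots that BaseMMVae.encode unpacks via l[:2] / l[2:];
# this encoder only models the shared class latent, so the style slots stay empty.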
class DecoderImage(nn.Module):
def __init__(self, flags):
super(DecoderImage, self).__init__();
self.flags = flags;
self.hidden_dim = 2048;
modules = []
modules.append(nn.Sequential(nn.Linear(flags.class_dim, self.hidden_dim), nn.ReLU(True)))
modules.extend([nn.Sequential(nn.Linear(self.hidden_dim, self.hidden_dim), nn.ReLU(True))
for _ in range(flags.num_hidden_layers - 1)])
self.dec = nn.Sequential(*modules)
self.fc3 = nn.Linear(self.hidden_dim, flags.m2_dim, bias=True)
def forward(self, style_latent_space, class_latent_space):
z = class_latent_space;
x_hat = self.dec(z);
x_hat = self.fc3(x_hat);
return x_hat, torch.tensor(0.75).to(z.device); | 1,982 | 37.134615 | 108 | py |
BraVL | BraVL-master/BraVL_fMRI/brain_image_text/networks/QNET.py | import torch.nn as nn
import torch.nn.functional as F
import torch
class QNet(nn.Module):
def __init__(self, input_dim,latent_dim):
super(QNet, self).__init__()
self.fc1 = nn.Linear(input_dim,512)
self.fc21 = nn.Linear(512, latent_dim)
self.fc22 = nn.Linear(512, latent_dim)
def forward(self, x):
e = F.relu(self.fc1(x))
mu = self.fc21(e)
lv = self.fc22(e)
# return mu,lv.mul(0.5).exp_()
        return mu, torch.tensor(0.75, device=mu.device) | 504 | 30.5625 | 46 | py |
BraVL | BraVL-master/BraVL_fMRI/brain_image_text/networks/MLP_Brain.py |
import torch
import torch.nn as nn
class EncoderBrain(nn.Module):
def __init__(self, flags):
super(EncoderBrain, self).__init__()
self.flags = flags;
self.hidden_dim = 512;
modules = []
modules.append(nn.Sequential(nn.Linear(flags.m1_dim, self.hidden_dim), nn.ReLU(True)))
modules.extend([nn.Sequential(nn.Linear(self.hidden_dim, self.hidden_dim), nn.ReLU(True))
for _ in range(flags.num_hidden_layers - 1)])
self.enc = nn.Sequential(*modules)
self.relu = nn.ReLU();
self.hidden_mu = nn.Linear(in_features=self.hidden_dim, out_features=flags.class_dim, bias=True)
self.hidden_logvar = nn.Linear(in_features=self.hidden_dim, out_features=flags.class_dim, bias=True)
def forward(self, x):
h = self.enc(x);
h = h.view(h.size(0), -1);
latent_space_mu = self.hidden_mu(h);
latent_space_logvar = self.hidden_logvar(h);
latent_space_mu = latent_space_mu.view(latent_space_mu.size(0), -1);
latent_space_logvar = latent_space_logvar.view(latent_space_logvar.size(0), -1);
return None, None, latent_space_mu, latent_space_logvar;
class DecoderBrain(nn.Module):
def __init__(self, flags):
super(DecoderBrain, self).__init__();
self.flags = flags;
self.hidden_dim = 512;
modules = []
modules.append(nn.Sequential(nn.Linear(flags.class_dim, self.hidden_dim), nn.ReLU(True)))
modules.extend([nn.Sequential(nn.Linear(self.hidden_dim, self.hidden_dim), nn.ReLU(True))
for _ in range(flags.num_hidden_layers - 1)])
self.dec = nn.Sequential(*modules)
self.fc3 = nn.Linear(self.hidden_dim, flags.m1_dim)
self.relu = nn.ReLU();
def forward(self, style_latent_space, class_latent_space):
z = class_latent_space;
x_hat = self.dec(z);
x_hat = self.fc3(x_hat);
return x_hat, torch.tensor(0.75).to(z.device);
| 2,002 | 35.418182 | 108 | py |
BraVL | BraVL-master/BraVL_fMRI/divergence_measures/mm_div.py |
import torch
import torch.nn as nn
from divergence_measures.kl_div import calc_kl_divergence
from divergence_measures.kl_div import calc_kl_divergence_lb_gauss_mixture
from divergence_measures.kl_div import calc_kl_divergence_ub_gauss_mixture
from divergence_measures.kl_div import calc_entropy_gauss
from utils.utils import reweight_weights
def poe(mu, logvar, eps=1e-8):
var = torch.exp(logvar) + eps
# precision of i-th Gaussian expert at point x
T = 1. / var
pd_mu = torch.sum(mu * T, dim=0) / torch.sum(T, dim=0)
pd_var = 1. / torch.sum(T, dim=0)
pd_logvar = torch.log(pd_var)
return pd_mu, pd_logvar
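# The product of diagonal Gaussian experts is again Gaussian: precisions (1/var)
# add up and the mean is the precision-weighted average of the expert means.
# A minimal, hypothetical sketch of how `poe` is called (shapes assumed to be
# (num_experts, batch, latent_dim); this demo is not part of the original module):
def _poe_demo():
    mus = torch.randn(2, 8, 32)      # two experts, batch of 8, 32-dim latent
    logvars = torch.zeros(2, 8, 32)  # both experts have unit variance
    pd_mu, pd_logvar = poe(mus, logvars)
    # two unit-variance experts multiply to a Gaussian with variance 1/2
    assert torch.allclose(pd_logvar, torch.log(torch.tensor(0.5)), atol=1e-6)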
def alpha_poe(alpha, mu, logvar, eps=1e-8):
var = torch.exp(logvar) + eps
# precision of i-th Gaussian expert at point x
    if var.dim() == 3:
        alpha_expanded = alpha.unsqueeze(-1).unsqueeze(-1);
    elif var.dim() == 4:
        alpha_expanded = alpha.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1);
    else:
        raise ValueError('alpha_poe expects var with 3 or 4 dimensions, got %d' % var.dim());
T = 1 / var;
pd_var = 1. / torch.sum(alpha_expanded * T, dim=0)
pd_mu = pd_var * torch.sum(alpha_expanded * mu * T, dim=0)
pd_logvar = torch.log(pd_var)
return pd_mu, pd_logvar;
def calc_alphaJSD_modalities_mixture(m1_mu, m1_logvar, m2_mu, m2_logvar, flags):
klds = torch.zeros(2);
entropies_mixture = torch.zeros(2);
w_modalities = torch.Tensor(flags.alpha_modalities[1:]);
if flags.cuda:
w_modalities = w_modalities.cuda();
klds = klds.cuda();
entropies_mixture = entropies_mixture.cuda();
w_modalities = reweight_weights(w_modalities);
mus = [m1_mu, m2_mu]
logvars = [m1_logvar, m2_logvar]
for k in range(0, len(mus)):
ent = calc_entropy_gauss(flags, logvars[k], norm_value=flags.batch_size);
# print('entropy: ' + str(ent))
# print('lb: ' )
kld_lb = calc_kl_divergence_lb_gauss_mixture(flags, k, mus[k], logvars[k], mus, logvars,
norm_value=flags.batch_size);
print('kld_lb: ' + str(kld_lb))
# print('ub: ')
kld_ub = calc_kl_divergence_ub_gauss_mixture(flags, k, mus[k], logvars[k], mus, logvars, ent,
norm_value=flags.batch_size);
print('kld_ub: ' + str(kld_ub))
# kld_mean = (kld_lb+kld_ub)/2;
entropies_mixture[k] = ent.clone();
klds[k] = 0.5*(kld_lb + kld_ub);
# klds[k] = kld_ub;
summed_klds = (w_modalities * klds).sum();
# print('summed klds: ' + str(summed_klds));
return summed_klds, klds, entropies_mixture;
def calc_alphaJSD_modalities(flags, mus, logvars, weights, normalization=None):
num_mods = mus.shape[0];
num_samples = mus.shape[1];
alpha_mu, alpha_logvar = alpha_poe(weights, mus, logvars)
if normalization is not None:
klds = torch.zeros(num_mods);
else:
klds = torch.zeros(num_mods, num_samples);
klds = klds.to(flags.device);
for k in range(0, num_mods):
kld = calc_kl_divergence(mus[k,:,:], logvars[k,:,:], alpha_mu,
alpha_logvar, norm_value=normalization);
if normalization is not None:
klds[k] = kld;
else:
klds[k,:] = kld;
if normalization is None:
weights = weights.unsqueeze(1).repeat(1, num_samples);
group_div = (weights * klds).sum(dim=0);
return group_div, klds, [alpha_mu, alpha_logvar];
def calc_group_divergence_moe(flags, mus, logvars, weights, normalization=None):
num_mods = mus.shape[0];
num_samples = mus.shape[1];
if normalization is not None:
klds = torch.zeros(num_mods);
else:
klds = torch.zeros(num_mods, num_samples);
klds = klds.to(flags.device);
weights = weights.to(flags.device);
for k in range(0, num_mods):
kld_ind = calc_kl_divergence(mus[k,:,:], logvars[k,:,:],
norm_value=normalization);
if normalization is not None:
klds[k] = kld_ind;
else:
klds[k,:] = kld_ind;
if normalization is None:
weights = weights.unsqueeze(1).repeat(1, num_samples);
group_div = (weights*klds).sum(dim=0);
return group_div, klds;
def calc_group_divergence_poe(flags, mus, logvars, norm=None):
num_mods = mus.shape[0];
poe_mu, poe_logvar = poe(mus, logvars)
kld_poe = calc_kl_divergence(poe_mu, poe_logvar, norm_value=norm);
klds = torch.zeros(num_mods).to(flags.device);
for k in range(0, num_mods):
kld_ind = calc_kl_divergence(mus[k,:,:], logvars[k,:,:],
norm_value=norm);
klds[k] = kld_ind;
return kld_poe, klds, [poe_mu, poe_logvar];
def calc_modality_divergence(m1_mu, m1_logvar, m2_mu, m2_logvar, flags):
if flags.modality_poe:
kld_batch = calc_kl_divergence(m1_mu, m1_logvar, m2_mu, m2_logvar, norm_value=flags.batch_size).sum();
return kld_batch;
else:
uniform_mu = torch.zeros(m1_mu.shape)
uniform_logvar = torch.zeros(m1_logvar.shape)
klds = torch.zeros(3,3)
klds_modonly = torch.zeros(2,2)
if flags.cuda:
klds = klds.cuda();
klds_modonly = klds_modonly.cuda();
uniform_mu = uniform_mu.cuda();
uniform_logvar = uniform_logvar.cuda();
mus = [uniform_mu, m1_mu, m2_mu]
logvars = [uniform_logvar, m1_logvar, m2_logvar]
for i in range(1, len(mus)): # CAREFUL: index starts from one, not zero
for j in range(0, len(mus)):
kld = calc_kl_divergence(mus[i], logvars[i], mus[j], logvars[j], norm_value=flags.batch_size);
klds[i,j] = kld;
if i >= 1 and j >= 1:
klds_modonly[i-1,j-1] = kld;
klds = klds.sum()/(len(mus)*(len(mus)-1))
klds_modonly = klds_modonly.sum()/((len(mus)-1)*(len(mus)-1));
return [klds, klds_modonly];
| 5,927 | 38 | 110 | py |
BraVL | BraVL-master/BraVL_fMRI/divergence_measures/kl_div.py | import math
import torch
from utils.utils import reweight_weights
def calc_kl_divergence(mu0, logvar0, mu1=None, logvar1=None, norm_value=None):
if mu1 is None or logvar1 is None:
KLD = -0.5 * torch.sum(1 - logvar0.exp() - mu0.pow(2) + logvar0)
else:
KLD = -0.5 * (torch.sum(1 - logvar0.exp()/logvar1.exp() - (mu0-mu1).pow(2)/logvar1.exp() + logvar0 - logvar1))
if norm_value is not None:
KLD = KLD / float(norm_value);
return KLD
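# For reference, the closed form implemented above: with q = N(mu0, exp(logvar0))
# and p = N(mu1, exp(logvar1)) (a standard normal when mu1/logvar1 are omitted),
#   KL(q || p) = -0.5 * sum(1 + logvar0 - logvar1
#                           - exp(logvar0 - logvar1)
#                           - (mu0 - mu1)^2 / exp(logvar1)),
# which matches both branches term by term.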
def calc_gaussian_scaling_factor(PI, mu1, logvar1, mu2=None, logvar2=None, norm_value=None):
d = mu1.shape[1];
if mu2 is None or logvar2 is None:
# print('S_11: ' + str(torch.sum(1/((2*PI*(logvar1.exp() + 1)).pow(0.5)))))
# print('S_12: ' + str(torch.sum(torch.exp(-0.5*(mu1.pow(2)/(logvar1.exp()+1))))))
S_pre = (1/(2*PI).pow(d/2))*torch.sum((logvar1.exp() + 1), dim=1).pow(0.5);
S = S_pre*torch.sum((-0.5*(mu1.pow(2)/(logvar1.exp()+1))).exp(), dim=1);
S = torch.sum(S)
else:
# print('S_21: ' + str(torch.sum(1/((2*PI).pow(d/2)*(logvar1.exp()+logvar2.exp()).pow(0.5)))));
# print('S_22: ' + str(torch.sum(torch.exp(-0.5 * ((mu1 - mu2).pow(2) / (logvar1.exp() + logvar2.exp()))))));
S_pre = torch.sum(1/((2*PI).pow(d/2)*(logvar1.exp()+logvar2.exp())), dim=1).pow(0.5)
S = S_pre*torch.sum(torch.exp(-0.5*((mu1-mu2).pow(2)/(logvar1.exp()+logvar2.exp()))), dim=1);
S = torch.sum(S)
if norm_value is not None:
S = S / float(norm_value);
# print('S: ' + str(S))
return S
def calc_gaussian_scaling_factor_self(PI, logvar1, norm_value=None):
d = logvar1.shape[1];
S = (1/(2*PI).pow(d/2))*torch.sum(logvar1.exp(), dim=1).pow(0.5);
S = torch.sum(S);
# S = torch.sum(1 / (2*(PI*torch.exp(logvar1)).pow(0.5)));
if norm_value is not None:
S = S / float(norm_value);
# print('S self: ' + str(S))
return S
#def calc_kl_divergence_lb_gauss_mixture(flags, index, mu1, logvar1, mus, logvars, norm_value=None):
# klds = torch.zeros(mus.shape[0]+1)
# if flags.cuda:
# klds = klds.cuda();
#
# klds[0] = calc_kl_divergence(mu1, logvar1, norm_value=norm_value);
# for k in range(0, mus.shape[0]):
# if k == index:
# kld = 0.0;
# else:
# kld = calc_kl_divergence(mu1, logvar1, mus[k], logvars[k], norm_value=norm_value);
# klds[k+1] = kld;
# kld_mixture = klds.mean();
# return kld_mixture;
def calc_kl_divergence_lb_gauss_mixture(flags, index, mu1, logvar1, mus, logvars, norm_value=None):
PI = torch.Tensor([math.pi]);
w_modalities = torch.Tensor(flags.alpha_modalities);
if flags.cuda:
PI = PI.cuda();
w_modalities = w_modalities.cuda();
w_modalities = reweight_weights(w_modalities);
denom = w_modalities[0]*calc_gaussian_scaling_factor(PI, mu1, logvar1, norm_value=norm_value);
for k in range(0, len(mus)):
if index == k:
denom += w_modalities[k+1]*calc_gaussian_scaling_factor_self(PI, logvar1, norm_value=norm_value);
else:
denom += w_modalities[k+1]*calc_gaussian_scaling_factor(PI, mu1, logvar1, mus[k], logvars[k], norm_value=norm_value)
lb = -torch.log(denom);
return lb;
def calc_kl_divergence_ub_gauss_mixture(flags, index, mu1, logvar1, mus, logvars, entropy, norm_value=None):
PI = torch.Tensor([math.pi]);
w_modalities = torch.Tensor(flags.alpha_modalities);
if flags.cuda:
PI = PI.cuda();
w_modalities = w_modalities.cuda();
w_modalities = reweight_weights(w_modalities);
nom = calc_gaussian_scaling_factor_self(PI, logvar1, norm_value=norm_value);
kl_div = calc_kl_divergence(mu1, logvar1, norm_value=norm_value);
print('kl div uniform: ' + str(kl_div))
denom = w_modalities[0]*torch.min(torch.Tensor([kl_div.exp(), 100000]));
for k in range(0, len(mus)):
if index == k:
denom += w_modalities[k+1];
else:
kl_div = calc_kl_divergence(mu1, logvar1, mus[k], logvars[k], norm_value=norm_value)
print('kl div ' + str(k) + ': ' + str(kl_div))
denom += w_modalities[k+1]*torch.min(torch.Tensor([kl_div.exp(), 100000]));
ub = torch.log(nom) - torch.log(denom) + entropy;
return ub;
def calc_entropy_gauss(flags, logvar, norm_value=None):
PI = torch.Tensor([math.pi]);
if flags.cuda:
PI = PI.cuda();
ent = 0.5*torch.sum(torch.log(2*PI) + logvar + 1)
if norm_value is not None:
ent = ent / norm_value;
return ent; | 4,561 | 40.099099 | 128 | py |
BraVL | BraVL-master/BraVL_fMRI/utils/BaseMMVae.py | from abc import ABC, abstractmethod
import os
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.distributions as dist
from divergence_measures.mm_div import calc_alphaJSD_modalities
from divergence_measures.mm_div import calc_group_divergence_moe
from divergence_measures.mm_div import poe
from utils import utils
class BaseMMVae(ABC, nn.Module):
def __init__(self, flags, modalities, subsets):
super(BaseMMVae, self).__init__()
self.num_modalities = len(modalities.keys());
self.flags = flags;
self.modalities = modalities;
self.subsets = subsets;
self.set_fusion_functions();
encoders = nn.ModuleDict();
decoders = nn.ModuleDict();
lhoods = dict();
for m, m_key in enumerate(sorted(modalities.keys())):
encoders[m_key] = modalities[m_key].encoder;
decoders[m_key] = modalities[m_key].decoder;
lhoods[m_key] = modalities[m_key].likelihood;
self.encoders = encoders;
self.decoders = decoders;
self.lhoods = lhoods;
def reparameterize(self, mu, logvar):
std = logvar.mul(0.5).exp_()
eps = Variable(std.data.new(std.size()).normal_())
return eps.mul(std).add_(mu)
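    # Reparameterization trick: z = mu + sigma * eps with eps ~ N(0, I), so the
    # sample stays differentiable w.r.t. mu and logvar. `Variable` is a legacy
    # no-op since PyTorch 0.4; an equivalent modern form would be
    #   std = torch.exp(0.5 * logvar); return mu + std * torch.randn_like(std)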
def set_fusion_functions(self):
weights = utils.reweight_weights(torch.Tensor(self.flags.alpha_modalities));
self.weights = weights.to(self.flags.device);
if self.flags.modality_moe:
self.modality_fusion = self.moe_fusion;
            self.fusion_condition = self.fusion_condition_moe;
            self.calc_joint_divergence = self.divergence_static_prior;
elif self.flags.modality_jsd:
self.modality_fusion = self.moe_fusion;
self.fusion_condition = self.fusion_condition_moe;
self.calc_joint_divergence = self.divergence_dynamic_prior;
elif self.flags.modality_poe:
self.modality_fusion = self.poe_fusion;
self.fusion_condition = self.fusion_condition_poe;
self.calc_joint_divergence = self.divergence_static_prior;
elif self.flags.joint_elbo:
self.modality_fusion = self.poe_fusion;
self.fusion_condition = self.fusion_condition_joint;
self.calc_joint_divergence = self.divergence_static_prior;
def divergence_static_prior(self, mus, logvars, weights=None):
if weights is None:
weights=self.weights;
weights = weights.clone();
weights = utils.reweight_weights(weights);
div_measures = calc_group_divergence_moe(self.flags,
mus,
logvars,
weights,
normalization=self.flags.batch_size);
divs = dict();
        divs['joint_divergence'] = div_measures[0];
        divs['individual_divs'] = div_measures[1];
        divs['dyn_prior'] = None;
return divs;
def divergence_dynamic_prior(self, mus, logvars, weights=None):
if weights is None:
weights = self.weights;
div_measures = calc_alphaJSD_modalities(self.flags,
mus,
logvars,
weights,
normalization=self.flags.batch_size);
divs = dict();
divs['joint_divergence'] = div_measures[0];
divs['individual_divs'] = div_measures[1];
divs['dyn_prior'] = div_measures[2];
return divs;
def moe_fusion(self, mus, logvars, weights=None):
if weights is None:
weights = self.weights;
weights = utils.reweight_weights(weights);
#mus = torch.cat(mus, dim=0);
#logvars = torch.cat(logvars, dim=0);
mu_moe, logvar_moe = utils.mixture_component_selection(self.flags,
mus,
logvars,
weights);
return [mu_moe, logvar_moe];
def poe_fusion(self, mus, logvars, weights=None):
if (self.flags.modality_poe or mus.shape[0] ==
len(self.modalities.keys())):
num_samples = mus[0].shape[0];
mus = torch.cat((mus, torch.zeros(1, num_samples,
self.flags.class_dim).to(self.flags.device)),
dim=0);
logvars = torch.cat((logvars, torch.zeros(1, num_samples,
self.flags.class_dim).to(self.flags.device)),
dim=0);
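            # The appended zero-mean / zero-logvar component acts as a standard
            # normal "prior expert" in the product, the usual trick to keep the
            # PoE posterior well-behaved when some experts are uninformative.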
#mus = torch.cat(mus, dim=0);
#logvars = torch.cat(logvars, dim=0);
mu_poe, logvar_poe = poe(mus, logvars);
return [mu_poe, logvar_poe];
def fusion_condition_moe(self, subset, input_batch=None):
if len(subset) == 1:
return True;
else:
return False;
def fusion_condition_poe(self, subset, input_batch=None):
if len(subset) == len(input_batch.keys()):
return True;
else:
return False;
def fusion_condition_joint(self, subset, input_batch=None):
return True;
def forward(self, input_batch,K=1):
latents = self.inference(input_batch);
results = dict();
results['latents'] = latents;
results['group_distr'] = latents['joint'];
class_embeddings = self.reparameterize(latents['joint'][0],
latents['joint'][1]);
#### For CUBO ####
qz_x = dist.Normal(latents['joint'][0],latents['joint'][1].mul(0.5).exp_())
zss = qz_x.rsample(torch.Size([K]))
div = self.calc_joint_divergence(latents['mus'],
latents['logvars'],
latents['weights']);
for k, key in enumerate(div.keys()):
results[key] = div[key];
results_rec = dict();
px_zs = dict();
enc_mods = latents['modalities'];
for m, m_key in enumerate(self.modalities.keys()):
if m_key in input_batch.keys():
m_s_mu, m_s_logvar = enc_mods[m_key + '_style'];
if self.flags.factorized_representation:
m_s_embeddings = self.reparameterize(mu=m_s_mu, logvar=m_s_logvar);
else:
m_s_embeddings = None;
m_rec = self.lhoods[m_key](*self.decoders[m_key](m_s_embeddings, class_embeddings));
px_z = self.lhoods[m_key](*self.decoders[m_key](m_s_embeddings, zss));
results_rec[m_key] = m_rec;
px_zs[m_key] = px_z
results['rec'] = results_rec;
results['class_embeddings'] = class_embeddings
results['qz_x'] = qz_x
results['zss'] = zss
results['px_zs'] = px_zs
return results;
def encode(self, input_batch):
latents = dict();
for m, m_key in enumerate(self.modalities.keys()):
if m_key in input_batch.keys():
i_m = input_batch[m_key];
l = self.encoders[m_key](i_m)
latents[m_key + '_style'] = l[:2]
latents[m_key] = l[2:]
else:
latents[m_key + '_style'] = [None, None];
latents[m_key] = [None, None];
return latents;
def inference(self, input_batch, num_samples=None):
if num_samples is None:
num_samples = self.flags.batch_size;
latents = dict();
enc_mods = self.encode(input_batch);
latents['modalities'] = enc_mods;
mus = torch.Tensor().to(self.flags.device);
logvars = torch.Tensor().to(self.flags.device);
distr_subsets = dict();
for k, s_key in enumerate(self.subsets.keys()):
if s_key != '':
mods = self.subsets[s_key];
mus_subset = torch.Tensor().to(self.flags.device);
logvars_subset = torch.Tensor().to(self.flags.device);
mods_avail = True
for m, mod in enumerate(mods):
if mod.name in input_batch.keys():
mus_subset = torch.cat((mus_subset,
enc_mods[mod.name][0].unsqueeze(0)),
dim=0);
logvars_subset = torch.cat((logvars_subset,
enc_mods[mod.name][1].unsqueeze(0)),
dim=0);
else:
mods_avail = False;
if mods_avail:
weights_subset = ((1/float(len(mus_subset)))*
torch.ones(len(mus_subset)).to(self.flags.device));
s_mu, s_logvar = self.modality_fusion(mus_subset,
logvars_subset,
                                                  weights_subset);  # PoE within each subset
distr_subsets[s_key] = [s_mu, s_logvar];
if self.fusion_condition(mods, input_batch):
mus = torch.cat((mus, s_mu.unsqueeze(0)), dim=0);
logvars = torch.cat((logvars, s_logvar.unsqueeze(0)),
dim=0);
if self.flags.modality_jsd:
num_samples = mus[0].shape[0]
mus = torch.cat((mus, torch.zeros(1, num_samples,
self.flags.class_dim).to(self.flags.device)),
dim=0);
logvars = torch.cat((logvars, torch.zeros(1, num_samples,
self.flags.class_dim).to(self.flags.device)),
dim=0);
#weights = (1/float(len(mus)))*torch.ones(len(mus)).to(self.flags.device);
weights = (1/float(mus.shape[0]))*torch.ones(mus.shape[0]).to(self.flags.device);
        joint_mu, joint_logvar = self.moe_fusion(mus, logvars, weights);  # MoE across subsets
#mus = torch.cat(mus, dim=0);
#logvars = torch.cat(logvars, dim=0);
latents['mus'] = mus;
latents['logvars'] = logvars;
latents['weights'] = weights;
latents['joint'] = [joint_mu, joint_logvar];
latents['subsets'] = distr_subsets;
return latents;
def generate(self, num_samples=None):
if num_samples is None:
num_samples = self.flags.batch_size;
mu = torch.zeros(num_samples,
self.flags.class_dim).to(self.flags.device);
logvar = torch.zeros(num_samples,
self.flags.class_dim).to(self.flags.device);
z_class = self.reparameterize(mu, logvar);
z_styles = self.get_random_styles(num_samples);
random_latents = {'content': z_class, 'style': z_styles};
random_samples = self.generate_from_latents(random_latents);
return random_samples;
def generate_sufficient_statistics_from_latents(self, latents):
suff_stats = dict();
content = latents['content']
for m, m_key in enumerate(self.modalities.keys()):
s = latents['style'][m_key];
cg = self.lhoods[m_key](*self.decoders[m_key](s, content));
suff_stats[m_key] = cg;
return suff_stats;
def generate_from_latents(self, latents):
suff_stats = self.generate_sufficient_statistics_from_latents(latents);
cond_gen = dict();
for m, m_key in enumerate(latents['style'].keys()):
cond_gen_m = suff_stats[m_key].mean;
cond_gen[m_key] = cond_gen_m;
return cond_gen;
def cond_generation(self, latent_distributions, num_samples=None):
if num_samples is None:
num_samples = self.flags.batch_size;
style_latents = self.get_random_styles(num_samples);
cond_gen_samples = dict();
for k, key in enumerate(latent_distributions.keys()):
[mu, logvar] = latent_distributions[key];
content_rep = self.reparameterize(mu=mu, logvar=logvar);
latents = {'content': content_rep, 'style': style_latents}
cond_gen_samples[key] = self.generate_from_latents(latents);
return cond_gen_samples;
def get_random_style_dists(self, num_samples):
styles = dict();
for k, m_key in enumerate(self.modalities.keys()):
mod = self.modalities[m_key];
s_mu = torch.zeros(num_samples,
mod.style_dim).to(self.flags.device)
s_logvar = torch.zeros(num_samples,
mod.style_dim).to(self.flags.device);
styles[m_key] = [s_mu, s_logvar];
return styles;
def get_random_styles(self, num_samples):
styles = dict();
for k, m_key in enumerate(self.modalities.keys()):
if self.flags.factorized_representation:
mod = self.modalities[m_key];
z_style = torch.randn(num_samples, mod.style_dim);
z_style = z_style.to(self.flags.device);
else:
z_style = None;
styles[m_key] = z_style;
return styles;
def save_networks(self):
for k, m_key in enumerate(self.modalities.keys()):
torch.save(self.encoders[m_key].state_dict(),
os.path.join(self.flags.dir_checkpoints, 'enc_' +
self.modalities[m_key].name))
torch.save(self.decoders[m_key].state_dict(),
os.path.join(self.flags.dir_checkpoints, 'dec_' +
self.modalities[m_key].name))
| 14,033 | 41.017964 | 121 | py |
BraVL | BraVL-master/BraVL_fMRI/utils/utils.py | import os
import torch
import torch.distributions as dist
# Print iterations progress
def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\r')
# Print New Line on Complete
if iteration == total:
print()
def get_likelihood(name):
    if name == 'laplace':
        pz = dist.Laplace
    elif name == 'bernoulli':
        pz = dist.Bernoulli
    elif name == 'normal':
        pz = dist.Normal
    elif name == 'categorical':
        pz = dist.OneHotCategorical
    else:
        print('likelihood not implemented')
        pz = None
    return pz
def reweight_weights(w):
w = w / w.sum()
return w
def mixture_component_selection(flags, mus, logvars, w_modalities=None):
#if not defined, take pre-defined weights
num_components = mus.shape[0]
num_samples = mus.shape[1]
if w_modalities is None:
w_modalities = torch.Tensor(flags.alpha_modalities).to(flags.device)
idx_start = []
idx_end = []
for k in range(0, num_components):
if k == 0:
i_start = 0
else:
i_start = int(idx_end[k-1])
if k == w_modalities.shape[0]-1:
i_end = num_samples
else:
i_end = i_start + int(torch.floor(num_samples*w_modalities[k]))
idx_start.append(i_start)
idx_end.append(i_end)
idx_end[-1] = num_samples
mu_sel = torch.cat([mus[k, idx_start[k]:idx_end[k], :] for k in range(w_modalities.shape[0])])
logvar_sel = torch.cat([logvars[k, idx_start[k]:idx_end[k], :] for k in range(w_modalities.shape[0])])
return [mu_sel, logvar_sel]
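# The mixture is realized by slicing the batch rather than by categorical
# sampling: component k contributes roughly floor(num_samples * w_k) consecutive
# samples. E.g. with two components, weights (0.5, 0.5) and a batch of 64,
# samples 0..31 come from component 0 and samples 32..63 from component 1.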
def calc_elbo(exp, modality, recs, klds):
flags = exp.flags
mods = exp.modalities
s_weights = exp.style_weights
r_weights = exp.rec_weights
kld_content = klds['content']
if modality == 'joint':
w_style_kld = 0.0
w_rec = 0.0
klds_style = klds['style']
for k, m_key in enumerate(mods.keys()):
w_style_kld += s_weights[m_key] * klds_style[m_key]
w_rec += r_weights[m_key] * recs[m_key]
kld_style = w_style_kld
rec_error = w_rec
else:
beta_style_mod = s_weights[modality]
#rec_weight_mod = r_weights[modality]
rec_weight_mod = 1.0
kld_style = beta_style_mod * klds['style'][modality]
rec_error = rec_weight_mod * recs[modality]
div = flags.beta_content * kld_content + flags.beta_style * kld_style
elbo = rec_error + flags.beta * div
return elbo
def save_and_log_flags(flags):
#filename_flags = os.path.join(flags.dir_experiment_run, 'flags.json')
#with open(filename_flags, 'w') as f:
# json.dump(flags.__dict__, f, indent=2, sort_keys=True)
filename_flags_rar = os.path.join(flags.dir_experiment_run, 'flags.rar')
torch.save(flags, filename_flags_rar)
str_args = ''
for k, key in enumerate(sorted(flags.__dict__.keys())):
str_args = str_args + '\n' + key + ': ' + str(flags.__dict__[key])
return str_args
class Flatten(torch.nn.Module):
def forward(self, x):
return x.view(x.size(0), -1)
class Unflatten(torch.nn.Module):
def __init__(self, ndims):
super(Unflatten, self).__init__()
self.ndims = ndims
def forward(self, x):
return x.view(x.size(0), *self.ndims)
| 4,100 | 32.892562 | 106 | py |
BraVL | BraVL-master/BraVL_fMRI/utils/BaseFlags.py | import os
import argparse
import torch
import scipy.io as sio
parser = argparse.ArgumentParser()
# TRAINING
parser.add_argument('--batch_size', type=int, default=512, help="batch size for training")
parser.add_argument('--initial_learning_rate', type=float, default=0.0001, help="starting learning rate")
parser.add_argument('--beta_1', type=float, default=0.9, help="default beta_1 val for adam")
parser.add_argument('--beta_2', type=float, default=0.999, help="default beta_2 val for adam")
parser.add_argument('--start_epoch', type=int, default=0, help="flag to set the starting epoch for training")
parser.add_argument('--end_epoch', type=int, default=100, help="flag to indicate the final epoch of training")
# DATA DEPENDENT
parser.add_argument('--class_dim', type=int, default=32, help="dimension of common factor latent space")
# SAVE and LOAD
parser.add_argument('--mm_vae_save', type=str, default='mm_vae', help="model save for vae_bimodal")
parser.add_argument('--load_saved', type=bool, default=False, help="flag to indicate if a saved model will be loaded")
# DIRECTORIES
# experiments
parser.add_argument('--dir_experiment', type=str, default='./logs', help="directory to save logs in")
parser.add_argument('--dataname', type=str, default='DIR-Wiki', help="dataset")
parser.add_argument('--sbj', type=str, default='sub-03', help="fmri subject")
parser.add_argument('--roi', type=str, default='LVC_HVC_IT', help="ROI")
parser.add_argument('--text_model', type=str, default='GPTNeo', help="text embedding model")
parser.add_argument('--image_model', type=str, default='pytorch/repvgg_b3g4', help="image embedding model")
parser.add_argument('--stability_ratio', type=str, default='', help="stability_ratio")
parser.add_argument('--test_type', type=str, default='zsl', help='normal or zsl')
parser.add_argument('--aug_type', type=str, default='image_text', help='no_aug, image_text, image_only, text_only')
#multimodal
parser.add_argument('--method', type=str, default='joint_elbo', help='choose method for training the model')
parser.add_argument('--modality_jsd', type=bool, default=False, help="modality_jsd")
parser.add_argument('--modality_poe', type=bool, default=False, help="modality_poe")
parser.add_argument('--modality_moe', type=bool, default=False, help="modality_moe")
parser.add_argument('--joint_elbo', type=bool, default=False, help="joint_elbo")
parser.add_argument('--poe_unimodal_elbos', type=bool, default=True, help="unimodal_klds")
parser.add_argument('--factorized_representation', action='store_true', default=False, help="factorized_representation")
# LOSS TERM WEIGHTS
parser.add_argument('--beta', type=float, default=0.0, help="default initial weight of sum of weighted divergence terms")
parser.add_argument('--beta_style', type=float, default=1.0, help="default weight of sum of weighted style divergence terms")
parser.add_argument('--beta_content', type=float, default=1.0, help="default weight of sum of weighted content divergence terms")
parser.add_argument('--lambda1', type=float, default=0.001, help="default weight of intra_mi terms")
parser.add_argument('--lambda2', type=float, default=0.001, help="default weight of inter_mi terms")
FLAGS = parser.parse_args()
data_dir_root = os.path.join('./data', FLAGS.dataname)
brain_dir = os.path.join(data_dir_root, 'brain_feature', FLAGS.roi, FLAGS.sbj)
image_dir_train = os.path.join(data_dir_root, 'visual_feature/ImageNetTraining', FLAGS.image_model+'-PCA', FLAGS.sbj)
text_dir_train = os.path.join(data_dir_root, 'textual_feature/ImageNetTraining/text', FLAGS.text_model, FLAGS.sbj)
train_brain = sio.loadmat(os.path.join(brain_dir, 'fmri_train_data'+FLAGS.stability_ratio+'.mat'))['data'].astype('double')
train_image = sio.loadmat(os.path.join(image_dir_train, 'feat_pca_train.mat'))['data'].astype('double')#[:,0:3000]
train_text = sio.loadmat(os.path.join(text_dir_train, 'text_feat_train.mat'))['data'].astype('double')
train_brain = torch.from_numpy(train_brain)
train_image = torch.from_numpy(train_image)
train_text = torch.from_numpy(train_text)
dim_brain = train_brain.shape[1]
dim_image = train_image.shape[1]
dim_text = train_text.shape[1]
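# The modality dimensions are only known after loading the data, so they are
# registered below as defaults on the same parser and the command line is parsed
# a second time; the final FLAGS then carries both the user-supplied flags and
# the data-dependent input dims.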
parser.add_argument('--m1_dim', type=int, default=dim_brain, help="dimension of modality brain")
parser.add_argument('--m2_dim', type=int, default=dim_image, help="dimension of modality image")
parser.add_argument('--m3_dim', type=int, default=dim_text, help="dimension of modality text")
parser.add_argument('--data_dir_root', type=str, default=data_dir_root, help="data dir")
FLAGS = parser.parse_args()
print(FLAGS)
| 4,581 | 62.638889 | 129 | py |
NeuralVoicePuppetry | NeuralVoicePuppetry-master/Neural Rendering Network/models/NeuralTexture.py | import os
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from util.image_pool import ImagePool
from .base_model import BaseModel
from . import networks
import numpy as np
import functools
from PIL import Image
from util import util
from torchvision import models
from collections import namedtuple
################
### HELPER ###
################
from BaselModel.basel_model import *
INVALID_UV = -1.0
#####################################
######## static texture #########
#####################################
class StaticNeuralTexture(nn.Module):
def __init__(self, texture_dimensions, texture_features):
super(StaticNeuralTexture, self).__init__()
self.texture_dimensions = texture_dimensions #256 #texture dimensions
self.out_ch = texture_features # output feature, after evaluating the texture
self.register_parameter('data', torch.nn.Parameter(torch.randn(1, self.out_ch, self.texture_dimensions, self.texture_dimensions, requires_grad=True)))
####
def forward(self, expressions, audio_features, uv_inputs):
b = audio_features.shape[0] # batchsize
if b != 1:
print('ERROR: NeuralTexture forward only implemented for batchsize==1')
exit(-1)
uvs = torch.stack([uv_inputs[:,0,:,:], uv_inputs[:,1,:,:]], 3)
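        # grid_sample expects sampling locations normalized to [-1, 1] per axis,
        # so the uv inputs are assumed to already be in that range; out-of-range
        # values (e.g. the INVALID_UV sentinel) are clamped to the texture edge
        # by padding_mode='border'.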
return torch.nn.functional.grid_sample(self.data, uvs, mode='bilinear', padding_mode='border')
#####################################
######## audio texture ##########
#####################################
class DynamicNeuralTextureAudio(nn.Module):
def __init__(self, texture_dimensions, texture_features_intermediate, texture_features):
super(DynamicNeuralTextureAudio, self).__init__()
self.texture_features_intermediate = texture_features_intermediate #16 #features stored in texture
self.texture_dimensions = texture_dimensions #256 #texture dimensions
self.out_ch = texture_features # output feature, after evaluating the texture
# input 16 x 29
self.convNet = nn.Sequential(
nn.Conv2d(29, 32, kernel_size=(3,1), stride=(2,1), padding=(1,0), bias=True), # 29 x 16 x 1 => 32 x 8 x 1
nn.LeakyReLU(0.02, True),
nn.Conv2d(32, 32, kernel_size=(3,1), stride=(2,1), padding=(1,0), bias=True), # 32 x 8 x 1 => 32 x 4 x 1
nn.LeakyReLU(0.02, True),
nn.Conv2d(32, 64, kernel_size=(3,1), stride=(2,1), padding=(1,0), bias=True), # 32 x 4 x 1 => 64 x 2 x 1
nn.LeakyReLU(0.2, True),
nn.Conv2d(64, 64, kernel_size=(3,1), stride=(2,1), padding=(1,0), bias=True), # 64 x 2 x 1 => 64 x 1 x 1
nn.LeakyReLU(0.2, True),
)
conv_output_size = 64
self.fullNet = nn.Sequential(
nn.Linear(in_features = conv_output_size, out_features=128, bias = True),
nn.LeakyReLU(0.02),
nn.Linear(in_features = 128, out_features=64, bias = True),
nn.LeakyReLU(0.02),
nn.Linear(in_features = 64, out_features=self.out_ch*4*4*self.texture_features_intermediate, bias = True),
nn.Tanh()
)
self.register_parameter('data', torch.nn.Parameter(torch.randn(1, self.texture_features_intermediate, self.texture_dimensions, self.texture_dimensions, requires_grad=True)))
####
def forward(self, expressions, audio_features, uv_inputs):
b = audio_features.shape[0] # batchsize
if b != 1:
print('ERROR: NeuralTexture forward only implemented for batchsize==1')
exit(-1)
# b x 1 x 16 x 29 --> transpose
audio_features = torch.transpose(audio_features, 1, 3)
audio_conv_res = self.convNet( audio_features )
conv_filter = torch.reshape( self.fullNet( torch.reshape( audio_conv_res, (b,1,-1))), (self.out_ch,self.texture_features_intermediate,4,4))
self.tex_eval = nn.functional.conv2d(self.data, conv_filter, stride=1, padding=2)
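        # The audio code is reshaped into a per-frame 4x4 convolution kernel (a
        # dynamic filter) that is applied to the learned texture; with stride 1
        # and padding 2 the filtered texture keeps roughly its spatial size and
        # is then sampled at the rasterized uv coordinates below.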
uvs = torch.stack([uv_inputs[:,0,:,:], uv_inputs[:,1,:,:]], 3)
return torch.nn.functional.grid_sample(self.tex_eval, uvs, mode='bilinear', padding_mode='border')
#####################################
###### expression texture #######
#####################################
class DynamicNeuralTextureExpression(nn.Module):
def __init__(self, texture_dimensions, texture_features_intermediate, texture_features):
super(DynamicNeuralTextureExpression, self).__init__()
self.texture_features_intermediate = texture_features_intermediate #16 #features stored in texture
self.texture_dimensions = texture_dimensions #256 #texture dimensions
self.out_ch = texture_features # output feature, after evaluating the texture
# input: 76
input_size = 76
self.fullNet = nn.Sequential(
nn.Linear(in_features = input_size, out_features=128, bias = True),
nn.LeakyReLU(0.02),
nn.Linear(in_features = 128, out_features=64, bias = True),
nn.LeakyReLU(0.02),
nn.Linear(in_features = 64, out_features=self.out_ch*4*4*self.texture_features_intermediate, bias = True),
nn.Tanh()
)
self.register_parameter('data', torch.nn.Parameter(torch.randn(1, self.texture_features_intermediate, self.texture_dimensions, self.texture_dimensions, requires_grad=True)))
####
def forward(self, expressions, audio_features, uv_inputs):
b = expressions.shape[0] # batchsize
if b != 1:
print('ERROR: NeuralTexture forward only implemented for batchsize==1')
exit(-1)
conv_filter = torch.reshape( self.fullNet( torch.reshape( expressions, (1,1,-1))), (self.out_ch,self.texture_features_intermediate,4,4))
self.tex_eval = nn.functional.conv2d(self.data, conv_filter, stride=1, padding=2)
uvs = torch.stack([uv_inputs[:,0,:,:], uv_inputs[:,1,:,:]], 3)
return torch.nn.functional.grid_sample(self.tex_eval, uvs, mode='bilinear', padding_mode='border')
| 6,109 | 46 | 181 | py |
NeuralVoicePuppetry | NeuralVoicePuppetry-master/Neural Rendering Network/models/UNET.py | import os
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from util.image_pool import ImagePool
from .base_model import BaseModel
from . import networks
import numpy as np
import functools
from PIL import Image
from util import util
from torchvision import models
from collections import namedtuple
class UnetSkipConnectionBlock(nn.Module):
def __init__(self, outer_nc, inner_nc, input_nc=None, submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
super(UnetSkipConnectionBlock, self).__init__()
self.outermost = outermost
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
if input_nc is None:
input_nc = outer_nc
#use_norm = False
use_norm = True
downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
downrelu = nn.LeakyReLU(0.2, True)
if use_norm: downnorm = norm_layer(inner_nc)
uprelu = nn.ReLU(True)
if use_norm: upnorm = norm_layer(outer_nc)
if outermost:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1)
down = [downconv]
up = [uprelu, upconv, nn.Tanh()]
model = down + [submodule] + up
elif innermost:
upconv = nn.ConvTranspose2d(inner_nc, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
down = [downrelu, downconv]
if use_norm: up = [uprelu, upconv, upnorm]
else: up = [uprelu, upconv]
model = down + up
else:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
            if use_norm: down = [downrelu, downconv, downnorm]
            else: down = [downrelu, downconv]
if use_norm: up = [uprelu, upconv, upnorm]
else: up = [uprelu, upconv]
if use_dropout:
model = down + [submodule] + up + [nn.Dropout(0.5)]
else:
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost:
return self.model(x)
else:
return torch.cat([x, self.model(x)], 1)
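# The outermost block returns the raw decoder output; every inner block
# concatenates its input with its output along the channel axis. This
# concatenation is what realizes the U-Net skip connections and why the decoder
# convolutions above take inner_nc * 2 input channels.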
# with bilinear upsampling
class UnetSkipConnectionBlock_BU(nn.Module):
def __init__(self, outer_nc, inner_nc, input_nc=None, submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
super(UnetSkipConnectionBlock_BU, self).__init__()
self.outermost = outermost
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
if input_nc is None:
input_nc = outer_nc
#use_norm = False
use_norm = True
downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
downrelu = nn.LeakyReLU(0.2, True)
if use_norm: downnorm = norm_layer(inner_nc)
uprelu = nn.ReLU(True)
if use_norm: upnorm = norm_layer(outer_nc)
if outermost:
#upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1)
upconv = nn.Sequential(
nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False),
nn.Conv2d(inner_nc * 2, outer_nc, kernel_size=3, stride=1, padding=1, bias=use_bias),
nn.LeakyReLU(0.2, True)
)
down = [downconv]
up = [uprelu, upconv, nn.Tanh()]
model = down + [submodule] + up
elif innermost:
#upconv = nn.ConvTranspose2d(inner_nc, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
upconv = nn.Sequential(
nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False),
nn.Conv2d(inner_nc, outer_nc, kernel_size=3, stride=1, padding=1, bias=use_bias),
nn.LeakyReLU(0.2, True)
)
down = [downrelu, downconv]
if use_norm: up = [uprelu, upconv, upnorm]
else: up = [uprelu, upconv]
model = down + up
else:
#upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
upconv = nn.Sequential(
nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False),
nn.Conv2d(inner_nc * 2, outer_nc, kernel_size=3, stride=1, padding=1, bias=use_bias),
nn.LeakyReLU(0.2, True)
)
            if use_norm: down = [downrelu, downconv, downnorm]
            else: down = [downrelu, downconv]
if use_norm: up = [uprelu, upconv, upnorm]
else: up = [uprelu, upconv]
if use_dropout:
model = down + [submodule] + up + [nn.Dropout(0.5)]
else:
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost:
return self.model(x)
else:
return torch.cat([x, self.model(x)], 1)
# dilated convs, without downsampling
class UnetSkipConnectionBlock_DC(nn.Module):
def __init__(self, outer_nc, inner_nc, input_nc=None, submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False, dilation=1):
super(UnetSkipConnectionBlock_DC, self).__init__()
self.outermost = outermost
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
if input_nc is None:
input_nc = outer_nc
#use_norm = False
use_norm = True
#downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4, stride=2, dilation=dilation, padding=1, bias=use_bias)
downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=3, stride=1, dilation=dilation, padding=1*dilation, bias=use_bias)
downrelu = nn.LeakyReLU(0.2, True)
if use_norm: downnorm = norm_layer(inner_nc)
uprelu = nn.ReLU(True)
if use_norm: upnorm = norm_layer(outer_nc)
if outermost:
#upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1)
upconv = nn.Sequential(
nn.Conv2d(inner_nc * 2, outer_nc, kernel_size=3, stride=1, dilation=1, padding=1, bias=use_bias),
nn.LeakyReLU(0.2, True)
)
down = [downconv]
up = [uprelu, upconv, nn.Tanh()]
model = down + [submodule] + up
elif innermost:
#upconv = nn.ConvTranspose2d(inner_nc, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
upconv = nn.Sequential(
nn.Conv2d(inner_nc, outer_nc, kernel_size=3, stride=1, dilation=1, padding=1, bias=use_bias),
nn.LeakyReLU(0.2, True)
)
down = [downrelu, downconv]
if use_norm: up = [uprelu, upconv, upnorm]
else: up = [uprelu, upconv]
model = down + up
else:
#upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
upconv = nn.Sequential(
nn.Conv2d(inner_nc * 2, outer_nc, kernel_size=3, stride=1, dilation=1, padding=1, bias=use_bias),
nn.LeakyReLU(0.2, True)
)
            if use_norm: down = [downrelu, downconv, downnorm]
            else: down = [downrelu, downconv]
if use_norm: up = [uprelu, upconv, upnorm]
else: up = [uprelu, upconv]
if use_dropout:
model = down + [submodule] + up + [nn.Dropout(0.5)]
else:
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost:
return self.model(x)
else:
return torch.cat([x, self.model(x)], 1)
class UnetRenderer(nn.Module):
def __init__(self, renderer, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
super(UnetRenderer, self).__init__()
# construct unet structure
if renderer=='UNET_8_level_BU':
print('>>>> UNET_8_level_BU <<<<')
num_downs = 8
unet_block = UnetSkipConnectionBlock_BU(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True)
for i in range(num_downs - 5):
unet_block = UnetSkipConnectionBlock_BU(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
unet_block = UnetSkipConnectionBlock_BU(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock_BU(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock_BU(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock_BU(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer)
elif renderer=='UNET_6_level_BU':
print('>>>> UNET_6_level_BU <<<<')
num_downs = 6
unet_block = UnetSkipConnectionBlock_BU(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True)
for i in range(num_downs - 5):
unet_block = UnetSkipConnectionBlock_BU(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
unet_block = UnetSkipConnectionBlock_BU(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock_BU(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock_BU(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock_BU(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer)
elif renderer=='UNET_5_level_BU':
print('>>>> UNET_5_level_BU <<<<')
num_downs = 5
unet_block = UnetSkipConnectionBlock_BU(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True)
for i in range(num_downs - 5):
unet_block = UnetSkipConnectionBlock_BU(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
unet_block = UnetSkipConnectionBlock_BU(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock_BU(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock_BU(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock_BU(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer)
elif renderer=='UNET_3_level_BU':
print('>>>> UNET_3_level_BU <<<<')
unet_block = UnetSkipConnectionBlock_BU(ngf * 2, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True)
unet_block = UnetSkipConnectionBlock_BU(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock_BU(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer)
elif renderer=='UNET_8_level':
print('>>>> UNET_8_level <<<<')
num_downs = 8
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True)
for i in range(num_downs - 5):
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer)
elif renderer=='UNET_6_level':
print('>>>> UNET_6_level <<<<')
num_downs = 6
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True)
for i in range(num_downs - 5):
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer)
elif renderer=='UNET_5_level':
print('>>>> UNET_5_level <<<<')
num_downs = 5
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True)
for i in range(num_downs - 5):
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer)
elif renderer=='UNET_3_level':
print('>>>> UNET_3_level <<<<')
unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True)
unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer)
elif renderer=='UNET_5_level_DC':
print('>>>> UNET_5_level_DC <<<<')
num_downs = 5
dilation = 1
unet_block = UnetSkipConnectionBlock_DC(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True, dilation=dilation)
for i in range(num_downs - 5):
dilation *= 2
unet_block = UnetSkipConnectionBlock_DC(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout, dilation=dilation)
dilation *= 2
unet_block = UnetSkipConnectionBlock_DC(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, dilation=dilation)
dilation *= 2
unet_block = UnetSkipConnectionBlock_DC(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer, dilation=dilation)
dilation *= 2
unet_block = UnetSkipConnectionBlock_DC(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer, dilation=dilation)
dilation *= 2
unet_block = UnetSkipConnectionBlock_DC(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer, dilation=dilation)
elif renderer=='UNET_3_level_DC':
print('>>>> UNET_3_level_DC <<<<')
dilation = 1
unet_block = UnetSkipConnectionBlock_DC(ngf * 2, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True, dilation=dilation)
dilation *= 2
unet_block = UnetSkipConnectionBlock_DC(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer, dilation=dilation)
dilation *= 2
unet_block = UnetSkipConnectionBlock_DC(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer, dilation=dilation)
self.model = unet_block
def forward(self, expressions, audio_features, features, background):
unet_input = torch.cat([features, background], 1)
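        # The screen-space feature map sampled from the neural texture is
        # concatenated with the background image along channels; accordingly,
        # define_TextureDecoder in DynamicNeuralTextures_model.py constructs
        # this renderer with input_nc = tex_features + 3.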
return self.model(unet_input) | 17,496 | 54.370253 | 177 | py |
NeuralVoicePuppetry | NeuralVoicePuppetry-master/Neural Rendering Network/models/VGG_LOSS.py | import os
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from util.image_pool import ImagePool
from .base_model import BaseModel
from . import networks
import numpy as np
import functools
from torchvision import models
from collections import namedtuple
class VGG16(torch.nn.Module):
def __init__(self, requires_grad=False):
super(VGG16, self).__init__()
vgg_pretrained_features = models.vgg16(pretrained=True).features
self.slice1 = torch.nn.Sequential()
self.slice2 = torch.nn.Sequential()
self.slice3 = torch.nn.Sequential()
self.slice4 = torch.nn.Sequential()
for x in range(4):
self.slice1.add_module(str(x), vgg_pretrained_features[x])
for x in range(4, 9):
self.slice2.add_module(str(x), vgg_pretrained_features[x])
for x in range(9, 16):
self.slice3.add_module(str(x), vgg_pretrained_features[x])
for x in range(16, 23):
self.slice4.add_module(str(x), vgg_pretrained_features[x])
if not requires_grad:
for param in self.parameters():
param.requires_grad = False
def forward(self, X):
#normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
#X=normalize(X)
X = 0.5 * (X + 1.0) # map to [0,1]
h = self.slice1(X)
h_relu1_2 = h
h = self.slice2(h)
h_relu2_2 = h
h = self.slice3(h)
h_relu3_3 = h
h = self.slice4(h)
h_relu4_3 = h
vgg_outputs = namedtuple("VggOutputs", ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3'])
out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3)
return out
def gram_matrix(y):
(b, ch, h, w) = y.size()
features = y.view(b, ch, w * h)
features_t = features.transpose(1, 2)
gram = features.bmm(features_t) / (ch * h * w)
return gram
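# The Gram matrix captures channel-wise feature correlations: for activations of
# shape (b, ch, h, w) it computes G = F F^T / (ch * h * w) with F the (b, ch, h*w)
# flattening. Matching Gram matrices of fake and target features is the classic
# Gatys-style style loss used by VGGLOSS below.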
class VGGLOSS(torch.nn.Module):
def __init__(self):
super(VGGLOSS, self).__init__()
self.model = VGG16()
self.criterionL2 = torch.nn.MSELoss(reduction='mean')
def forward(self, fake, target, content_weight = 1.0, style_weight = 1.0):
vgg_fake = self.model(fake)
vgg_target = self.model(target)
content_loss = self.criterionL2(vgg_target.relu2_2, vgg_fake.relu2_2)
# gram_matrix
gram_style = [gram_matrix(y) for y in vgg_target]
style_loss = 0.0
for ft_y, gm_s in zip(vgg_fake, gram_style):
gm_y = gram_matrix(ft_y)
style_loss += self.criterionL2(gm_y, gm_s)
total_loss = content_weight * content_loss + style_weight * style_loss
return total_loss | 2,718 | 33.417722 | 97 | py |
NeuralVoicePuppetry | NeuralVoicePuppetry-master/Neural Rendering Network/models/DynamicNeuralTextures_model.py | import os
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from util.image_pool import ImagePool
from .base_model import BaseModel
from . import networks
import numpy as np
import functools
from PIL import Image
from util import util
from torchvision import models
from collections import namedtuple
from . import VGG_LOSS
from . import UNET
from . import NeuralTexture
################
### HELPER ###
################
from BaselModel.basel_model import *
INVALID_UV = -1.0
def define_Texture(opt, gpu_ids=[]):
net = None
if opt.textureModel == 'DynamicNeuralTextureAudio':
net = NeuralTexture.DynamicNeuralTextureAudio(texture_dimensions=opt.tex_dim, texture_features_intermediate=opt.tex_features_intermediate, texture_features=opt.tex_features)
elif opt.textureModel == 'DynamicNeuralTextureExpression':
net = NeuralTexture.DynamicNeuralTextureExpression(texture_dimensions=opt.tex_dim, texture_features_intermediate=opt.tex_features_intermediate, texture_features=opt.tex_features)
elif opt.textureModel == 'StaticNeuralTexture':
net = NeuralTexture.StaticNeuralTexture(texture_dimensions=opt.tex_dim, texture_features=opt.tex_features)
return networks.init_net(net, opt.init_type, opt.init_gain, gpu_ids)
def define_TextureDecoder(renderer, n_feature, ngf, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[]):
net = None
norm_layer = networks.get_norm_layer(norm_type=norm)
N_OUT = 3
#renderer=='UNET_5_level'
net = UNET.UnetRenderer(renderer, n_feature, N_OUT, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
return networks.init_net(net, init_type, init_gain, gpu_ids)
def define_Inpainter(renderer, n_feature, ngf, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[]):
net = None
norm_layer = networks.get_norm_layer(norm_type=norm)
N_OUT = 3
#renderer=='UNET_5_level'
net = UNET.UnetRenderer(renderer, n_feature, N_OUT, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
return networks.init_net(net, init_type, init_gain, gpu_ids)
class DynamicNeuralTexturesModel(BaseModel):
def name(self):
return 'DynamicNeuralTexturesModel'
@staticmethod
def modify_commandline_options(parser, is_train=True):
# changing the default values to match the pix2pix paper
# (https://phillipi.github.io/pix2pix/)
#parser.set_defaults(norm='batch', netG='unet_256')
parser.set_defaults(norm='instance', netG='unet_256')
parser.set_defaults(dataset_mode='aligned')
if is_train:
parser.set_defaults(pool_size=0, no_lsgan=True)
parser.add_argument('--lambda_L1', type=float, default=100.0, help='weight for L1 loss')
return parser
def initialize(self, opt):
BaseModel.initialize(self, opt)
self.isTrain = opt.isTrain
self.trainRenderer = not opt.fix_renderer
# specify the training losses you want to print out. The program will call base_model.get_current_losses
self.loss_names = ['G_total', 'G_L1_Rendering', 'G_VGG_Rendering', 'G_GAN']
# specify the images you want to save/display. The program will call base_model.get_current_visuals
        self.visual_names = ['input_uv', 'fake', 'target']
# specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks
if self.isTrain:
self.model_names = ['texture', 'texture_decoder', 'inpainter' ,'netD']
else: # during test time, only load Gs
self.model_names = ['texture', 'texture_decoder', 'inpainter']
# load/define networks
self.texture = define_Texture(opt, self.gpu_ids)
self.texture_decoder = define_TextureDecoder(opt.rendererType, opt.tex_features+3, opt.ngf, opt.norm, not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
self.inpainter = define_Inpainter(opt.rendererType, 6, opt.ngf, opt.norm, not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
# optimizer
self.loss_G_GAN = 0.0
if self.isTrain:
use_sigmoid = opt.no_lsgan
self.netD = networks.define_D(opt.input_nc + opt.output_nc, opt.ndf, opt.netD, opt.n_layers_D, opt.norm, use_sigmoid, opt.init_type, opt.init_gain, self.gpu_ids)
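            # Conditional PatchGAN: the discriminator sees the UV map concatenated
            # with the (fake or real) image, hence the input_nc + output_nc channels.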
self.fake_AB_pool = ImagePool(opt.pool_size)
# define loss functions
self.criterionGAN = networks.GANLoss(use_lsgan=not opt.no_lsgan).to(self.device)
self.criterionL1 = torch.nn.L1Loss(reduction='mean')
self.criterionL1Smooth = torch.nn.SmoothL1Loss(reduction='mean')
self.criterionL2 = torch.nn.MSELoss(reduction='mean')
if self.opt.lossType == 'VGG':
self.vggloss = VGG_LOSS.VGGLOSS().to(self.device)
# initialize optimizers
self.optimizers = []
self.optimizer_texture_decoder = torch.optim.Adam(self.texture_decoder.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizers.append(self.optimizer_texture_decoder)
self.optimizer_inpainter = torch.optim.Adam(self.inpainter.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizers.append(self.optimizer_inpainter)
self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizers.append(self.optimizer_D)
self.optimizer_T = torch.optim.Adam(self.texture.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizers.append(self.optimizer_T)
def maskErosion(self, mask, erosionFactor):
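        # Erode the background mask by shifting it vertically/horizontally and
        # multiplying the shifted copies together: a pixel stays background only
        # if all shifted masks agree, which shrinks the mask at the face boundary.
        # Different offsets are used for the throat, forehead and left/right sides.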
offsetY = int(erosionFactor * 40)
# throat
mask2 = mask[:,:,0:-offsetY,:]
mask2 = torch.cat([torch.ones_like(mask[:,:,0:offsetY,:]), mask2], 2)
# forehead
        offsetY = int(erosionFactor * 8)  # smaller offset for the forehead
mask3 = mask[:,:,offsetY:,:]
mask3 = torch.cat([mask3, torch.ones_like(mask[:,:,0:offsetY,:])], 2)
mask = mask * mask2 * mask3
offsetX = int(erosionFactor * 15)
# left
mask4 = mask[:,:,:,0:-offsetX]
mask4 = torch.cat([torch.ones_like(mask[:,:,:,0:offsetX]), mask4], 3)
# right
mask5 = mask[:,:,:,offsetX:]
mask5 = torch.cat([mask5,torch.ones_like(mask[:,:,:,0:offsetX])], 3)
return mask * mask4 * mask5
def set_input(self, input):
self.target = input['TARGET'].to(self.device)
self.input_uv = input['UV'].to(self.device)
self.intrinsics = input['intrinsics']
self.extrinsics = input['extrinsics']
self.expressions = input['expressions'].cuda()
self.image_paths = input['paths']
self.audio_features = input['audio_deepspeech'].cuda()
## in training phase introduce some noise
#if self.isTrain:
# if self.opt.input_noise_augmentation:
# audio_noise = torch.randn_like(self.audio_features)*0.05 # check magnitude of noise
# self.audio_features = self.audio_features + audio_noise
def forward(self, alpha=1.0):
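        # Rendering pipeline (run per batch element below): (1) sample the neural
        # texture at the input UV coordinates, (2) decode the sampled features to
        # an intermediate RGB image, (3) inpaint it against the eroded background.
        # Note: the alpha warm-up factor passed from optimize_parameters is
        # currently unused here.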
# background
mask = (self.input_uv[:,0:1,:,:] == INVALID_UV) & (self.input_uv[:,1:2,:,:] == INVALID_UV)
mask = self.maskErosion(mask, self.opt.erosionFactor)
mask = torch.cat([mask,mask,mask], 1)
self.background = torch.where(mask, self.target, torch.zeros_like(self.target))
# loop over batch elements
batch_size = self.target.shape[0]
self.features = []
self.intermediate_fake = []
self.fake = []
for b in range(0,batch_size):
feat = self.texture(self.expressions[b:b+1], self.audio_features[b:b+1], self.input_uv[b:b+1])
self.features.append(feat)
intermediate_fake = self.texture_decoder(self.expressions[b:b+1], self.audio_features[b:b+1], feat, self.background[b:b+1])
self.intermediate_fake.append(intermediate_fake)
fake = self.inpainter(self.expressions[b:b+1], self.audio_features[b:b+1], intermediate_fake, self.background[b:b+1])
self.fake.append(fake)
self.features = torch.cat(self.features, dim=0)
self.intermediate_fake = torch.cat(self.intermediate_fake, dim=0)
self.fake = torch.cat(self.fake, dim=0)
self.fake = torch.where(mask, self.background, self.fake)
def backward_D(self):
mask = ( (self.input_uv[:,0:1,:,:] != INVALID_UV) | (self.input_uv[:,1:2,:,:] != INVALID_UV) )
mask = torch.cat([mask,mask,mask], 1)
def masked(img):
return torch.where(mask, img, torch.zeros_like(img))
# Fake
# stop backprop to the generator by detaching fake_B
fake_AB = self.fake_AB_pool.query(torch.cat((self.input_uv, masked(self.fake)), 1))
pred_fake = self.netD(fake_AB.detach())
self.loss_D_fake = self.criterionGAN(pred_fake, False)
# Real
real_AB = torch.cat((self.input_uv, masked(self.target)), 1)
pred_real = self.netD(real_AB)
self.loss_D_real = self.criterionGAN(pred_real, True)
# Combined loss
self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5
self.loss_D.backward()
def backward_G(self, epoch):
mask = ( (self.input_uv[:,0:1,:,:] != INVALID_UV) | (self.input_uv[:,1:2,:,:] != INVALID_UV) )
sum_mask = torch.sum(mask)
        d = mask.shape[2]  # spatial dimension (the mask still has a single channel here)
        mask_weight = (d*d) / sum_mask
mask = torch.cat([mask,mask,mask], 1)
def masked(img):
return torch.where(mask, img, torch.zeros_like(img))
# First, G(A) should fake the discriminator
fake_AB = torch.cat((self.input_uv, masked(self.fake)), 1)
pred_fake = self.netD(fake_AB)
self.loss_G_GAN = self.criterionGAN(pred_fake, True) * 0.0 # disabled GAN
# Second, G(A) = B
self.loss_G_L1_Rendering = 0.0
self.loss_G_L1_Rendering = 1.0 * self.criterionL1(masked(self.features[:,0:3,:,:]), masked(self.target) ) * mask_weight
self.loss_G_L1_Rendering += 5.0 * self.criterionL1(masked(self.intermediate_fake), masked(self.target) ) * mask_weight
self.loss_G_L1_Rendering += 10.0 * self.criterionL1(self.fake, self.target)
self.loss_G_VGG_Rendering = 0.0
if self.opt.lossType == 'VGG':
self.loss_G_VGG_Rendering += 10.0 * self.vggloss(self.fake, self.target)
self.loss_G_total = self.loss_G_L1_Rendering + self.loss_G_VGG_Rendering + self.loss_G_GAN
self.loss_G_total.backward()
def optimize_parameters(self, epoch_iter):
alpha = (epoch_iter-5) / 50.0
if alpha < 0.0: alpha = 0.0
if alpha > 1.0: alpha = 1.0
self.forward(alpha)
        updateDiscriminator = self.loss_G_GAN < 1.0  # alternative threshold: 0.1
# update Discriminator
if updateDiscriminator:
self.set_requires_grad(self.netD, True)
self.optimizer_D.zero_grad()
self.backward_D()
self.optimizer_D.step()
# update Generator
self.set_requires_grad(self.netD, False)
self.optimizer_texture_decoder.zero_grad()
self.optimizer_inpainter.zero_grad()
self.optimizer_T.zero_grad()
self.backward_G(epoch_iter)
self.optimizer_texture_decoder.step()
self.optimizer_inpainter.step()
self.optimizer_T.step()
| 11,642 | 39.996479 | 186 | py |
NeuralVoicePuppetry | NeuralVoicePuppetry-master/Audio2ExpressionNet/Training Code/test.py | import os
from options.test_options import TestOptions
from data import CreateDataLoader
from models import create_model
from util.visualizer import save_images
from util import html
from util import util
import torch
import numpy as np
from PIL import Image
import time
import cv2
def save_tensor_image(input_image, image_path):
if isinstance(input_image, torch.Tensor):
image_tensor = input_image.data
image_numpy = image_tensor[0].cpu().float().numpy()
if image_numpy.shape[0] == 1:
image_numpy = np.tile(image_numpy, (3, 1, 1))
image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
else:
image_numpy = input_image
image_numpy = image_numpy.astype(np.uint8)
image_pil = Image.fromarray(image_numpy)
image_pil.save(image_path)
if __name__ == '__main__':
opt = TestOptions().parse()
# hard-code some parameters for test
opt.num_threads = 1 # test code only supports num_threads = 1
opt.batch_size = 1 # test code only supports batch_size = 1
opt.serial_batches = True # no shuffle
opt.no_augmentation = True # no flip
opt.display_id = -1 # no visdom display
data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
dataset_size = len(data_loader)
print('#test images = %d' % dataset_size)
print('#train objects = %d' % opt.nTrainObjects)
print('#test objects = %d' % opt.nTestObjects)
print('>>> create model <<<')
model = create_model(opt)
print('>>> setup model <<<')
model.setup(opt)
#save_tensor_image(model.texture.data[0:1,0:3,:,:], 'load_test1.png')
sum_time = 0
total_runs = dataset_size
warm_up = 50
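    # The first warm_up iterations are excluded from the timing statistics so that
    # CUDA context creation and cuDNN autotuning do not skew the reported average.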
# create a website
web_dirs = []
webpages = []
file_expressions = []
file_fake_expressions = []
file_rigids = []
file_intrinsics = []
file_identities = []
video_writer = []
print('>>> create a websites and output directories <<<')
for i in range(0, opt.nTestObjects):
#web_dir = os.path.join(opt.results_dir, opt.name, '%s--%s__%s_%s' % (opt.test_sequence_names[i][0], opt.test_sequence_names[i][1], opt.phase, opt.epoch) )
web_dir = os.path.join(opt.results_dir, opt.name, '%s--%s' % (opt.test_sequence_names[i][0], opt.test_sequence_names[i][1]) )
webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.epoch))
##
output_file_expressions=None
output_file_fake_expressions=None
output_file_rigids = None
output_file_intrinsics = None
output_file_identities = None
if hasattr(model, 'fake_expressions'):
output_file_expressions=open(os.path.join(web_dir, 'gt_expressions.txt'), 'w')
output_file_fake_expressions=open(os.path.join(web_dir, 'expression.txt'), 'w')
output_file_rigids=open(os.path.join(web_dir, 'rigid.txt'), 'w')
output_file_intrinsics=open(os.path.join(web_dir, 'intrinsics.txt'), 'w')
output_file_identities=open(os.path.join(web_dir, 'identities.txt'), 'w')
##
web_dirs.append(web_dir)
webpages.append(webpage)
file_expressions.append(output_file_expressions)
file_fake_expressions.append(output_file_fake_expressions)
file_rigids.append(output_file_rigids)
file_intrinsics.append(output_file_intrinsics)
file_identities.append(output_file_identities)
if opt.write_video:
writer = cv2.VideoWriter(web_dir+'.mp4', cv2.VideoWriter_fourcc(*"mp4v"), 25.0,(opt.display_winsize,opt.display_winsize))
video_writer.append(writer)
# test with eval mode. This only affects layers like batchnorm and dropout.
# pix2pix: we use batchnorm and dropout in the original pix2pix. You can experiment it with and without eval() mode.
# CycleGAN: It should not affect CycleGAN as CycleGAN uses instancenorm without dropout.
if opt.eval:
model.eval()
for i, data in enumerate(dataset):
#if i >= opt.num_test:
# break
model.set_input(data)
test_sequence_id = data['internal_id'].cpu()
torch.cuda.synchronize()
a = time.perf_counter()
model.test()
torch.cuda.synchronize() # added sync
b = time.perf_counter()
        if i >= warm_up: # skip exactly warm_up iterations so the divisor below matches the sample count
sum_time += ((b-a) * 1000)
visuals = model.get_current_visuals()
img_path = model.get_image_paths()
if i % 5 == 0:
print('processing (%04d)-th image... %s' % (i, img_path))
#if not hasattr(model, 'fake_expressions'):
if not opt.write_no_images:
save_images(webpages[test_sequence_id], visuals, img_path, aspect_ratio=opt.aspect_ratio, width=opt.display_winsize)
if opt.write_video:
fake = visuals['fake']
im = util.tensor2im(fake)
                im = cv2.resize(im, (opt.display_winsize, opt.display_winsize), interpolation=cv2.INTER_CUBIC)  # scipy.misc.imresize was removed from SciPy; cv2 provides the same bicubic resize
im = np.concatenate([im[:,:,2:3], im[:,:,1:2], im[:,:,0:1]],axis=2)
#video_writer[test_sequence_id].write(np.random.randint(0, 255, (opt.display_winsize,opt.display_winsize,3)).astype('uint8'))
video_writer[test_sequence_id].write(im.astype('uint8'))
if hasattr(model, 'fake_expressions'):
#print('contains fake expressions')
np.savetxt(file_fake_expressions[test_sequence_id], model.fake_expressions.data.cpu().numpy(), delimiter=' ')
if hasattr(model, 'expressions'):
np.savetxt(file_expressions[test_sequence_id], model.expressions.data.cpu().numpy(), delimiter=' ')
np.savetxt(file_rigids[test_sequence_id], data['extrinsics'][0].data.cpu().numpy(), delimiter=' ')
np.savetxt(file_intrinsics[test_sequence_id], data['intrinsics'].data.cpu().numpy(), delimiter=' ')
np.savetxt(file_identities[test_sequence_id], data['identity'].data.cpu().numpy(), delimiter=' ')
#if i < 50:
# if hasattr(model, 'face_model'):
# image_dir = webpage.get_image_dir()
# name = os.path.splitext(short_path)[0]
#
# filename_mesh = name + '.obj'
# #filename_tex = name + '_audio_texture.png'
# #filename_mat = name + '.mtl'
# #model.face_model.save_model_to_obj_file(image_dir, filename_mesh, filename_mat, filename_tex)
# model.face_model.save_model_to_obj_file(image_dir, filename_mesh)
print('mean eval time (ms): ', (sum_time / (total_runs - warm_up)))
# save the website
for webpage in webpages:
webpage.save()
if opt.write_video:
for writer in video_writer:
writer.release()
| 6,888 | 39.523529 | 163 | py |
NeuralVoicePuppetry | NeuralVoicePuppetry-master/Audio2ExpressionNet/Training Code/train.py | import time
import copy
import torch
from options.train_options import TrainOptions
from data import CreateDataLoader
from models import create_model
from util.visualizer import Visualizer
if __name__ == '__main__':
# training dataset
opt = TrainOptions().parse()
data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
dataset_size = len(data_loader)
print('#training images = %d' % dataset_size)
print('#training objects = %d' % opt.nTrainObjects)
## validation dataset
if opt.compute_val:
opt_validation = copy.copy(opt) # create a clone
opt_validation.phase = 'val'
opt_validation.serial_batches = True
opt_validation.isTrain = False
data_loader_validation = CreateDataLoader(opt_validation)
dataset_validation = data_loader_validation.load_data()
dataset_size_validation = len(data_loader_validation)
print('#validation images = %d' % dataset_size_validation)
print('#validation objects = %d' % opt_validation.nValObjects)
# model
model = create_model(opt)
model.setup(opt)
if opt.renderer != 'no_renderer':
print('load renderer')
model.loadModules(opt, opt.renderer, ['netD','netG'])
visualizer = Visualizer(opt)
total_steps = 0
for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):
epoch_start_time = time.time()
iter_data_time = time.time()
epoch_iter = 0 # iterator within an epoch
for i, data in enumerate(dataset):
iter_start_time = time.time()
if total_steps % opt.print_freq == 0:
t_data = iter_start_time - iter_data_time
visualizer.reset()
total_steps += opt.batch_size
epoch_iter += opt.batch_size
model.set_input(data)
model.optimize_parameters(epoch)
if total_steps % opt.display_freq == 0:
save_result = total_steps % opt.update_html_freq == 0
visualizer.display_current_results(model.get_current_visuals(), epoch, save_result)
if total_steps % opt.print_freq == 0:
losses = model.get_current_losses()
if opt.compute_val:
validation_error = 0
cnt = 0
for i, data in enumerate(dataset_validation):
model.set_input(data)
model.forward()
                        model.backward_G(epoch) # be careful with the gradients (they are zeroed in the optimization step)
validation_error += model.loss_G.detach().cpu()
cnt += 1.0
validation_error /= cnt
#print('Validation Error:', validation_error)
#visualizer.plot_current_validation_error(epoch, float(epoch_iter) / dataset_size, {'validation_error': validation_error})
losses.update({'validation_error': validation_error})
t = (time.time() - iter_start_time) / opt.batch_size
visualizer.print_current_losses(epoch, epoch_iter, losses, t, t_data)
if opt.display_id > 0:
visualizer.plot_current_losses(epoch, float(epoch_iter) / dataset_size, opt, losses)
if total_steps % opt.save_latest_freq == 0:
print('saving the latest model (epoch %d, total_steps %d)' % (epoch, total_steps))
save_suffix = 'iter_%d' % total_steps if opt.save_by_iter else 'latest'
model.save_networks(save_suffix)
iter_data_time = time.time()
if epoch % opt.save_epoch_freq == 0:
print('saving the model at the end of epoch %d, iters %d' % (epoch, total_steps))
model.save_networks('latest')
model.save_networks(epoch)
print('End of epoch %d / %d \t Time Taken: %d sec' %
(epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))
model.update_learning_rate()
# run validation
#if epoch % opt.save_epoch_freq == 0:
# if opt.compute_val:
# validation_error = 0
# cnt = 0
# for i, data in enumerate(dataset_validation):
# model.set_input(data)
# model.forward()
# model.backward_G(epoch)
# validation_error += model.loss_G.detach().cpu()
# cnt += 1.0
#
# validation_error /= cnt
# print('Validation Error:', validation_error)
# visualizer.plot_current_validation_error(epoch, {'validation_error': validation_error})
| 4,758 | 39.675214 | 142 | py |
NeuralVoicePuppetry | NeuralVoicePuppetry-master/Audio2ExpressionNet/Training Code/options/base_options.py | import argparse
import os
from util import util
import torch
import models
import data
class BaseOptions():
def __init__(self):
self.initialized = False
def initialize(self, parser):
parser.add_argument('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
parser.add_argument('--batch_size', type=int, default=1, help='input batch size')
parser.add_argument('--seq_len', type=int, default=1, help='sequence length (if applicable)')
parser.add_argument('--fineSize', type=int, default=512, help='then crop to this size')
parser.add_argument('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML')
parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels')
parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels')
parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in first conv layer')
parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer')
parser.add_argument('--netD', type=str, default='basic', help='selects model to use for netD')
parser.add_argument('--netG', type=str, default='resnet_9blocks', help='selects model to use for netG')
parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
parser.add_argument('--renderer', type=str, default='no_renderer', help='name of the renderer to load the models from')
parser.add_argument('--fix_renderer', action='store_true', help='renderer is fixed')
parser.add_argument('--dataset_mode', type=str, default='aligned', help='chooses how datasets are loaded. [aligned | multi]')
parser.add_argument('--model', type=str, default='cycle_gan', help='chooses which model to use. cycle_gan, pix2pix, test')
parser.add_argument('--direction', type=str, default='AtoB', help='AtoB or BtoA')
parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
parser.add_argument('--load_iter', type=int, default='0', help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')
parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data')
parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization')
parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')
parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
parser.add_argument('--resize_or_crop', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop|crop|scale_width|scale_width_and_crop|none]')
parser.add_argument('--no_augmentation', action='store_true', help='if specified, no data augmentation')
#parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]')
parser.add_argument('--init_type', type=str, default='xavier', help='network initialization [normal|xavier|kaiming|orthogonal]')
parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')
parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}')
parser.add_argument('--tex_dim', type=int, default=256, help='neural texture dimensions')
parser.add_argument('--tex_features_intermediate', type=int, default=16, help='# intermediate neural texture features when using dynamic textures')
parser.add_argument('--tex_features', type=int, default=16, help='# neural texture features')
parser.add_argument('--textureModel', type=str, default='DynamicNeuralTextureAudio', help='texture model')
parser.add_argument('--rendererType', type=str, default='UNET_5_level', help='neural renderer network')
parser.add_argument('--lossType', type=str, default='L1', help='loss type for the final output')
parser.add_argument('--hierarchicalTex', action='store_true', help='if specified, hierachical neural textures are used')
parser.add_argument('--output_audio_expressions', action='store_true', help='if specified, no sh layers are used')
parser.add_argument('--erosionFactor', type=float, default=1.0, help='scaling factor for erosion of the background.')
        parser.add_argument('--audio_window_size', type=int, default=16, help='audio window size = #mel feature bins')
parser.add_argument('--look_ahead', action='store_true', help='cache images in numpy format')
parser.add_argument('--cached_images', action='store_true', help='cache images in numpy format')
self.initialized = True
return parser
def gather_options(self):
# initialize parser with basic options
if not self.initialized:
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser = self.initialize(parser)
# get the basic options
opt, _ = parser.parse_known_args()
# modify model-related parser options
model_name = opt.model
model_option_setter = models.get_option_setter(model_name)
parser = model_option_setter(parser, self.isTrain)
opt, _ = parser.parse_known_args() # parse again with the new defaults
# modify dataset-related parser options
dataset_name = opt.dataset_mode
dataset_option_setter = data.get_option_setter(dataset_name)
parser = dataset_option_setter(parser, self.isTrain)
self.parser = parser
return parser.parse_args()
def print_options(self, opt):
message = ''
message += '----------------- Options ---------------\n'
for k, v in sorted(vars(opt).items()):
comment = ''
default = self.parser.get_default(k)
if v != default:
comment = '\t[default: %s]' % str(default)
message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
message += '----------------- End -------------------'
print(message)
# save to the disk
expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
util.mkdirs(expr_dir)
file_name = os.path.join(expr_dir, 'opt.txt')
with open(file_name, 'wt') as opt_file:
opt_file.write(message)
opt_file.write('\n')
def parse(self):
opt = self.gather_options()
opt.isTrain = self.isTrain # train or test
# process opt.suffix
if opt.suffix:
suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''
opt.name = opt.name + suffix
self.print_options(opt)
# set gpu ids
str_ids = opt.gpu_ids.split(',')
opt.gpu_ids = []
for str_id in str_ids:
id = int(str_id)
if id >= 0:
opt.gpu_ids.append(id)
if len(opt.gpu_ids) > 0:
torch.cuda.set_device(opt.gpu_ids[0])
self.opt = opt
return self.opt
| 8,300 | 58.719424 | 223 | py |
NeuralVoicePuppetry | NeuralVoicePuppetry-master/Audio2ExpressionNet/Training Code/models/base_model.py | import os
import torch
import torch.nn as nn
from collections import OrderedDict
from . import networks
import numpy as np
from PIL import Image
def save_tensor_image(input_image, image_path):
if isinstance(input_image, torch.Tensor):
image_tensor = input_image.data
image_numpy = image_tensor[0].cpu().float().numpy()
if image_numpy.shape[0] == 1:
image_numpy = np.tile(image_numpy, (3, 1, 1))
image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
else:
image_numpy = input_image
image_numpy = image_numpy.astype(np.uint8)
image_pil = Image.fromarray(image_numpy)
image_pil.save(image_path)
class BaseModel():
# modify parser to add command line options,
# and also change the default values if needed
@staticmethod
def modify_commandline_options(parser, is_train):
return parser
def name(self):
return 'BaseModel'
def initialize(self, opt):
self.opt = opt
self.gpu_ids = opt.gpu_ids
self.isTrain = opt.isTrain
self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
self.load_dir = os.path.join(opt.checkpoints_dir, opt.name)
self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)
if opt.resize_or_crop != 'scale_width':
torch.backends.cudnn.benchmark = True
self.loss_names = []
self.model_names = []
self.visual_names = []
self.image_paths = []
def set_input(self, input):
pass
def forward(self):
pass
# load and print networks; create schedulers
def setup(self, opt, parser=None):
if self.isTrain:
self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers]
if not self.isTrain or opt.continue_train:
load_suffix = 'iter_%d' % opt.load_iter if opt.load_iter > 0 else opt.epoch
self.load_networks(load_suffix)
self.print_networks(opt.verbose)
# load specific moudles
def loadModules(self, opt, model_name, module_names):
for name in module_names:
if isinstance(name, str):
load_dir = os.path.join(opt.checkpoints_dir, model_name)
load_filename = 'latest_%s.pth' % (name)
load_path = os.path.join(load_dir, load_filename)
net = getattr(self, name)
if isinstance(net, torch.Tensor):
print('loading the tensor from %s' % load_path)
net_loaded = torch.load(load_path, map_location=str(self.device))
net.copy_(net_loaded)
else:
# if isinstance(net, torch.nn.DataParallel):
# net = net.module
print('loading the module from %s' % load_path)
# if you are using PyTorch newer than 0.4 (e.g., built from
# GitHub source), you can remove str() on self.device
state_dict = torch.load(load_path, map_location=str(self.device))
if hasattr(state_dict, '_metadata'):
del state_dict._metadata
# patch InstanceNorm checkpoints prior to 0.4
for key in list(state_dict.keys()): # need to copy keys here because we mutate in loop
self.__patch_instance_norm_state_dict(state_dict, net, key.split('.'))
net.load_state_dict(state_dict)
# make models eval mode during test time
def eval(self):
for name in self.model_names:
if isinstance(name, str):
net = getattr(self, name)
net.eval()
# used in test time, wrapping `forward` in no_grad() so we don't save
# intermediate steps for backprop
def test(self):
with torch.no_grad():
self.forward()
# get image paths
def get_image_paths(self):
return self.image_paths
def optimize_parameters(self):
pass
# update learning rate (called once every epoch)
def update_learning_rate(self):
for scheduler in self.schedulers:
scheduler.step()
lr = self.optimizers[0].param_groups[0]['lr']
print('learning rate = %.7f' % lr)
# return visualization images. train.py will display these images, and save the images to a html
def get_current_visuals(self):
visual_ret = OrderedDict()
for name in self.visual_names:
if isinstance(name, str):
visual_ret[name] = getattr(self, name)
return visual_ret
# return traning losses/errors. train.py will print out these errors as debugging information
def get_current_losses(self):
errors_ret = OrderedDict()
for name in self.loss_names:
if isinstance(name, str):
# float(...) works for both scalar tensor and float number
errors_ret[name] = float(getattr(self, 'loss_' + name))
return errors_ret
# save models to the disk
def save_networks(self, epoch):
for name in self.model_names:
if isinstance(name, str):
save_filename = '%s_%s.pth' % (epoch, name)
save_path = os.path.join(self.save_dir, save_filename)
net = getattr(self, name)
if isinstance(net, torch.Tensor):
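                    # Plain tensors (e.g. neural textures) are saved directly; the
                    # first three feature channels are also dumped as PNG previews.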
#torch.save(net.state_dict(), save_path)
torch.save(net, save_path)
for i in range(0, list(net.size())[0]):
save_tensor_image(net[i:i+1,0:3,:,:], save_path+str(i)+'.png')
else:
if len(self.gpu_ids) > 0 and torch.cuda.is_available():
#torch.save(net.module.cpu().state_dict(), save_path) # << original
torch.save(net.cpu().state_dict(), save_path)
net.cuda(self.gpu_ids[0])
else:
torch.save(net.cpu().state_dict(), save_path)
def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):
key = keys[i]
if i + 1 == len(keys): # at the end, pointing to a parameter/buffer
if module.__class__.__name__.startswith('InstanceNorm') and \
(key == 'running_mean' or key == 'running_var'):
if getattr(module, key) is None:
state_dict.pop('.'.join(keys))
if module.__class__.__name__.startswith('InstanceNorm') and \
(key == 'num_batches_tracked'):
state_dict.pop('.'.join(keys))
else:
self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)
# load models from the disk
def load_networks(self, epoch):
for name in self.model_names:
if isinstance(name, str):
load_filename = '%s_%s.pth' % (epoch, name)
load_path = os.path.join(self.load_dir, load_filename)
net = getattr(self, name)
if isinstance(net, torch.Tensor):
print('loading the tensor from %s' % load_path)
net_loaded = torch.load(load_path, map_location=str(self.device))
net.copy_(net_loaded)
else:
# if isinstance(net, torch.nn.DataParallel):
# net = net.module
print('loading the module from %s' % load_path)
# if you are using PyTorch newer than 0.4 (e.g., built from
# GitHub source), you can remove str() on self.device
state_dict = torch.load(load_path, map_location=str(self.device))
if hasattr(state_dict, '_metadata'):
del state_dict._metadata
# patch InstanceNorm checkpoints prior to 0.4
for key in list(state_dict.keys()): # need to copy keys here because we mutate in loop
self.__patch_instance_norm_state_dict(state_dict, net, key.split('.'))
net.load_state_dict(state_dict)
# print network information
def print_networks(self, verbose):
print('---------- Networks initialized -------------')
for name in self.model_names:
if isinstance(name, str):
net = getattr(self, name)
if isinstance(net, torch.Tensor):
num_params = net.numel()
print('[Tensor %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))
else:
num_params = 0
for param in net.parameters():
num_params += param.numel()
if verbose:
print(net)
print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))
print('-----------------------------------------------')
# set requies_grad=False to avoid computation
def set_requires_grad(self, nets, requires_grad=False):
if not isinstance(nets, list):
nets = [nets]
for net in nets:
if net is not None:
for param in net.parameters():
param.requires_grad = requires_grad
| 9,458 | 41.227679 | 110 | py |
NeuralVoicePuppetry | NeuralVoicePuppetry-master/Audio2ExpressionNet/Training Code/models/networks.py | import torch
import torch.nn as nn
from torch.nn import init
import functools
from torch.optim import lr_scheduler
###############################################################################
# Helper Functions
###############################################################################
def get_norm_layer(norm_type='instance'):
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
elif norm_type == 'none':
norm_layer = None
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
def get_scheduler(optimizer, opt):
if opt.lr_policy == 'lambda':
def lambda_rule(epoch):
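            # Keep the learning rate constant for the first opt.niter epochs, then
            # decay it linearly to zero over the next opt.niter_decay epochs.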
lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif opt.lr_policy == 'step':
scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
elif opt.lr_policy == 'plateau':
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
elif opt.lr_policy == 'cosine':
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.niter, eta_min=0)
else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
return scheduler
def init_weights(net, init_type='normal', gain=0.02):
def init_func(m):
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, gain)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=gain)
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1:
init.normal_(m.weight.data, 1.0, gain)
init.constant_(m.bias.data, 0.0)
print('initialize network with %s' % init_type)
net.apply(init_func)
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):
if len(gpu_ids) > 0:
assert(torch.cuda.is_available())
net.to(gpu_ids[0])
#net = torch.nn.DataParallel(net, gpu_ids)
init_weights(net, init_type, gain=init_gain)
return net
def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[]):
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netG == 'resnet_9blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9)
elif netG == 'resnet_6blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6)
elif netG == 'unet_128':
net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
elif netG == 'unet_256':
net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
else:
raise NotImplementedError('Generator model name [%s] is not recognized' % netG)
return init_net(net, init_type, init_gain, gpu_ids)
def define_D(input_nc, ndf, netD,
n_layers_D=3, norm='batch', use_sigmoid=False, init_type='normal', init_gain=0.02, gpu_ids=[]):
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netD == 'basic':
net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer, use_sigmoid=use_sigmoid)
elif netD == 'n_layers':
net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, use_sigmoid=use_sigmoid)
elif netD == 'pixel':
net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer, use_sigmoid=use_sigmoid)
else:
        raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD)
return init_net(net, init_type, init_gain, gpu_ids)
##############################################################################
# Classes
##############################################################################
# Defines the GAN loss which uses either LSGAN or the regular GAN.
# When LSGAN is used, it is basically same as MSELoss,
# but it abstracts away the need to create the target label tensor
# that has the same size as the input
class GANLoss(nn.Module):
def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0):
super(GANLoss, self).__init__()
self.register_buffer('real_label', torch.tensor(target_real_label))
self.register_buffer('fake_label', torch.tensor(target_fake_label))
if use_lsgan:
self.loss = nn.MSELoss()
else:
self.loss = nn.BCELoss()
def get_target_tensor(self, input, target_is_real):
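        # Expand the scalar real/fake label to the shape of the discriminator
        # output so the criterion can be applied elementwise.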
if target_is_real:
target_tensor = self.real_label
else:
target_tensor = self.fake_label
return target_tensor.expand_as(input)
def __call__(self, input, target_is_real):
target_tensor = self.get_target_tensor(input, target_is_real)
return self.loss(input, target_tensor)
# Defines the generator that consists of Resnet blocks between a few
# downsampling/upsampling operations.
# Code and idea originally from Justin Johnson's architecture.
# https://github.com/jcjohnson/fast-neural-style/
class ResnetGenerator(nn.Module):
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'):
assert(n_blocks >= 0)
super(ResnetGenerator, self).__init__()
self.input_nc = input_nc
self.output_nc = output_nc
self.ngf = ngf
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0,
bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling):
mult = 2**i
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3,
stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
mult = 2**n_downsampling
for i in range(n_blocks):
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
for i in range(n_downsampling):
mult = 2**(n_downsampling - i)
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input):
return self.model(input)
# Define a resnet block
class ResnetBlock(nn.Module):
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
norm_layer(dim),
nn.ReLU(True)]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x):
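        # residual connection: add the block input to its transformed output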
out = x + self.conv_block(x)
return out
# Defines the Unet generator.
# |num_downs|: number of downsamplings in UNet. For example,
# if |num_downs| == 7, image of size 128x128 will become of size 1x1
# at the bottleneck
class UnetGenerator(nn.Module):
def __init__(self, input_nc, output_nc, num_downs, ngf=64,
norm_layer=nn.BatchNorm2d, use_dropout=False):
super(UnetGenerator, self).__init__()
# construct unet structure
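        # The generator is built recursively from the innermost block outwards:
        # each UnetSkipConnectionBlock wraps the previous one, adding one more
        # down-/upsampling level plus a skip connection.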
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True)
for i in range(num_downs - 5):
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer)
self.model = unet_block
def forward(self, input):
return self.model(input)
# Defines the submodule with skip connection.
# X -------------------identity---------------------- X
# |-- downsampling -- |submodule| -- upsampling --|
class UnetSkipConnectionBlock(nn.Module):
def __init__(self, outer_nc, inner_nc, input_nc=None,
submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
super(UnetSkipConnectionBlock, self).__init__()
self.outermost = outermost
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
if input_nc is None:
input_nc = outer_nc
downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
stride=2, padding=1, bias=use_bias)
downrelu = nn.LeakyReLU(0.2, True)
downnorm = norm_layer(inner_nc)
uprelu = nn.ReLU(True)
upnorm = norm_layer(outer_nc)
if outermost:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downconv]
up = [uprelu, upconv, nn.Tanh()]
model = down + [submodule] + up
elif innermost:
upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv]
up = [uprelu, upconv, upnorm]
model = down + up
else:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv, downnorm]
up = [uprelu, upconv, upnorm]
if use_dropout:
model = down + [submodule] + up + [nn.Dropout(0.5)]
else:
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost:
return self.model(x)
else:
return torch.cat([x, self.model(x)], 1)
# Defines the PatchGAN discriminator with the specified arguments.
class NLayerDiscriminator(nn.Module):
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False):
super(NLayerDiscriminator, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
kw = 4
padw = 1
sequence = [
nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
nn.LeakyReLU(0.2, True)
]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers):
nf_mult_prev = nf_mult
nf_mult = min(2**n, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
kernel_size=kw, stride=2, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
nf_mult_prev = nf_mult
nf_mult = min(2**n_layers, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]
if use_sigmoid:
sequence += [nn.Sigmoid()]
self.model = nn.Sequential(*sequence)
def forward(self, input):
return self.model(input)
class PixelDiscriminator(nn.Module):
def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d, use_sigmoid=False):
super(PixelDiscriminator, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
self.net = [
nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
norm_layer(ndf * 2),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]
if use_sigmoid:
self.net.append(nn.Sigmoid())
self.net = nn.Sequential(*self.net)
def forward(self, input):
return self.net(input)
| 15,748 | 40.013021 | 151 | py |
NeuralVoicePuppetry | NeuralVoicePuppetry-master/Audio2ExpressionNet/Training Code/models/audio2ExpressionsAttentionTMP4_model.py | import os
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from util.image_pool import ImagePool
from .base_model import BaseModel
from . import networks
import numpy as np
import functools
from BaselModel.basel_model import *
################
### HELPER ###
################
INVALID_UV = -1.0
from torchvision import models
from collections import namedtuple
class ExpressionEstimator_Attention(nn.Module):
def __init__(self, n_output_expressions, nIdentities, seq_len, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
super(ExpressionEstimator_Attention, self).__init__()
print('Estimator Attention')
#################################
######## audio net ##########
#################################
self.seq_len = seq_len
dropout_rate = 0.0
        if use_dropout:
            #dropout_rate = 0.5
            dropout_rate = 0.25
self.convNet = nn.Sequential(
nn.Conv2d(29, 32, kernel_size=(3,1), stride=(2,1), padding=(1,0), bias=True), # 29 x 16 x 1 => 32 x 8 x 1
nn.LeakyReLU(0.02, True),
nn.Conv2d(32, 32, kernel_size=(3,1), stride=(2,1), padding=(1,0), bias=True), # 32 x 8 x 1 => 32 x 4 x 1
nn.LeakyReLU(0.02, True),
nn.Conv2d(32, 64, kernel_size=(3,1), stride=(2,1), padding=(1,0), bias=True), # 32 x 4 x 1 => 64 x 2 x 1
nn.LeakyReLU(0.2, True),
nn.Conv2d(64, 64, kernel_size=(3,1), stride=(2,1), padding=(1,0), bias=True), # 64 x 2 x 1 => 64 x 1 x 1
nn.LeakyReLU(0.2, True),
)
fullNet_input_size = 64
self.subspace_dim = 32 # number of audio expressions
print('fullNet_input_size: ', fullNet_input_size)
self.fullNet = nn.Sequential(
nn.Linear(in_features = fullNet_input_size, out_features=128, bias = True),
nn.LeakyReLU(0.02),
nn.Linear(in_features = 128, out_features=64, bias = True),
nn.LeakyReLU(0.02),
nn.Linear(in_features = 64, out_features=self.subspace_dim, bias = True),
nn.Tanh()
)
# mapping from subspace to full expression space
self.register_parameter('mapping', torch.nn.Parameter(torch.randn(1, nIdentities, N_EXPRESSIONS, self.subspace_dim, requires_grad=True)))
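        # 'mapping' holds one learned (N_EXPRESSIONS x subspace_dim) matrix per
        # training identity; it lifts the generic 32-dim audio-expression space
        # to person-specific expression coefficients.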
# attention
self.attentionConvNet = nn.Sequential( # b x subspace_dim x seq_len
nn.Conv1d(self.subspace_dim, 16, kernel_size=3, stride=1, padding=1, bias=True),
nn.LeakyReLU(0.02, True),
nn.Conv1d(16, 8, kernel_size=3, stride=1, padding=1, bias=True),
nn.LeakyReLU(0.02, True),
nn.Conv1d(8, 4, kernel_size=3, stride=1, padding=1, bias=True),
nn.LeakyReLU(0.02, True),
nn.Conv1d(4, 2, kernel_size=3, stride=1, padding=1, bias=True),
nn.LeakyReLU(0.02, True),
nn.Conv1d(2, 1, kernel_size=3, stride=1, padding=1, bias=True),
nn.LeakyReLU(0.02, True)
)
self.attentionNet = nn.Sequential(
nn.Linear(in_features = self.seq_len, out_features=self.seq_len, bias = True),
nn.Softmax(dim=1)
)
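        # The attention network predicts a softmax weighting over the temporal
        # window (seq_len frames); getAudioExpressions_internal collapses the
        # window with this weighting to obtain temporally smooth expressions.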
#self.hidden2subspace = nn.Linear(self.subspace_dim,self.subspace_dim)
def forward_internal(self, audio_features_sequence, identity_id):
result_subspace, intermediate_expression = self.getAudioExpressions_internal(audio_features_sequence)
mapping = torch.index_select(self.mapping[0], dim = 0, index = identity_id)
result = 10.0 * torch.bmm(mapping, result_subspace)[:,:,0]
result_intermediate = 10.0 * torch.bmm(mapping, intermediate_expression)[:,:,0]
return result, result_intermediate
def forward(self, audio_features_sequence, identity_id):
result_subspace = self.getAudioExpressions(audio_features_sequence)
mapping = torch.index_select(self.mapping[0], dim = 0, index = identity_id)
result = torch.bmm(mapping, result_subspace)[:,:,0]
return 10.0 * result
def getAudioExpressions_internal(self, audio_features_sequence):
# audio_features_sequence: b x seq_len x 16 x 29
b = audio_features_sequence.shape[0] # batchsize
audio_features_sequence = audio_features_sequence.view(b * self.seq_len, 1, 16, 29) # b * seq_len x 1 x 16 x 29
audio_features_sequence = torch.transpose(audio_features_sequence, 1, 3) # b* seq_len x 29 x 16 x 1
conv_res = self.convNet( audio_features_sequence )
conv_res = torch.reshape( conv_res, (b * self.seq_len, 1, -1))
result_subspace = self.fullNet(conv_res)[:,0,:] # b * seq_len x subspace_dim
result_subspace = result_subspace.view(b, self.seq_len, self.subspace_dim)# b x seq_len x subspace_dim
#################
### attention ###
#################
result_subspace_T = torch.transpose(result_subspace, 1, 2) # b x subspace_dim x seq_len
intermediate_expression = result_subspace_T[:,:,(self.seq_len // 2):(self.seq_len // 2) + 1]
att_conv_res = self.attentionConvNet(result_subspace_T)
#print('att_conv_res', att_conv_res.shape)
attention = self.attentionNet(att_conv_res.view(b, self.seq_len)).view(b, self.seq_len, 1) # b x seq_len x 1
#print('attention', attention.shape)
# pooling along the sequence dimension
result_subspace = torch.bmm(result_subspace_T, attention)
#print('result_subspace', result_subspace.shape)
###
return result_subspace.view(b, self.subspace_dim, 1), intermediate_expression
def getAudioExpressions(self, audio_features_sequence):
expr, _ = self.getAudioExpressions_internal(audio_features_sequence)
return expr
def regularizer(self):
#reg = torch.norm(self.mapping)
reg_mapping = torch.mean(torch.abs(self.mapping))
# one could also enforce orthogonality here
# s_browExpressions[] = { 32, 41, 71, 72, 73, 74, 75 };
reg_eye_brow = torch.mean(torch.abs( self.mapping[0,:,[32, 41, 71, 72, 73, 74, 75],:] ))
#return 0.01 * reg_mapping + 1.0 * reg_eye_brow
return 0.0 * reg_mapping
def define_ExpressionEstimator(estimatorType='estimatorDefault', nIdentities=1, seq_len=1, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[]):
net = None
print('EstimatorType: ', estimatorType)
    if estimatorType == 'estimatorAttention':
        net = ExpressionEstimator_Attention(N_EXPRESSIONS, nIdentities, seq_len)
    else:
        raise NotImplementedError('Estimator model name [%s] is not recognized' % estimatorType)
return networks.init_net(net, init_type, init_gain, gpu_ids)
class Audio2ExpressionsAttentionTMP4Model(BaseModel):
def name(self):
return 'Audio2ExpressionsAttentionTMP4Model'
@staticmethod
def modify_commandline_options(parser, is_train=True):
# changing the default values to match the pix2pix paper
# (https://phillipi.github.io/pix2pix/)
#parser.set_defaults(norm='batch', netG='unet_256')
parser.set_defaults(norm='instance', netG='unet_256')
parser.set_defaults(dataset_mode='aligned')
if is_train:
parser.set_defaults(pool_size=0, no_lsgan=True)
parser.add_argument('--lambda_L1', type=float, default=100.0, help='weight for L1 loss')
return parser
def initialize(self, opt):
BaseModel.initialize(self, opt)
self.isTrain = opt.isTrain
self.trainRenderer = not opt.fix_renderer
# specify the training losses you want to print out. The program will call base_model.get_current_losses
self.loss_names = ['G_L1','G_L1_ABSOLUTE','G_L1_RELATIVE', 'G_Regularizer']
# specify the images you want to save/display. The program will call base_model.get_current_visuals
#self.visual_names = ['input_uv', 'fake', 'target']
self.visual_names = ['zeros']
self.zeros = torch.zeros(1,3,2,2)
# specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks
        self.model_names = ['netG']  # the same single generator is saved/loaded in train and test
self.fake_expressions = None
self.fake_expressions_prv = None
self.fake_expressions_nxt = None
self.morphable_model = MorphableModel()
self.mask = self.morphable_model.LoadMask()
nIdentities=opt.nTrainObjects
# load/define networks
self.netG = define_ExpressionEstimator(estimatorType=opt.rendererType, nIdentities=nIdentities, seq_len=opt.seq_len, gpu_ids=self.gpu_ids)
if self.isTrain:
use_sigmoid = opt.no_lsgan
self.fake_AB_pool = ImagePool(opt.pool_size)
# define loss functions
self.criterionL1 = torch.nn.L1Loss()
self.criterionL1Smooth = torch.nn.SmoothL1Loss()
self.criterionL2 = torch.nn.MSELoss()
# initialize optimizers
self.optimizers = []
self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999), weight_decay=0.0 )
self.optimizers.append(self.optimizer_G)
def set_input(self, input):
self.image_paths = input['paths']
self.expressions = input['expressions'].cuda()
self.audio_features = input['audio_deepspeech'].cuda() # b x seq_len x 16 x 29
if self.isTrain:
self.expressions_prv = input['expressions_prv'].cuda()
self.audio_features_prv = input['audio_deepspeech_prv'].cuda() # b x seq_len x 16 x 29
self.expressions_nxt = input['expressions_nxt'].cuda()
self.audio_features_nxt = input['audio_deepspeech_nxt'].cuda() # b x seq_len x 16 x 29
self.target_id = input['target_id'].cuda()
def forward(self):
# estimate expressions
if self.opt.output_audio_expressions: #self.opt.dataset_mode=='audio':
self.fake_expressions = self.netG.getAudioExpressions(self.audio_features)
if self.isTrain:
self.fake_expressions_prv = self.netG.getAudioExpressions(self.audio_features_prv)
self.fake_expressions_nxt = self.netG.getAudioExpressions(self.audio_features_nxt)
else:
self.fake_expressions, self.fake_expressions_intermediate = self.netG.forward_internal(self.audio_features, self.target_id)
if self.isTrain:
self.fake_expressions_prv = self.netG(self.audio_features_prv, self.target_id)
self.fake_expressions_nxt = self.netG(self.audio_features_nxt, self.target_id)
def backward_G(self, epoch):
# Second, G(A) = B
#self.loss_G_L1 = self.criterionL1(self.fake_expressions, self.expressions)
# difference in vertex space
mask = torch.cat([self.mask[:,None],self.mask[:,None],self.mask[:,None]], 1)
mask = mask + 0.1 * torch.ones_like(mask) # priority for the mask region, but other region should also be constrained
# absolute (single timesteps)
diff_expression = self.fake_expressions - self.expressions
diff_vertices = self.morphable_model.compute_expression_delta(diff_expression)
diff_expression_intermediate = self.fake_expressions_intermediate - self.expressions
diff_vertices_intermediate = self.morphable_model.compute_expression_delta(diff_expression_intermediate)
diff_expression_prv = self.fake_expressions_prv - self.expressions_prv
diff_vertices_prv = self.morphable_model.compute_expression_delta(diff_expression_prv)
diff_expression_nxt = self.fake_expressions_nxt - self.expressions_nxt
diff_vertices_nxt = self.morphable_model.compute_expression_delta(diff_expression_nxt)
# relative (temporal 1 timestep) cur - nxt and prv - cur
diff_expression_tmp_cur_nxt = (self.fake_expressions - self.fake_expressions_nxt) - (self.expressions - self.expressions_nxt)
diff_vertices_tmp_cur_nxt = self.morphable_model.compute_expression_delta(diff_expression_tmp_cur_nxt)
diff_expression_tmp_prv_cur = (self.fake_expressions_prv - self.fake_expressions) - (self.expressions_prv - self.expressions)
diff_vertices_tmp_prv_cur = self.morphable_model.compute_expression_delta(diff_expression_tmp_prv_cur)
# relative (temporal 2 timesteps) nxt - prv
diff_expression_tmp_nxt_prv = (self.fake_expressions_nxt - self.fake_expressions_prv) - (self.expressions_nxt - self.expressions_prv)
diff_vertices_tmp_nxt_prv = self.morphable_model.compute_expression_delta(diff_expression_tmp_nxt_prv)
#print('mask: ', mask.shape)
#print('diff_vertices: ', diff_vertices.shape)
self.loss_G_L1_ABSOLUTE = 0.0
self.loss_G_L1_RELATIVE = 0.0
if self.opt.lossType == 'L1':
self.loss_G_L1_ABSOLUTE += 1000.0 * torch.mean(mask * torch.abs(diff_vertices)) # scale brings meters to millimeters
self.loss_G_L1_ABSOLUTE += 1000.0 * torch.mean(mask * torch.abs(diff_vertices_prv))
self.loss_G_L1_ABSOLUTE += 1000.0 * torch.mean(mask * torch.abs(diff_vertices_nxt))
self.loss_G_L1_ABSOLUTE += 3000.0 * torch.mean(mask * torch.abs(diff_vertices_intermediate))
self.loss_G_L1_RELATIVE += 20000.0 * torch.mean(mask * torch.abs(diff_vertices_tmp_cur_nxt))
self.loss_G_L1_RELATIVE += 20000.0 * torch.mean(mask * torch.abs(diff_vertices_tmp_prv_cur))
self.loss_G_L1_RELATIVE += 20000.0 * torch.mean(mask * torch.abs(diff_vertices_tmp_nxt_prv))
elif self.opt.lossType == 'L2':
self.loss_G_L1_ABSOLUTE += 1000.0 * torch.mean(mask * diff_vertices * diff_vertices)
self.loss_G_L1_ABSOLUTE += 1000.0 * torch.mean(mask * diff_vertices_prv * diff_vertices_prv)
self.loss_G_L1_ABSOLUTE += 1000.0 * torch.mean(mask * diff_vertices_nxt * diff_vertices_nxt)
self.loss_G_L1_ABSOLUTE += 3000.0 * torch.mean(mask * diff_vertices_intermediate * diff_vertices_intermediate)
self.loss_G_L1_RELATIVE += 20000.0 * torch.mean(mask * diff_vertices_tmp_cur_nxt * diff_vertices_tmp_cur_nxt)
self.loss_G_L1_RELATIVE += 20000.0 * torch.mean(mask * diff_vertices_tmp_prv_cur * diff_vertices_tmp_prv_cur)
self.loss_G_L1_RELATIVE += 20000.0 * torch.mean(mask * diff_vertices_tmp_nxt_prv * diff_vertices_tmp_nxt_prv)
elif self.opt.lossType == 'RMS':
self.loss_G_L1_ABSOLUTE += 1000.0 * torch.sqrt(torch.mean(mask * diff_vertices * diff_vertices))
self.loss_G_L1_ABSOLUTE += 1000.0 * torch.sqrt(torch.mean(mask * diff_vertices_prv * diff_vertices_prv))
self.loss_G_L1_ABSOLUTE += 1000.0 * torch.sqrt(torch.mean(mask * diff_vertices_nxt * diff_vertices_nxt))
self.loss_G_L1_ABSOLUTE += 3000.0 * torch.sqrt(torch.mean(mask * diff_vertices_intermediate * diff_vertices_intermediate))
self.loss_G_L1_RELATIVE += 20000.0 * torch.sqrt(torch.mean(mask * diff_vertices_tmp_cur_nxt * diff_vertices_tmp_cur_nxt))
self.loss_G_L1_RELATIVE += 20000.0 * torch.sqrt(torch.mean(mask * diff_vertices_tmp_prv_cur * diff_vertices_tmp_prv_cur))
self.loss_G_L1_RELATIVE += 20000.0 * torch.sqrt(torch.mean(mask * diff_vertices_tmp_nxt_prv * diff_vertices_tmp_nxt_prv))
else: # L1
self.loss_G_L1_ABSOLUTE += 1000.0 * torch.mean(mask * torch.abs(diff_vertices))
self.loss_G_L1_ABSOLUTE += 1000.0 * torch.mean(mask * torch.abs(diff_vertices_prv))
self.loss_G_L1_ABSOLUTE += 1000.0 * torch.mean(mask * torch.abs(diff_vertices_nxt))
self.loss_G_L1_ABSOLUTE += 3000.0 * torch.mean(mask * torch.abs(diff_vertices_intermediate))
self.loss_G_L1_RELATIVE += 20000.0 * torch.mean(mask * torch.abs(diff_vertices_tmp_cur_nxt))
self.loss_G_L1_RELATIVE += 20000.0 * torch.mean(mask * torch.abs(diff_vertices_tmp_prv_cur))
self.loss_G_L1_RELATIVE += 20000.0 * torch.mean(mask * torch.abs(diff_vertices_tmp_nxt_prv))
self.loss_G_L1 = self.loss_G_L1_ABSOLUTE + self.loss_G_L1_RELATIVE
self.loss_G_Regularizer = self.netG.regularizer()
self.loss_G = self.loss_G_L1 + self.loss_G_Regularizer
self.loss_G.backward()
def optimize_parameters(self, epoch_iter):
self.forward()
# update Generator
self.optimizer_G.zero_grad()
self.backward_G(epoch_iter)
self.optimizer_G.step()
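# A minimal sketch of the outer loop that would drive this model; the factory
# and dataloader names below are assumptions for illustration, not part of
# this file:
#
#   model = create_model(opt)                  # hypothetical factory
#   for epoch in range(opt.niter):
#       for i, data in enumerate(dataloader):
#           model.set_input(data)
#           model.optimize_parameters(epoch)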
| 16,798 | 46.860399 | 172 | py |
NeuralVoicePuppetry | NeuralVoicePuppetry-master/Audio2ExpressionNet/Training Code/util/image_pool.py | import random
import torch
class ImagePool():
def __init__(self, pool_size):
self.pool_size = pool_size
if self.pool_size > 0:
self.num_imgs = 0
self.images = []
def query(self, images):
if self.pool_size == 0:
return images
return_images = []
for image in images:
image = torch.unsqueeze(image.data, 0)
if self.num_imgs < self.pool_size:
self.num_imgs = self.num_imgs + 1
self.images.append(image)
return_images.append(image)
else:
p = random.uniform(0, 1)
if p > 0.5:
random_id = random.randint(0, self.pool_size - 1) # randint is inclusive
tmp = self.images[random_id].clone()
self.images[random_id] = image
return_images.append(tmp)
else:
return_images.append(image)
return_images = torch.cat(return_images, 0)
return return_images
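if __name__ == '__main__':
    # Minimal self-test (not part of the original file). With pool_size=2 the
    # first two images are cached and returned as-is; later images are either
    # passed through or swapped with a cached one at 50% probability.
    pool = ImagePool(pool_size=2)
    fake = torch.randn(4, 3, 8, 8)  # a batch of 4 synthetic "images"
    out = pool.query(fake)
    assert out.shape == fake.shape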
| 1,072 | 31.515152 | 93 | py |
NeuralVoicePuppetry | NeuralVoicePuppetry-master/Audio2ExpressionNet/Training Code/util/util.py | from __future__ import print_function
import torch
import numpy as np
from PIL import Image
import os
import sys
import array
import OpenEXR
import Imath
def load_exr(image_path):
# Open the input file
file = OpenEXR.InputFile(image_path)
# Compute the size
dw = file.header()['dataWindow']
w, h = (dw.max.x - dw.min.x + 1, dw.max.y - dw.min.y + 1)
# Read the three color channels as 32-bit floats
FLOAT = Imath.PixelType(Imath.PixelType.FLOAT)
#(R,G,B) = [np.array(array.array('f', file.channel(Chan, FLOAT)).tolist()).reshape((w, h, 1)) for Chan in ("R", "G", "B") ]
(r, g, b) = file.channels("RGB")
    # the flat channel buffers are row-major scanlines, so reshape to (height, width, 1)
    R = np.array(array.array('f', r).tolist()).reshape((h, w, 1))
    G = np.array(array.array('f', g).tolist()).reshape((h, w, 1))
    B = np.array(array.array('f', b).tolist()).reshape((h, w, 1))
return np.concatenate((R, G, B), axis=2)
# Converts a Tensor into an image array (numpy)
# |imtype|: the desired type of the converted numpy array
def tensor2im(input_image, imtype=np.uint8):
if isinstance(input_image, torch.Tensor):
input_image = torch.clamp(input_image, -1.0, 1.0)
image_tensor = input_image.data
else:
return input_image
image_numpy = image_tensor[0].cpu().float().numpy()
if image_numpy.shape[0] == 1:
image_numpy = np.tile(image_numpy, (3, 1, 1))
image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
return image_numpy.astype(imtype)
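# Example round trip (a sketch; the tensor below is synthetic):
#   t = torch.rand(1, 3, 4, 4) * 2.0 - 1.0   # network output in [-1, 1]
#   img = tensor2im(t)                        # uint8 H x W x 3 in [0, 255]
#   save_image(img, 'example.png')            # save_image is defined below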
def diagnose_network(net, name='network'):
mean = 0.0
count = 0
for param in net.parameters():
if param.grad is not None:
mean += torch.mean(torch.abs(param.grad.data))
count += 1
if count > 0:
mean = mean / count
print(name)
print(mean)
def save_image(image_numpy, image_path):
image_pil = Image.fromarray(image_numpy)
image_pil.save(image_path)
def print_numpy(x, val=True, shp=False):
x = x.astype(np.float64)
if shp:
print('shape,', x.shape)
if val:
x = x.flatten()
print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))
def mkdirs(paths):
if isinstance(paths, list) and not isinstance(paths, str):
for path in paths:
mkdir(path)
else:
mkdir(paths)
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
| 2,454 | 28.578313 | 127 | py |
NeuralVoicePuppetry | NeuralVoicePuppetry-master/Audio2ExpressionNet/Training Code/BaselModel/basel_model.py | import os
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import numpy as np
import soft_renderer as sr
N_EXPRESSIONS=76
class MorphableModel(nn.Module):
def __init__(self, filename_average=''):
super(MorphableModel, self).__init__()
print('Load Morphable Model (Basel)')
#filename_mesh = os.path.join(opt.dataroot, opt.phase + '/average_model.obj')
filename_mesh = filename_average
if filename_average=='':
print('use default identity')
filename_mesh = './BaselModel/average.obj'
mesh = sr.Mesh.from_obj(filename_mesh, normalization=False, load_texture=True)
self.average_vertices = mesh.vertices[0]
self.faces = mesh.faces[0]
self.average_vertices = self.average_vertices[None, :, :] # [num_vertices, XYZ] -> [batch_size=1, num_vertices, XYZ]
self.faces = self.faces[None, :, :] # [num_faces, 3] -> [batch_size=1, num_faces, 3]
self.textures = mesh.textures
self.num_vertices = self.average_vertices.shape[1]
self.num_faces = self.faces.shape[1]
print('vertices:', self.average_vertices.shape)
print('faces:', self.faces.shape)
## basis function
self.expression_basis = np.memmap('./BaselModel/ExpressionBasis.matrix', dtype='float32', mode='r').__array__()[1:] # first entry is the size
self.expression_basis = np.resize(self.expression_basis, (N_EXPRESSIONS, self.num_vertices, 4))[:,:,0:3]
self.expression_basis = torch.tensor(self.expression_basis.astype(np.float32)).cuda() # N_EXPRESSIONS x num_vertices x 3
self.expression_basis = torch.transpose(self.expression_basis,0,2) # transpose for matmul
print('expression_basis', self.expression_basis.shape)
#texture_size = 2
#self.textures = torch.ones(1, self.faces.shape[1], texture_size, texture_size, texture_size, 3, dtype=torch.float32).cuda()
#print('textures:', self.textures.shape)
## ## debug
## zeroExpr = torch.zeros(1, N_EXPRESSIONS, dtype=torch.float32).cuda()
## self.morph(zeroExpr)
## self.save_model_to_obj_file('model_zero_expression.obj')
## ##
## onesExpr = torch.ones(1, N_EXPRESSIONS, dtype=torch.float32).cuda()
## self.morph(onesExpr)
## self.save_model_to_obj_file('model_ones_expression.obj')
## exit()
## ##
# default expression
zeroExpr = torch.zeros(1, N_EXPRESSIONS, dtype=torch.float32).cuda()
self.morph(zeroExpr)
def save_model_to_obj_file(self, filename, mask=None):
faces_cpu = self.faces.detach().cpu().numpy()
vertices_cpu = self.vertices.detach().cpu().numpy()
mask_cpu = None
        if mask is not None:
mask_cpu = mask.detach().cpu().numpy()
f = open(filename, 'w')
        if mask is None:
for i in range(0, self.num_vertices):
f.write('v ' + str(vertices_cpu[0, i, 0]) + ' ' + str(vertices_cpu[0, i, 1]) + ' ' + str(vertices_cpu[0, i, 2]) + '\n')
else:
for i in range(0, self.num_vertices):
f.write('v ' + str(vertices_cpu[0, i, 0]) + ' ' + str(vertices_cpu[0, i, 1]) + ' ' + str(vertices_cpu[0, i, 2]) + ' ' + str(mask_cpu[i]) + ' ' + str(mask_cpu[i]) + ' ' + str(mask_cpu[i]) + ' 1'+ '\n')
for i in range(0, self.num_faces):
f.write('f ' + str(faces_cpu[0, i, 0]+1) + '// ' + str(faces_cpu[0, i, 1]+1) + '// ' + str(faces_cpu[0, i, 2]+1) + '//\n')
f.close()
def compute_expression_delta(self, expressions):
return torch.transpose(torch.matmul(self.expression_basis, torch.transpose(expressions, 0,1)), 0, 2) # note that matmul wants to have this order: (a x b x c) x (c x m) => (a x b x m)
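    # Shape walk-through (as set up in __init__): expression_basis is stored
    # transposed as [3, num_vertices, N_EXPRESSIONS]; matmul with
    # transpose(expressions) of shape [N_EXPRESSIONS, batch] yields
    # [3, num_vertices, batch], and the final transpose(0, 2) returns vertex
    # offsets of shape [batch, num_vertices, 3].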
def morph(self, expressions):
self.vertices = self.average_vertices + self.compute_expression_delta(expressions)
return self.vertices
def LoadMask(self, filename=''):
if filename=='':
print('use default mask')
filename = './BaselModel/mask/defaultMask_mouth.obj'
mask = np.zeros(self.num_vertices)
file = open(filename, 'r')
i=0
for line in file:
if line[0] == 'v':
floats = [float(x) for x in line[1:].split()]
if floats[3] == 1.0 and floats[4] == 0.0 and floats[5] == 0.0:
mask[i] = 1.0
i += 1
file.close()
return torch.tensor(mask.astype(np.float32)).cuda() | 4,607 | 42.065421 | 221 | py |
NeuralVoicePuppetry | NeuralVoicePuppetry-master/Audio2ExpressionNet/Training Code/data/facetmp_dataset.py | import os.path
import random
import torchvision.transforms as transforms
import torch
import numpy as np
from data.base_dataset import BaseDataset
from data.audio import Audio
#from data.image_folder import make_dataset
from PIL import Image
from util import util
#def make_dataset(dir):
# images = []
# assert os.path.isdir(dir), '%s is not a valid directory' % dir
# for root, _, fnames in sorted(os.walk(dir)):
# for fname in fnames:
# if any(fname.endswith(extension) for extension in ['.bin', '.BIN']):
# path = os.path.join(root, fname)
# images.append(path)
# return sorted(images)
def make_dataset(dir):
images = []
ids = []
assert os.path.isdir(dir), '%s is not a valid directory' % dir
for root, _, fnames in sorted(os.walk(dir)):
for fname in fnames:
if any(fname.endswith(extension) for extension in ['.npy', '.NPY']):
                id_str = fname[:-15]  # strip '.deepspeech.npy'
i = int(id_str)
ids.append(i)
ids = sorted(ids)
for id in ids:
fname=str(id)+'.deepspeech.npy'
path = os.path.join(root, fname)
images.append(path)
return images
def make_ids(paths, root_dir):
ids = []
for fname in paths:
l = fname.rfind('/')
        id_str = fname[l+1:-15]  # strip '.deepspeech.npy'
i = int(id_str)
#print(fname, ': ', i)
ids.append(i)
return ids
def make_dataset_png_ids(dir):
images = []
ids = []
assert os.path.isdir(dir), '%s is not a valid directory' % dir
for root, _, fnames in sorted(os.walk(dir)):
for fname in fnames:
if any(fname.endswith(extension) for extension in ['.png', '.PNG']):
id_str = fname[:-4]
i = int(id_str)
ids.append(i)
ids = sorted(ids)
return ids
def make_dataset_exr_ids(dir):
images = []
ids = []
assert os.path.isdir(dir), '%s is not a valid directory' % dir
for root, _, fnames in sorted(os.walk(dir)):
for fname in fnames:
if any(fname.endswith(extension) for extension in ['.exr', '.EXR']):
id_str = fname[:-4]
i = int(id_str)
ids.append(i)
ids = sorted(ids)
return ids
def load_intrinsics(input_dir):
file = open(input_dir+"/intrinsics.txt", "r")
    intrinsics = [[float(x) for x in line.split()] for line in file]
    file.close()
    intrinsics = intrinsics[0]  # only the first line holds the intrinsics
return intrinsics
def load_rigids(input_dir):
file = open(input_dir+"/rigid.txt", "r")
    rigid_floats = [[float(x) for x in line.split()] for line in file]  # each 4x4 matrix is stored as 4 consecutive lines of floats
file.close()
all_rigids = [ [rigid_floats[4*idx + 0],rigid_floats[4*idx + 1],rigid_floats[4*idx + 2],rigid_floats[4*idx + 3]] for idx in range(0, len(rigid_floats)//4) ]
return all_rigids
def load_expressions(input_dir):
file = open(input_dir+"/expression.txt", "r")
expressions = [[float(x) for x in line.split()] for line in file]
file.close()
return expressions
def load_identity(input_dir):
file = open(input_dir+"/identities.txt", "r")
identity = [[float(x) for x in line.split()] for line in file]
file.close()
return identity
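# The four loaders above read plain-text tracking files that are expected to
# sit next to the frames (layout inferred from the parsing code above):
#   intrinsics.txt  - one line of camera intrinsics
#   rigid.txt       - 4 text lines per 4x4 rigid head-pose matrix
#   expression.txt  - one line of expression coefficients per frame
#   identities.txt  - one line of identity coefficients per frame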
class FaceTmpDataset(BaseDataset):
@staticmethod
def modify_commandline_options(parser, is_train):
return parser
def initialize(self, opt):
self.opt = opt
# directories
self.dataroot = opt.dataroot
self.audio_feature_dir = os.path.join(opt.dataroot, 'audio_feature')
self.image_dir = os.path.join(opt.dataroot, 'images')
self.uvs_dir = os.path.join(opt.dataroot, 'uvs')
# debug print
print('load sequence:', self.dataroot)
print('\taudio_feature_dir:', self.audio_feature_dir)
print('\timage_dir:', self.image_dir)
print('\tuvs_dir:', self.uvs_dir)
# generate index maps
audio_ids = make_ids(make_dataset(self.audio_feature_dir), self.dataroot)
image_ids = make_dataset_png_ids(self.image_dir)
uvs_ids = make_dataset_exr_ids(self.uvs_dir)
# get model parameters
intrinsics = load_intrinsics(self.dataroot)
extrinsics = load_rigids(self.dataroot)
expressions = load_expressions(self.dataroot)
identities = load_identity(self.dataroot)
if opt.phase == 'test': # test overwrites the audio and uv files, as well as expressions
print('Test mode. Overwriting audio, uv and expressions')
print('source sequence:', opt.source_dir)
dataroot = opt.source_dir
self.audio_feature_dir = os.path.join(dataroot, 'audio_feature')
self.uvs_dir = os.path.join(dataroot, 'uvs')
audio_ids = make_ids(make_dataset(self.audio_feature_dir), dataroot)
uvs_ids = make_dataset_exr_ids(self.uvs_dir)
intrinsics = load_intrinsics(dataroot)
extrinsics = load_rigids(dataroot)
expressions = load_expressions(dataroot)
identities = load_identity(dataroot)
print('\tnum audio_ids:', len(audio_ids))
print('\tnum image_ids:', len(image_ids))
print('\tnum uvs_ids:', len(uvs_ids))
# set data
min_len = min(len(audio_ids), len(image_ids), len(uvs_ids), len(extrinsics), len(expressions))
self.audio_ids = audio_ids[:min_len]
self.image_ids = image_ids[:min_len]
self.uvs_ids = uvs_ids[:min_len]
self.intrinsics = intrinsics
self.extrinsics = extrinsics[:] # extrinsics[:min_len]
self.expressions = expressions[:] # expressions[:min_len]
self.identities = identities[:] #identities[:min_len]
self.n_frames_total = min_len
print('\tnum frames:', self.n_frames_total)
opt.nTrainObjects = 1
opt.nValObjects = 1
opt.nTestObjects = 1
opt.test_sequence_names = [[opt.dataroot.split("/")[-1], 'train']]
if opt.phase == 'test':
opt.test_sequence_names = [[opt.source_dir.split("/")[-1], 'test']]
print('test:', opt.test_sequence_names)
assert(opt.resize_or_crop == 'resize_and_crop')
def getSampleWeights(self):
weights = np.ones((self.n_frames_total))
return weights
def computeCrop(self, mask, MULTIPLE_OF=64, random_size=False):
IMG_DIM_X = mask.shape[2]
IMG_DIM_Y = mask.shape[1]
if random_size:
# random dimensions
new_dim_x = np.random.randint(int(IMG_DIM_X * 0.75), IMG_DIM_X+1)
new_dim_y = np.random.randint(int(IMG_DIM_Y * 0.75), IMG_DIM_Y+1)
new_dim_x = int(np.floor(new_dim_x / float(MULTIPLE_OF)) * MULTIPLE_OF )
new_dim_y = int(np.floor(new_dim_y / float(MULTIPLE_OF)) * MULTIPLE_OF )
else:
new_dim_x = 2 * MULTIPLE_OF
new_dim_y = 2 * MULTIPLE_OF
# check dims
if new_dim_x > IMG_DIM_X: new_dim_x -= MULTIPLE_OF
if new_dim_y > IMG_DIM_Y: new_dim_y -= MULTIPLE_OF
# random pos
mask_indices = torch.nonzero(mask)
_, bb_mid_point_y, bb_mid_point_x = mask_indices[np.random.randint(0, mask_indices.shape[0])].data.cpu()
#print('bb_mid_point', bb_mid_point_x, bb_mid_point_y)
offset_x = bb_mid_point_x - new_dim_x/2
offset_y = bb_mid_point_y - new_dim_y/2
if IMG_DIM_X == new_dim_x: offset_x = 0
if offset_x < 0: offset_x = 0
if offset_x+new_dim_x >= IMG_DIM_X: offset_x = IMG_DIM_X-new_dim_x
if IMG_DIM_Y == new_dim_y: offset_y = 0
if offset_y < 0: offset_y = 0
if offset_y+new_dim_y >= IMG_DIM_Y: offset_y = IMG_DIM_Y-new_dim_y
return np.array([int(offset_x),int(offset_y),int(new_dim_x), int(new_dim_y)])
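    # computeCrop() returns [offset_x, offset_y, new_dim_x, new_dim_y]: a
    # window whose side lengths are multiples of MULTIPLE_OF (so the
    # network's downsampling strides divide evenly) and whose center is a
    # random foreground pixel of the UV mask, clamped to the image bounds.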
def getitem(self, global_index, crop=None):
# select frame from sequence
index = global_index
# get data ids
audio_id = self.audio_ids[index]
image_id = self.image_ids[index]
uv_id = self.uvs_ids[index]
#print('GET ITEM: ', index)
img_fname = os.path.join(self.image_dir, str(image_id).zfill(5) + '.png')
img_numpy = np.asarray(Image.open(img_fname))
TARGET = 2.0 * transforms.ToTensor()(img_numpy.astype(np.float32))/255.0 - 1.0
uv_fname = os.path.join(self.uvs_dir, str(uv_id).zfill(5) + '.exr')
uv_numpy = util.load_exr(uv_fname)
UV = transforms.ToTensor()(uv_numpy.astype(np.float32))
UV = torch.where(UV > 1.0, torch.zeros_like(UV), UV)
UV = torch.where(UV < 0.0, torch.zeros_like(UV), UV)
UV = 2.0 * UV - 1.0
# intrinsics and extrinsics
intrinsics = self.intrinsics
extrinsics = self.extrinsics[uv_id]
# expressions
expressions = np.asarray(self.expressions[audio_id], dtype=np.float32)
#print('expressions:', expressions.shape)
        expressions[32] *= 0.0  # remove eyebrow movements
        expressions[41] *= 0.0  # remove eyebrow movements
        expressions[71:75] *= 0.0  # remove eyebrow movements
expressions = torch.tensor(expressions)
# identity
identity = torch.tensor(self.identities[audio_id])
# load deepspeech feature
dsf_fname = os.path.join(self.audio_feature_dir, str(audio_id) + '.deepspeech.npy')
feature_array = np.load(dsf_fname)
dsf_np = np.resize(feature_array, (16,29,1))
dsf = transforms.ToTensor()(dsf_np.astype(np.float32))
# load sequence data if necessary
if self.opt.look_ahead:# use prev and following frame infos
r = self.opt.seq_len//2
last_valid_idx = audio_id
for i in range(1,r): # prev frames
index_seq = index - i
if index_seq < 0: index_seq = 0
audio_id_seq = self.audio_ids[index_seq]
if audio_id_seq == audio_id - i: last_valid_idx = audio_id_seq
else: audio_id_seq = last_valid_idx
dsf_fname = os.path.join(self.audio_feature_dir, str(audio_id_seq) + '.deepspeech.npy')
feature_array = np.load(dsf_fname)
dsf_np = np.resize(feature_array, (16,29,1))
dsf_seq = transforms.ToTensor()(dsf_np.astype(np.float32)) # 1 x 16 x 29
dsf = torch.cat([dsf_seq, dsf], 0) # seq_len x 16 x 29
# note the ordering [old ... current]
last_valid_idx = audio_id
for i in range(1,self.opt.seq_len - r + 1): # following frames
index_seq = index + i
max_idx = len(self.audio_ids)-1
if index_seq > max_idx: index_seq = max_idx
audio_id_seq = self.audio_ids[index_seq]
if audio_id_seq == audio_id + i: last_valid_idx = audio_id_seq
else: audio_id_seq = last_valid_idx
dsf_fname = os.path.join(self.audio_feature_dir, str(audio_id_seq) + '.deepspeech.npy')
feature_array = np.load(dsf_fname)
dsf_np = np.resize(feature_array, (16,29,1))
dsf_seq = transforms.ToTensor()(dsf_np.astype(np.float32)) # 1 x 16 x 29
dsf = torch.cat([dsf, dsf_seq], 0) # seq_len x 16 x 29
# note the ordering [old ... current ... future]
else:
last_valid_idx = audio_id
for i in range(1,self.opt.seq_len):
index_seq = index - i
if index_seq < 0: index_seq = 0
audio_id_seq = self.audio_ids[index_seq]
if audio_id_seq == audio_id - i: last_valid_idx = audio_id_seq
else: audio_id_seq = last_valid_idx
dsf_fname = os.path.join(self.audio_feature_dir, str(audio_id_seq) + '.deepspeech.npy')
feature_array = np.load(dsf_fname)
dsf_np = np.resize(feature_array, (16,29,1))
dsf_seq = transforms.ToTensor()(dsf_np.astype(np.float32)) # 1 x 16 x 29
dsf = torch.cat([dsf_seq, dsf], 0) # seq_len x 16 x 29
# note the ordering [old ... current]
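        # Window layout sketch (assuming seq_len = 8): with look_ahead the
        # stacked feature covers [t-3, ..., t, ..., t+4] -> 8 x 16 x 29;
        # without look_ahead it is strictly causal, [t-7, ..., t-1, t].
        # Boundary frames are clamped, and non-contiguous frame ids fall back
        # to the last valid neighbor via last_valid_idx.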
#################################
####### apply augmentation ######
#################################
if not self.opt.no_augmentation:
            if crop is None:
INVALID_UV = -1
mask = ( (UV[0:1,:,:] != INVALID_UV) | (UV[1:2,:,:] != INVALID_UV) )
crop = self.computeCrop(mask, MULTIPLE_OF=64) # << dependent on the network structure !! 64 => 6 layers
offset_x,offset_y,new_dim_x, new_dim_y = crop
# select subwindow
TARGET = TARGET[:, offset_y:offset_y+new_dim_y, offset_x:offset_x+new_dim_x]
UV = UV[:, offset_y:offset_y+new_dim_y, offset_x:offset_x+new_dim_x]
# compute new intrinsics
# TODO: atm not needed but maybe later
#################################
return {'TARGET': TARGET, 'UV': UV,
'paths': dsf_fname, #img_path,
'intrinsics': np.array(intrinsics),
'extrinsics': np.array(extrinsics),
'expressions': expressions,
'identity': identity,
'audio_deepspeech': dsf, # deepspeech feature
#'target_id':target_id,
'crop': crop,
'internal_id': 0}
def __getitem__(self, global_index):
# select frame from sequence
index = global_index
current = self.getitem(index)
crop = current['crop']
prv = self.getitem(max(index-1, 0), crop)
nxt = self.getitem(min(index+1, self.n_frames_total-1), crop)
        if crop is None:
crop = np.array([0,0,current['UV'].shape[2],current['UV'].shape[1]])
weight = 1.0 / self.n_frames_total
return {'TARGET': current['TARGET'],
'UV': current['UV'],
'paths': current['paths'],
'intrinsics': current['intrinsics'],
'extrinsics': current['extrinsics'],
'expressions': current['expressions'],
'identity': current['identity'],
'audio_deepspeech': current['audio_deepspeech'],
'prv_TARGET': prv['TARGET'],
'prv_UV': prv['UV'],
'prv_audio_deepspeech': prv['audio_deepspeech'],
'prv_expressions': prv['expressions'],
'nxt_TARGET': nxt['TARGET'],
'nxt_UV': nxt['UV'],
'nxt_audio_deepspeech': nxt['audio_deepspeech'],
'nxt_expressions': nxt['expressions'],
'crop': crop,
'internal_id': current['internal_id'],
'weight': np.array([weight]).astype(np.float32)}
def __len__(self):
return self.n_frames_total
def name(self):
return 'FaceTmpDataset'
| 15,143 | 37.339241 | 160 | py |
NeuralVoicePuppetry | NeuralVoicePuppetry-master/Audio2ExpressionNet/Training Code/data/audio_dataset.py | import os.path
import random
import torchvision.transforms as transforms
import torch
import numpy as np
from data.base_dataset import BaseDataset
from data.audio import Audio
#from data.image_folder import make_dataset
from PIL import Image
def make_dataset(dir):
images = []
ids = []
assert os.path.isdir(dir), '%s is not a valid directory' % dir
for root, _, fnames in sorted(os.walk(dir)):
for fname in fnames:
if any(fname.endswith(extension) for extension in ['.deepspeech.npy']):
id_str = fname[:-15]
i = int(id_str)
ids.append(i)
ids = sorted(ids)
for id in ids:
fname=str(id)+'.deepspeech.npy'
path = os.path.join(root, fname)
images.append(path)
return images
def make_ids(paths, root_dir):
ids = []
for fname in paths:
l = fname.rfind('/')
#id_str = fname[l+1:-4]
id_str = fname[l+1:-15]
i = int(id_str)
#print(fname, ': ', i)
ids.append(i)
return ids
class AudioDataset(BaseDataset):
@staticmethod
def modify_commandline_options(parser, is_train):
return parser
def initialize(self, opt):
self.opt = opt
self.root = opt.dataroot
self.data_dir = os.path.join(opt.dataroot, 'audio_feature')
self.frame_paths = make_dataset(self.data_dir)
self.frame_ids = make_ids(self.frame_paths, self.root)
opt.nObjects = 1
opt.nTrainObjects = 116 # TODO
opt.nTestObjects = 1
opt.test_sequence_names = [[opt.dataroot.split("/")[-1], 'test']]
assert(opt.resize_or_crop == 'resize_and_crop')
if opt.isTrain:
print('ERROR: audio_dataset only allowed for test')
exit()
def getSampleWeights(self):
weights = np.ones((len(self.frame_paths)))
return weights
def getAudioFilename(self):
return os.path.join(self.root, 'audio.wav')
def getAudioFeatureFilename(self, idx):
return self.frame_paths[idx % len(self.frame_paths)]
def __getitem__(self, index):
#print('GET ITEM: ', index)
frame_path = self.frame_paths[index]
frame_id = self.frame_ids[index]
# load deepspeech feature
feature_array = np.load(frame_path)
dsf_np = np.resize(feature_array, (16,29,1))
dsf = transforms.ToTensor()(dsf_np.astype(np.float32))
# load sequence data if necessary
if self.opt.look_ahead:# use prev and following frame infos
r = self.opt.seq_len//2
for i in range(1,r): # prev frames
index_seq = index - i
if index_seq < 0: index_seq = 0
dsf_fname = self.frame_paths[index_seq]
feature_array = np.load(dsf_fname)
dsf_np = np.resize(feature_array, (16,29,1))
dsf_seq = transforms.ToTensor()(dsf_np.astype(np.float32)) # 1 x 16 x 29
dsf = torch.cat([dsf_seq, dsf], 0) # seq_len x 16 x 29
# note the ordering [old ... current]
for i in range(1,self.opt.seq_len - r + 1): # following frames
index_seq = index + i
max_idx = len(self.frame_paths)-1
if index_seq > max_idx: index_seq = max_idx
dsf_fname = self.frame_paths[index_seq]
feature_array = np.load(dsf_fname)
dsf_np = np.resize(feature_array, (16,29,1))
dsf_seq = transforms.ToTensor()(dsf_np.astype(np.float32)) # 1 x 16 x 29
dsf = torch.cat([dsf, dsf_seq], 0) # seq_len x 16 x 29
# note the ordering [old ... current ... future]
else:
for i in range(1,self.opt.seq_len):
index_seq = index - i
if index_seq < 0: index_seq = 0
dsf_fname = self.frame_paths[index_seq]
feature_array = np.load(dsf_fname)
dsf_np = np.resize(feature_array, (16,29,1))
dsf_seq = transforms.ToTensor()(dsf_np.astype(np.float32)) # 1 x 16 x 29
dsf = torch.cat([dsf_seq, dsf], 0) # seq_len x 16 x 29
# note the ordering [old ... current]
#################################
zeroIdentity = torch.zeros(100)
zeroExpressions = torch.zeros(76)
target_id = -1
internal_sequence_id = 0
weight = 1.0 / len(self.frame_paths)
return {'paths': frame_path,
'expressions': zeroExpressions,
'identity': zeroIdentity,
'intrinsics': np.zeros((4)),
'extrinsics': np.zeros((4,4)),
'audio_deepspeech': dsf, # deepspeech feature
'target_id':target_id,
'internal_id':internal_sequence_id,
'weight': np.array([weight]).astype(np.float32)}
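    # Each returned sample stacks seq_len DeepSpeech windows of shape 16 x 29
    # along the first axis in 'audio_deepspeech'; the expression, identity and
    # camera entries are zero placeholders because this dataset is
    # inference-only (see the isTrain guard in initialize()).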
def __len__(self):
return len(self.frame_paths)
def name(self):
return 'AudioDataset'
| 5,048 | 33.582192 | 88 | py |
NeuralVoicePuppetry | NeuralVoicePuppetry-master/Audio2ExpressionNet/Training Code/data/base_dataset.py | import torch.utils.data as data
import torch
from PIL import Image
import torchvision.transforms as transforms
class BaseDataset(data.Dataset):
def __init__(self):
super(BaseDataset, self).__init__()
def name(self):
return 'BaseDataset'
@staticmethod
def modify_commandline_options(parser, is_train):
return parser
def initialize(self, opt):
pass
def getSampleWeights(self):
return torch.ones((len(self)))
def __len__(self):
return 0
def get_transform(opt):
transform_list = []
if opt.resize_or_crop == 'resize_and_crop':
osize = [opt.loadSize, opt.loadSize]
transform_list.append(transforms.Resize(osize, Image.BICUBIC))
transform_list.append(transforms.RandomCrop(opt.fineSize))
elif opt.resize_or_crop == 'crop':
transform_list.append(transforms.RandomCrop(opt.fineSize))
elif opt.resize_or_crop == 'scale_width':
transform_list.append(transforms.Lambda(
lambda img: __scale_width(img, opt.fineSize)))
elif opt.resize_or_crop == 'scale_width_and_crop':
transform_list.append(transforms.Lambda(
lambda img: __scale_width(img, opt.loadSize)))
transform_list.append(transforms.RandomCrop(opt.fineSize))
elif opt.resize_or_crop == 'none':
transform_list.append(transforms.Lambda(
lambda img: __adjust(img)))
else:
raise ValueError('--resize_or_crop %s is not a valid option.' % opt.resize_or_crop)
if opt.isTrain and not opt.no_flip:
transform_list.append(transforms.RandomHorizontalFlip())
transform_list += [transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5),
(0.5, 0.5, 0.5))]
return transforms.Compose(transform_list)
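# A sketch of how the pipeline above is typically used (opt is assumed to
# carry the loadSize/fineSize/resize_or_crop/isTrain/no_flip options):
#
#   transform = get_transform(opt)
#   img = Image.open('frame.png').convert('RGB')
#   tensor = transform(img)  # 3 x fineSize x fineSize, normalized to [-1, 1]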
# just modify the width and height to be multiple of 4
def __adjust(img):
ow, oh = img.size
# the size needs to be a multiple of this number,
# because going through generator network may change img size
# and eventually cause size mismatch error
mult = 4
if ow % mult == 0 and oh % mult == 0:
return img
w = (ow - 1) // mult
w = (w + 1) * mult
h = (oh - 1) // mult
h = (h + 1) * mult
if ow != w or oh != h:
__print_size_warning(ow, oh, w, h)
return img.resize((w, h), Image.BICUBIC)
def __scale_width(img, target_width):
ow, oh = img.size
# the size needs to be a multiple of this number,
# because going through generator network may change img size
# and eventually cause size mismatch error
mult = 4
assert target_width % mult == 0, "the target width needs to be multiple of %d." % mult
if (ow == target_width and oh % mult == 0):
return img
w = target_width
target_height = int(target_width * oh / ow)
m = (target_height - 1) // mult
h = (m + 1) * mult
if target_height != h:
__print_size_warning(target_width, target_height, w, h)
return img.resize((w, h), Image.BICUBIC)
def __print_size_warning(ow, oh, w, h):
if not hasattr(__print_size_warning, 'has_printed'):
print("The image size needs to be a multiple of 4. "
"The loaded image size was (%d, %d), so it was adjusted to "
"(%d, %d). This adjustment will be done to all images "
"whose sizes are not multiples of 4" % (ow, oh, w, h))
__print_size_warning.has_printed = True
| 3,469 | 31.735849 | 91 | py |
NeuralVoicePuppetry | NeuralVoicePuppetry-master/Audio2ExpressionNet/Training Code/data/multi_face_audio_eq_tmp_dataset.py | import os.path
import random
import torchvision.transforms as transforms
import torch
import numpy as np
from data.base_dataset import BaseDataset
from data.audio import Audio
#from data.image_folder import make_dataset
from PIL import Image
#def make_dataset(dir):
# images = []
# assert os.path.isdir(dir), '%s is not a valid directory' % dir
# for root, _, fnames in sorted(os.walk(dir)):
# for fname in fnames:
# if any(fname.endswith(extension) for extension in ['.bin', '.BIN']):
# path = os.path.join(root, fname)
# images.append(path)
# return sorted(images)
def make_dataset(dir):
images = []
ids = []
assert os.path.isdir(dir), '%s is not a valid directory' % dir
for root, _, fnames in sorted(os.walk(dir)):
for fname in fnames:
if any(fname.endswith(extension) for extension in ['.npy', '.NPY']):
                id_str = fname[:-15]  # strip '.deepspeech.npy'
i = int(id_str)
ids.append(i)
ids = sorted(ids)
for id in ids:
fname=str(id)+'.deepspeech.npy'
path = os.path.join(root, fname)
images.append(path)
return images
def make_ids(paths, root_dir):
ids = []
for fname in paths:
l = fname.rfind('/')
        id_str = fname[l+1:-15]  # strip '.deepspeech.npy'
i = int(id_str)
#print(fname, ': ', i)
ids.append(i)
return ids
def make_dataset_ids_png(dir):
images = []
ids = []
assert os.path.isdir(dir), '%s is not a valid directory' % dir
for root, _, fnames in sorted(os.walk(dir)):
for fname in fnames:
            if any(fname.endswith(extension) for extension in ['.png', '.PNG']):
id_str = fname[:-4]
i = int(id_str)
ids.append(i)
ids = sorted(ids)
return ids
def load_intrinsics(input_dir):
file = open(input_dir+"/intrinsics.txt", "r")
    intrinsics = [[float(x) for x in line.split()] for line in file]
    file.close()
    intrinsics = intrinsics[0]  # only the first line holds the intrinsics
return intrinsics
def load_rigids(input_dir):
file = open(input_dir+"/rigid.txt", "r")
    rigid_floats = [[float(x) for x in line.split()] for line in file]  # each 4x4 matrix is stored as 4 consecutive lines of floats
file.close()
all_rigids = [ [rigid_floats[4*idx + 0],rigid_floats[4*idx + 1],rigid_floats[4*idx + 2],rigid_floats[4*idx + 3]] for idx in range(0, len(rigid_floats)//4) ]
return all_rigids
def load_expressions(input_dir):
file = open(input_dir+"/expression.txt", "r")
expressions = [[float(x) for x in line.split()] for line in file]
file.close()
return expressions
def load_identity(input_dir):
file = open(input_dir+"/identities.txt", "r")
identities = [[float(x) for x in line.split()] for line in file]
file.close()
return identities
class MultiFaceAudioEQTmpDataset(BaseDataset):
@staticmethod
def modify_commandline_options(parser, is_train):
return parser
def initialize(self, opt):
self.opt = opt
self.root = opt.dataroot
# read dataset file that contains the filenames for the train, val and test lists
file = open(self.root+"/dataset.txt", "r")
self.filename_train_list, self.filename_val_list, self.filename_test_list = [str(line) for line in file]
file.close()
if self.filename_train_list[-1] == '\n': self.filename_train_list = self.filename_train_list[:-1]
if self.filename_val_list[-1] == '\n': self.filename_val_list = self.filename_val_list[:-1]
if self.filename_test_list[-1] == '\n': self.filename_test_list = self.filename_test_list[:-1]
# get list of train sequences
file = open(self.root+"/" + self.filename_train_list, "r")
self.train_sequence_names = [str(line) for line in file]
file.close()
for i in range(0,len(self.train_sequence_names)):
if self.train_sequence_names[i][-1] == '\n':
self.train_sequence_names[i] = self.train_sequence_names[i][:-1]
# get list of val sequences
file = open(self.root+"/" + self.filename_val_list, "r")
self.val_sequence_names = [[str(w) for w in line.split()] for line in file]
file.close()
for i in range(0,len(self.val_sequence_names)):
if self.val_sequence_names[i][0][-1] == '\n': self.val_sequence_names[i][0] = self.val_sequence_names[i][0][:-1]
if self.val_sequence_names[i][1][-1] == '\n': self.val_sequence_names[i][1] = self.val_sequence_names[i][1][:-1]
# get list of test sequences
file = open(self.root+"/" + self.filename_test_list, "r")
self.test_sequence_names = [[str(w) for w in line.split()] for line in file]
if opt.output_audio_expressions: self.test_sequence_names = self.test_sequence_names[0:1]
file.close()
for i in range(0,len(self.test_sequence_names)):
if self.test_sequence_names[i][0][-1] == '\n': self.test_sequence_names[i][0] = self.test_sequence_names[i][0][:-1]
if self.test_sequence_names[i][1][-1] == '\n': self.test_sequence_names[i][1] = self.test_sequence_names[i][1][:-1]
# print some stats
print('filename_train_list:', self.filename_train_list)
print('\tnum_seq:', len(self.train_sequence_names))
print('filename_val_list: ', self.filename_val_list)
print('\tnum_seq:', len(self.val_sequence_names))
print('filename_test_list: ', self.filename_test_list)
print('\tnum_seq:', len(self.test_sequence_names))
opt.train_sequence_names = self.train_sequence_names
opt.val_sequence_names = self.val_sequence_names
opt.test_sequence_names = self.test_sequence_names
# search mapping from val, test to train sequences that are used as targets
self.val_sequence_targets = []
for i in range(0,len(self.val_sequence_names)):
target_name = self.val_sequence_names[i][1]
target_id = -1
for j in range(0,len(self.train_sequence_names)):
if self.train_sequence_names[j] == target_name:
target_id = j
break
if target_id == -1:
print('Target sequence not in train set! ', target_name)
exit()
self.val_sequence_targets.append(target_id)
self.test_sequence_targets = []
for i in range(0,len(self.test_sequence_names)):
target_name = self.test_sequence_names[i][1]
target_id = -1
for j in range(0,len(self.train_sequence_names)):
if self.train_sequence_names[j] == target_name:
target_id = j
break
if target_id == -1:
print('Target sequence not in train set! ', target_name)
exit()
self.test_sequence_targets.append(target_id)
print('test: ', self.test_sequence_names[i])
print('\t target:', target_id)
# store len values
opt.nTrainObjects = len(self.train_sequence_names)
opt.nValObjects = len(self.val_sequence_names)
opt.nTestObjects = len(self.test_sequence_names)
################################################
################################################
################################################
# prepare dataloader paths / data
self.audio_feature_dir = []
self.image_dir = []
self.uvs_dir = []
self.audio_ids = []
self.image_ids = []
self.intrinsics = []
self.extrinsics = []
self.expressions = []
self.identities = []
self.target_id = []
self.n_frames_total = 0
if opt.phase == 'train':
self.sequence_names = self.train_sequence_names
for i in range(0,len(self.train_sequence_names)):
dataroot = os.path.join(opt.dataroot, self.train_sequence_names[i])
audio_feature_dir = os.path.join(opt.dataroot, self.train_sequence_names[i], 'audio_feature')
image_dir = os.path.join(opt.dataroot, self.train_sequence_names[i], 'images')
uvs_dir = os.path.join(opt.dataroot, self.train_sequence_names[i], 'uvs')
print('load train sequence:', self.train_sequence_names[i])
print('\tidentity_dir:', dataroot)
print('\taudio_feature_dir:', audio_feature_dir)
print('\timage_dir:', image_dir)
print('\tuvs_dir:', uvs_dir)
audio_ids = make_ids(make_dataset(audio_feature_dir), dataroot)
image_ids = make_dataset_ids_png(image_dir) # [-1] * len(audio_ids) #make_ids(make_dataset(image_dir), dataroot)
intrinsics = load_intrinsics(dataroot)
extrinsics = load_rigids(dataroot)
expressions = load_expressions(dataroot)
identity = load_identity(dataroot)
min_len = min(len(audio_ids), len(image_ids), len(extrinsics), len(expressions))
self.audio_feature_dir.append(audio_feature_dir)
self.image_dir.append(image_dir)
self.uvs_dir.append(uvs_dir)
self.audio_ids.append(audio_ids[:min_len])
self.image_ids.append(image_ids[:min_len])
self.intrinsics.append(intrinsics)
self.extrinsics.append(extrinsics[:min_len])
self.expressions.append(expressions[:min_len])
self.identities.append(identity[:min_len])
self.target_id.append(i)
self.n_frames_total += min_len
elif opt.phase == 'val':
for i in range(0,len(self.val_sequence_names)):
target_id = self.val_sequence_targets[i]
dataroot = os.path.join(opt.dataroot, self.train_sequence_names[target_id])
audio_feature_dir = os.path.join(opt.dataroot, self.val_sequence_names[i][0], 'audio_feature')
image_dir = os.path.join(opt.dataroot, self.train_sequence_names[target_id], 'images')
uvs_dir = os.path.join(opt.dataroot, self.train_sequence_names[target_id], 'uvs')
print('load val sequence:', self.val_sequence_names[i])
print('\tidentity_dir:', dataroot)
print('\taudio_feature_dir:', audio_feature_dir)
print('\timage_dir:', image_dir)
print('\tuvs_dir:', uvs_dir)
audio_ids = make_ids(make_dataset(audio_feature_dir), os.path.join(opt.dataroot, self.val_sequence_names[i][0]))
image_ids = make_dataset_ids_png(image_dir) # [-1] * len(audio_ids) #make_ids(make_dataset(image_dir), dataroot)
intrinsics = load_intrinsics(dataroot)
extrinsics = load_rigids(dataroot)
expressions = load_expressions(os.path.join(opt.dataroot, self.val_sequence_names[i][0]))
identity = load_identity(dataroot)
min_len = min(len(audio_ids), len(image_ids), len(extrinsics), len(expressions))
self.audio_feature_dir.append(audio_feature_dir)
self.image_dir.append(image_dir)
self.uvs_dir.append(uvs_dir)
self.audio_ids.append(audio_ids[:min_len])
self.image_ids.append(image_ids[:min_len])
self.intrinsics.append(intrinsics)
self.extrinsics.append(extrinsics[:min_len])
self.expressions.append(expressions[:min_len])
self.identities.append(identity[:min_len])
self.target_id.append(target_id)
self.n_frames_total += min_len
else: # test
for i in range(0,len(self.test_sequence_names)):
target_id = self.test_sequence_targets[i]
dataroot = os.path.join(opt.dataroot, self.train_sequence_names[target_id])
audio_feature_dir = os.path.join(opt.dataroot, self.test_sequence_names[i][0], 'audio_feature')
image_dir = os.path.join(opt.dataroot, self.train_sequence_names[target_id], 'images')
uvs_dir = os.path.join(opt.dataroot, self.train_sequence_names[target_id], 'uvs')
print('load test sequence:', self.test_sequence_names[i])
print('\tidentity_dir:', dataroot)
print('\taudio_feature_dir:', audio_feature_dir)
print('\timage_dir:', image_dir)
print('\tuvs_dir:', uvs_dir)
audio_ids = make_ids(make_dataset(audio_feature_dir), os.path.join(opt.dataroot, self.test_sequence_names[i][0]))
image_ids = make_dataset_ids_png(image_dir) # [-1] * len(audio_ids) #make_ids(make_dataset(image_dir), dataroot)
intrinsics = load_intrinsics(dataroot)
extrinsics = load_rigids(dataroot)
expressions = load_expressions(os.path.join(opt.dataroot, self.test_sequence_names[i][0]))
identity = load_identity(dataroot)
min_len = min(len(audio_ids), len(image_ids), len(extrinsics), len(expressions))
self.audio_feature_dir.append(audio_feature_dir)
self.image_dir.append(image_dir)
self.uvs_dir.append(uvs_dir)
self.audio_ids.append(audio_ids[:min_len])
self.image_ids.append(image_ids[:min_len])
self.intrinsics.append(intrinsics)
self.extrinsics.append(extrinsics[:min_len])
self.expressions.append(expressions[:min_len])
self.identities.append(identity[:min_len])
self.target_id.append(target_id)
self.n_frames_total += min_len
print('frames_total:', self.n_frames_total)
#global_target_ids = []
#for i in range(0,len(self.audio_ids)):
# for j in range(0,len(self.audio_ids[i])):
# global_target_ids.append(self.target_id[i])
#global_target_ids=np.array(global_target_ids)
#self.weights = np.where(global_target_ids==2, 1.0 * np.ones((self.n_frames_total)), 0.01 * np.ones((self.n_frames_total)) )
self.weights = []
for i in range(0,len(self.audio_ids)):
l = len(self.audio_ids[i])
for j in range(0,l):
self.weights.append(1.0 / l)
self.weights = np.array(self.weights)
assert(opt.resize_or_crop == 'resize_and_crop')
def getSampleWeights(self):
return self.weights
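    # getSampleWeights() assigns each frame the weight 1/len(sequence), so a
    # weighted sampler draws every sequence equally often regardless of its
    # length; a sketch of the assumed wiring (not part of this file):
    #
    #   weights = dataset.getSampleWeights()
    #   sampler = torch.utils.data.WeightedRandomSampler(
    #       weights, num_samples=len(dataset), replacement=True)
    #   loader = torch.utils.data.DataLoader(dataset, sampler=sampler)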
def getitem(self, global_index):
# select sequence
internal_sequence_id = 0
sum_frames = 0
for i in range(0,len(self.audio_ids)):
l = len(self.audio_ids[i])
if (global_index-sum_frames) < l:
internal_sequence_id = i
break
else:
sum_frames += len(self.audio_ids[i])
# select frame from sequence
index = (global_index-sum_frames) % len(self.audio_ids[internal_sequence_id])
# get data ids
audio_id = self.audio_ids[internal_sequence_id][index]
image_id = self.image_ids[internal_sequence_id][index]
#print('GET ITEM: ', index)
#img_path = self.frame_paths[sequence_id][index]
# intrinsics and extrinsics
intrinsics = self.intrinsics[internal_sequence_id]
extrinsics = self.extrinsics[internal_sequence_id][image_id]
# expressions
expressions = np.asarray(self.expressions[internal_sequence_id][audio_id], dtype=np.float32)
#print('expressions:', expressions.shape)
        expressions[32] *= 0.0  # remove eyebrow movements
        expressions[41] *= 0.0  # remove eyebrow movements
        expressions[71:75] *= 0.0  # remove eyebrow movements
expressions = torch.tensor(expressions)
# identity
identity = torch.tensor(self.identities[internal_sequence_id][image_id])
target_id = self.target_id[internal_sequence_id] # sequence id refers to the target sequence (of the training corpus)
# load deepspeech feature
#print('audio_id', audio_id)
dsf_fname = os.path.join(self.audio_feature_dir[internal_sequence_id], str(audio_id) + '.deepspeech.npy')
feature_array = np.load(dsf_fname)
dsf_np = np.resize(feature_array, (16,29,1))
dsf = transforms.ToTensor()(dsf_np.astype(np.float32))
# load sequence data if necessary
last_valid_idx = audio_id
for i in range(1,self.opt.seq_len):
index_seq = index - i
if index_seq < 0: index_seq = 0
audio_id_seq = self.audio_ids[internal_sequence_id][index_seq]
if audio_id_seq == audio_id - i: last_valid_idx = audio_id_seq
else: audio_id_seq = last_valid_idx
dsf_fname = os.path.join(self.audio_feature_dir[internal_sequence_id], str(audio_id_seq) + '.deepspeech.npy')
feature_array = np.load(dsf_fname)
dsf_np = np.resize(feature_array, (16,29,1))
dsf_seq = transforms.ToTensor()(dsf_np.astype(np.float32)) # 1 x 16 x 29
dsf = torch.cat([dsf_seq, dsf], 0) # seq_len x 16 x 29
# note the ordering [old ... current]
#weight = 1.0 / len(self.audio_feature_dir[internal_sequence_id])
weight = self.weights[global_index]
return {'paths': dsf_fname, #img_path,
'intrinsics': np.array(intrinsics),
'extrinsics': np.array(extrinsics),
'expressions': expressions,
'identity': identity,
'audio_deepspeech': dsf, # deepspeech feature
'target_id':target_id,
'internal_id':internal_sequence_id,
'weight': np.array([weight]).astype(np.float32)}
def __getitem__(self, global_index):
# select frame from sequence
index = global_index
current = self.getitem(index)
prv = self.getitem(max(index-1, 0))
nxt = self.getitem(min(index+1, self.n_frames_total-1))
return {
'paths': current['paths'], #img_path,
'target_id': current['target_id'],
'internal_id': current['internal_id'],
'weight': current['weight'],
'identity': current['identity'],
'intrinsics': current['intrinsics'],
'extrinsics': current['extrinsics'],
'expressions': current['expressions'],
'audio_deepspeech': current['audio_deepspeech'],
'extrinsics_prv': prv['extrinsics'],
'expressions_prv': prv['expressions'],
'audio_deepspeech_prv': prv['audio_deepspeech'],
'extrinsics_nxt': nxt['extrinsics'],
'expressions_nxt': nxt['expressions'],
'audio_deepspeech_nxt': nxt['audio_deepspeech'],
}
def __len__(self):
return self.n_frames_total #len(self.frame_paths[0])
def name(self):
return 'MultiFaceAudioEQTmpDataset'
| 19,436 | 43.580275 | 160 | py |
NeuralVoicePuppetry | NeuralVoicePuppetry-master/Audio2ExpressionNet/Training Code/data/multi_face_audio_eq_tmp_cached_dataset.py | import os.path
import random
import torchvision.transforms as transforms
import torch
import numpy as np
from data.base_dataset import BaseDataset
from data.audio import Audio
#from data.image_folder import make_dataset
from PIL import Image
import progressbar
#def make_dataset(dir):
# images = []
# assert os.path.isdir(dir), '%s is not a valid directory' % dir
# for root, _, fnames in sorted(os.walk(dir)):
# for fname in fnames:
# if any(fname.endswith(extension) for extension in ['.bin', '.BIN']):
# path = os.path.join(root, fname)
# images.append(path)
# return sorted(images)
def make_dataset(dir):
images = []
ids = []
assert os.path.isdir(dir), '%s is not a valid directory' % dir
for root, _, fnames in sorted(os.walk(dir)):
for fname in fnames:
if any(fname.endswith(extension) for extension in ['.npy', '.NPY']):
                id_str = fname[:-15]  # strip '.deepspeech.npy'
i = int(id_str)
ids.append(i)
ids = sorted(ids)
for id in ids:
fname=str(id)+'.deepspeech.npy'
path = os.path.join(root, fname)
images.append(path)
return images
def make_ids(paths, root_dir):
ids = []
for fname in paths:
l = fname.rfind('/')
        id_str = fname[l+1:-15]  # strip '.deepspeech.npy'
i = int(id_str)
#print(fname, ': ', i)
ids.append(i)
return ids
def make_dataset_ids_png(dir):
images = []
ids = []
assert os.path.isdir(dir), '%s is not a valid directory' % dir
for root, _, fnames in sorted(os.walk(dir)):
for fname in fnames:
            if any(fname.endswith(extension) for extension in ['.png', '.PNG']):
id_str = fname[:-4]
i = int(id_str)
ids.append(i)
ids = sorted(ids)
return ids
def load_intrinsics(input_dir):
file = open(input_dir+"/intrinsics.txt", "r")
    intrinsics = [[float(x) for x in line.split()] for line in file]
    file.close()
    intrinsics = intrinsics[0]  # only the first line holds the intrinsics
return intrinsics
def load_rigids(input_dir):
file = open(input_dir+"/rigid.txt", "r")
    rigid_floats = [[float(x) for x in line.split()] for line in file]  # each 4x4 matrix is stored as 4 consecutive lines of floats
file.close()
all_rigids = [ [rigid_floats[4*idx + 0],rigid_floats[4*idx + 1],rigid_floats[4*idx + 2],rigid_floats[4*idx + 3]] for idx in range(0, len(rigid_floats)//4) ]
return all_rigids
def load_expressions(input_dir):
file = open(input_dir+"/expression.txt", "r")
expressions = [[float(x) for x in line.split()] for line in file]
file.close()
return expressions
def load_identity(input_dir):
file = open(input_dir+"/identities.txt", "r")
identities = [[float(x) for x in line.split()] for line in file]
file.close()
return identities
class MultiFaceAudioEQTmpCachedDataset(BaseDataset):
@staticmethod
def modify_commandline_options(parser, is_train):
return parser
def initialize(self, opt):
self.opt = opt
self.root = opt.dataroot
# read dataset file that contains the filenames for the train, val and test lists
file = open(self.root+"/dataset.txt", "r")
self.filename_train_list, self.filename_val_list, self.filename_test_list = [str(line) for line in file]
file.close()
if self.filename_train_list[-1] == '\n': self.filename_train_list = self.filename_train_list[:-1]
if self.filename_val_list[-1] == '\n': self.filename_val_list = self.filename_val_list[:-1]
if self.filename_test_list[-1] == '\n': self.filename_test_list = self.filename_test_list[:-1]
# get list of train sequences
file = open(self.root+"/" + self.filename_train_list, "r")
self.train_sequence_names = [str(line) for line in file]
file.close()
for i in range(0,len(self.train_sequence_names)):
if self.train_sequence_names[i][-1] == '\n':
self.train_sequence_names[i] = self.train_sequence_names[i][:-1]
# get list of val sequences
file = open(self.root+"/" + self.filename_val_list, "r")
self.val_sequence_names = [[str(w) for w in line.split()] for line in file]
file.close()
for i in range(0,len(self.val_sequence_names)):
if self.val_sequence_names[i][0][-1] == '\n': self.val_sequence_names[i][0] = self.val_sequence_names[i][0][:-1]
if self.val_sequence_names[i][1][-1] == '\n': self.val_sequence_names[i][1] = self.val_sequence_names[i][1][:-1]
# get list of test sequences
file = open(self.root+"/" + self.filename_test_list, "r")
self.test_sequence_names = [[str(w) for w in line.split()] for line in file]
if opt.output_audio_expressions: self.test_sequence_names = self.test_sequence_names[0:1]
file.close()
for i in range(0,len(self.test_sequence_names)):
if self.test_sequence_names[i][0][-1] == '\n': self.test_sequence_names[i][0] = self.test_sequence_names[i][0][:-1]
if self.test_sequence_names[i][1][-1] == '\n': self.test_sequence_names[i][1] = self.test_sequence_names[i][1][:-1]
# print some stats
print('filename_train_list:', self.filename_train_list)
print('\tnum_seq:', len(self.train_sequence_names))
print('filename_val_list: ', self.filename_val_list)
print('\tnum_seq:', len(self.val_sequence_names))
print('filename_test_list: ', self.filename_test_list)
print('\tnum_seq:', len(self.test_sequence_names))
opt.train_sequence_names = self.train_sequence_names
opt.val_sequence_names = self.val_sequence_names
opt.test_sequence_names = self.test_sequence_names
# search mapping from val, test to train sequences that are used as targets
self.val_sequence_targets = []
for i in range(0,len(self.val_sequence_names)):
target_name = self.val_sequence_names[i][1]
target_id = -1
for j in range(0,len(self.train_sequence_names)):
if self.train_sequence_names[j] == target_name:
target_id = j
break
if target_id == -1:
print('Target sequence not in train set! ', target_name)
exit()
self.val_sequence_targets.append(target_id)
self.test_sequence_targets = []
for i in range(0,len(self.test_sequence_names)):
target_name = self.test_sequence_names[i][1]
target_id = -1
for j in range(0,len(self.train_sequence_names)):
if self.train_sequence_names[j] == target_name:
target_id = j
break
if target_id == -1:
print('Target sequence not in train set! ', target_name)
exit()
self.test_sequence_targets.append(target_id)
print('test: ', self.test_sequence_names[i])
print('\t target:', target_id)
# store len values
opt.nTrainObjects = len(self.train_sequence_names)
opt.nValObjects = len(self.val_sequence_names)
opt.nTestObjects = len(self.test_sequence_names)
################################################
################################################
################################################
# prepare dataloader paths / data
self.audio_feature_dir = []
self.image_dir = []
self.uvs_dir = []
self.audio_ids = []
self.image_ids = []
self.intrinsics = []
self.extrinsics = []
self.expressions = []
self.identities = []
self.target_id = []
self.n_frames_total = 0
if opt.phase == 'train':
self.sequence_names = self.train_sequence_names
for i in range(0,len(self.train_sequence_names)):
dataroot = os.path.join(opt.dataroot, self.train_sequence_names[i])
audio_feature_dir = os.path.join(opt.dataroot, self.train_sequence_names[i], 'audio_feature')
image_dir = os.path.join(opt.dataroot, self.train_sequence_names[i], 'images')
uvs_dir = os.path.join(opt.dataroot, self.train_sequence_names[i], 'uvs')
print('load train sequence:', self.train_sequence_names[i])
print('\tidentity_dir:', dataroot)
print('\taudio_feature_dir:', audio_feature_dir)
print('\timage_dir:', image_dir)
print('\tuvs_dir:', uvs_dir)
audio_ids = make_ids(make_dataset(audio_feature_dir), dataroot)
image_ids = make_dataset_ids_png(image_dir) # [-1] * len(audio_ids) #make_ids(make_dataset(image_dir), dataroot)
intrinsics = load_intrinsics(dataroot)
extrinsics = load_rigids(dataroot)
expressions = load_expressions(dataroot)
identity = load_identity(dataroot)
min_len = min(len(audio_ids), len(image_ids), len(extrinsics), len(expressions))
self.audio_feature_dir.append(audio_feature_dir)
self.image_dir.append(image_dir)
self.uvs_dir.append(uvs_dir)
self.audio_ids.append(audio_ids[:min_len])
self.image_ids.append(image_ids[:min_len])
self.intrinsics.append(intrinsics)
self.extrinsics.append(extrinsics[:min_len])
self.expressions.append(expressions[:min_len])
self.identities.append(identity[:min_len])
self.target_id.append(i)
self.n_frames_total += min_len
elif opt.phase == 'val':
for i in range(0,len(self.val_sequence_names)):
target_id = self.val_sequence_targets[i]
dataroot = os.path.join(opt.dataroot, self.train_sequence_names[target_id])
audio_feature_dir = os.path.join(opt.dataroot, self.val_sequence_names[i][0], 'audio_feature')
image_dir = os.path.join(opt.dataroot, self.train_sequence_names[target_id], 'images')
uvs_dir = os.path.join(opt.dataroot, self.train_sequence_names[target_id], 'uvs')
print('load val sequence:', self.val_sequence_names[i])
print('\tidentity_dir:', dataroot)
print('\taudio_feature_dir:', audio_feature_dir)
print('\timage_dir:', image_dir)
print('\tuvs_dir:', uvs_dir)
audio_ids = make_ids(make_dataset(audio_feature_dir), os.path.join(opt.dataroot, self.val_sequence_names[i][0]))
image_ids = make_dataset_ids_png(image_dir) # [-1] * len(audio_ids) #make_ids(make_dataset(image_dir), dataroot)
intrinsics = load_intrinsics(dataroot)
extrinsics = load_rigids(dataroot)
expressions = load_expressions(os.path.join(opt.dataroot, self.val_sequence_names[i][0]))
identity = load_identity(dataroot)
min_len = min(len(audio_ids), len(image_ids), len(extrinsics), len(expressions))
self.audio_feature_dir.append(audio_feature_dir)
self.image_dir.append(image_dir)
self.uvs_dir.append(uvs_dir)
self.audio_ids.append(audio_ids[:min_len])
self.image_ids.append(image_ids[:min_len])
self.intrinsics.append(intrinsics)
self.extrinsics.append(extrinsics[:min_len])
self.expressions.append(expressions[:min_len])
self.identities.append(identity[:min_len])
self.target_id.append(target_id)
self.n_frames_total += min_len
else: # test
for i in range(0,len(self.test_sequence_names)):
target_id = self.test_sequence_targets[i]
dataroot = os.path.join(opt.dataroot, self.train_sequence_names[target_id])
audio_feature_dir = os.path.join(opt.dataroot, self.test_sequence_names[i][0], 'audio_feature')
image_dir = os.path.join(opt.dataroot, self.train_sequence_names[target_id], 'images')
uvs_dir = os.path.join(opt.dataroot, self.train_sequence_names[target_id], 'uvs')
print('load test sequence:', self.test_sequence_names[i])
print('\tidentity_dir:', dataroot)
print('\taudio_feature_dir:', audio_feature_dir)
print('\timage_dir:', image_dir)
print('\tuvs_dir:', uvs_dir)
audio_ids = make_ids(make_dataset(audio_feature_dir), os.path.join(opt.dataroot, self.test_sequence_names[i][0]))
image_ids = make_dataset_ids_png(image_dir) # [-1] * len(audio_ids) #make_ids(make_dataset(image_dir), dataroot)
intrinsics = load_intrinsics(dataroot)
extrinsics = load_rigids(dataroot)
expressions = load_expressions(os.path.join(opt.dataroot, self.test_sequence_names[i][0]))
identity = load_identity(dataroot)
min_len = min(len(audio_ids), len(image_ids), len(extrinsics), len(expressions))
self.audio_feature_dir.append(audio_feature_dir)
self.image_dir.append(image_dir)
self.uvs_dir.append(uvs_dir)
self.audio_ids.append(audio_ids[:min_len])
self.image_ids.append(image_ids[:min_len])
self.intrinsics.append(intrinsics)
self.extrinsics.append(extrinsics[:min_len])
self.expressions.append(expressions[:min_len])
self.identities.append(identity[:min_len])
self.target_id.append(target_id)
self.n_frames_total += min_len
print('frames_total:', self.n_frames_total)
#global_target_ids = []
#for i in range(0,len(self.audio_ids)):
# for j in range(0,len(self.audio_ids[i])):
# global_target_ids.append(self.target_id[i])
#global_target_ids=np.array(global_target_ids)
#self.weights = np.where(global_target_ids==2, 1.0 * np.ones((self.n_frames_total)), 0.01 * np.ones((self.n_frames_total)) )
self.weights = []
for i in range(0,len(self.audio_ids)):
l = len(self.audio_ids[i])
for j in range(0,l):
self.weights.append(1.0 / l)
self.weights = np.array(self.weights)
assert(opt.resize_or_crop == 'resize_and_crop')
# mapping global to internal
self.mapping_global2internal = []
self.mapping_global2internal_offset = []
self.dsf = []
offset = 0
with progressbar.ProgressBar(max_value=self.n_frames_total) as bar:
for i in range(0,len(self.audio_ids)):
l = len(self.audio_ids[i])
dsf_seq = []
for k in range(0,l):
self.mapping_global2internal.append(i)
self.mapping_global2internal_offset.append(offset)
dsf_fname = os.path.join(self.audio_feature_dir[i], str(self.audio_ids[i][k]) + '.deepspeech.npy')
feature_array = np.load(dsf_fname)
dsf_np = np.resize(feature_array, (16,29,1))
dsf_seq.append(dsf_np.astype(np.float32))
bar.update(offset + k)
self.dsf.append(dsf_seq)
offset += l
def getSampleWeights(self):
return self.weights
def getitem(self, global_index):
# select sequence
internal_sequence_id = self.mapping_global2internal[global_index]
sum_frames = self.mapping_global2internal_offset[global_index]
# select frame from sequence
index = (global_index-sum_frames) % len(self.audio_ids[internal_sequence_id])
# get data ids
audio_id = self.audio_ids[internal_sequence_id][index]
image_id = self.image_ids[internal_sequence_id][index]
#print('GET ITEM: ', index)
#img_path = self.frame_paths[sequence_id][index]
# intrinsics and extrinsics
intrinsics = self.intrinsics[internal_sequence_id]
extrinsics = self.extrinsics[internal_sequence_id][image_id]
# expressions
expressions = np.asarray(self.expressions[internal_sequence_id][audio_id], dtype=np.float32)
#print('expressions:', expressions.shape)
expressions[32] *= 0.0 # remove eye brow movements
expressions[41] *= 0.0 # remove eye brow movements
expressions[71:75] *= 0.0 # remove eye brow movements
expressions = torch.tensor(expressions)
# identity
identity = torch.tensor(self.identities[internal_sequence_id][image_id])
target_id = self.target_id[internal_sequence_id] # sequence id refers to the target sequence (of the training corpus)
# load deepspeech feature
#print('audio_id', audio_id)
dsf_fname = os.path.join(self.audio_feature_dir[internal_sequence_id], str(audio_id) + '.deepspeech.npy')
dsf_np = self.dsf[internal_sequence_id][index]
dsf = transforms.ToTensor()(dsf_np)
# load sequence data if necessary
if self.opt.look_ahead:# use prev and following frame infos
r = self.opt.seq_len//2
for i in range(1,r): # prev frames
index_seq = index - i
if index_seq < 0: index_seq = 0
dsf_np = self.dsf[internal_sequence_id][index_seq]
dsf_seq = transforms.ToTensor()(dsf_np) # 1 x 16 x 29
dsf = torch.cat([dsf_seq, dsf], 0) # seq_len x 16 x 29
# note the ordering [old ... current]
for i in range(1,self.opt.seq_len - r + 1): # following frames
index_seq = index + i
max_idx = len(self.audio_ids[internal_sequence_id])-1
if index_seq > max_idx: index_seq = max_idx
dsf_np = self.dsf[internal_sequence_id][index_seq]
dsf_seq = transforms.ToTensor()(dsf_np) # 1 x 16 x 29
dsf = torch.cat([dsf, dsf_seq], 0) # seq_len x 16 x 29
# note the ordering [old ... current ... future]
else:
for i in range(1,self.opt.seq_len):
index_seq = index - i
if index_seq < 0: index_seq = 0
dsf_np = self.dsf[internal_sequence_id][index_seq]
dsf_seq = transforms.ToTensor()(dsf_np) # 1 x 16 x 29
dsf = torch.cat([dsf_seq, dsf], 0) # seq_len x 16 x 29
# note the ordering [old ... current]
#weight = 1.0 / len(self.audio_feature_dir[internal_sequence_id])
weight = self.weights[global_index]
return {'paths': dsf_fname, #img_path,
'intrinsics': np.array(intrinsics),
'extrinsics': np.array(extrinsics),
'expressions': expressions,
'identity': identity,
'audio_deepspeech': dsf, # deepspeech feature
'target_id':target_id,
'internal_id':internal_sequence_id,
'weight': np.array([weight]).astype(np.float32)}
def __getitem__(self, global_index):
# select frame from sequence
index = global_index
current = self.getitem(index)
prv = self.getitem(max(index-1, 0))
nxt = self.getitem(min(index+1, self.n_frames_total-1))
return {
'paths': current['paths'], #img_path,
'target_id': current['target_id'],
'internal_id': current['internal_id'],
'weight': current['weight'],
'identity': current['identity'],
'intrinsics': current['intrinsics'],
'extrinsics': current['extrinsics'],
'expressions': current['expressions'],
'audio_deepspeech': current['audio_deepspeech'],
'extrinsics_prv': prv['extrinsics'],
'expressions_prv': prv['expressions'],
'audio_deepspeech_prv': prv['audio_deepspeech'],
'extrinsics_nxt': nxt['extrinsics'],
'expressions_nxt': nxt['expressions'],
'audio_deepspeech_nxt': nxt['audio_deepspeech'],
}
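# The *_prv / *_nxt entries expose the neighbouring frames next to the current
# one, e.g. so a temporal loss can compare consecutive predictions (an
# interpretation of these keys; the actual consumer is model-specific).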
def __len__(self):
return self.n_frames_total #len(self.frame_paths[0])
def name(self):
return 'MultiFaceAudioEQTmpCachedDataset'
| 20,838 | 44.400871 | 160 | py |
NeuralVoicePuppetry | NeuralVoicePuppetry-master/Audio2ExpressionNet/Training Code/data/audio.py | import time
import random
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
import torchaudio
import torchaudio.transforms
import librosa
import scipy.signal
import librosa.display
import matplotlib.pyplot as plt
class Audio():
def name(self):
return 'Audio'
def __init__(self, filename, write_mel_spectogram = False):
self.n_mels=128
self.fmax=8000
self.hop_length_ms = 20
sound, sample_rate = librosa.load(filename)#torchaudio.load(filename)
self.raw_audio = sound
self.sample_rate = sample_rate
print('sample_rate = %d' % self.sample_rate)
self.n_samples = sound.shape[0]
self.time_total = self.n_samples / self.sample_rate
print('length = %ds' % self.time_total)
print('compute mel spectrogram...')
self.hop_length = int(sample_rate / 1000.0 * self.hop_length_ms)
print('hop_length: ', self.hop_length)
self.mel_spectrogram = librosa.feature.melspectrogram(y=self.raw_audio, sr=self.sample_rate, hop_length=self.hop_length, n_mels=self.n_mels, fmax=self.fmax)
if write_mel_spectogram:
print('write spectrogram to file')
plt.figure(figsize=(100, 15))
librosa.display.specshow(librosa.power_to_db(self.mel_spectrogram, ref=np.max), y_axis='mel', fmax=self.fmax, x_axis='time')
plt.colorbar(format='%+2.0f dB')
plt.title('Mel spectrogram')
plt.tight_layout()
plt.savefig('mel_features.png', dpi=None, facecolor='w', edgecolor='w', orientation='portrait', format=None, transparent=False, bbox_inches=None, pad_inches=0.1, metadata=None) # papertype/frameon dropped (removed in recent matplotlib)
print('mel: ', self.mel_spectrogram.shape) # (128, 18441)
self.n_mel_frames = self.mel_spectrogram.shape[1]
self.mel_sample_rate = self.mel_spectrogram.shape[1] / self.time_total
print('n_mel_frames: ', self.n_mel_frames)
print('mel_sample_rate: ', self.mel_sample_rate)
# convert to torch
self.mel_spectrogram = torch.FloatTensor(self.mel_spectrogram)
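# Minimal usage sketch (hypothetical path; 128 mel bins and 20ms hop as above):
# audio = Audio('datasets/SOURCES/greta_1/audio.mp3')
# window = audio.getWindow(mel_frame_idx=100, window_size=16) # -> (1, 1, 128, 16)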
def getWindow(self, mel_frame_idx, window_size):
# get audio mel sample window
audio_start = mel_frame_idx - (window_size//2)
audio_end = mel_frame_idx + (window_size//2)
if audio_start < 0:
audio_input = self.mel_spectrogram[0:self.n_mels, 0:audio_end]
zeros = torch.zeros((self.n_mels,-audio_start))
audio_input = torch.cat([zeros, audio_input], 1)
elif audio_end >= self.n_mel_frames:
audio_input = self.mel_spectrogram[:, audio_start:self.n_mel_frames] # keep the final mel frame (audio_start:-1 dropped it)
zeros = torch.zeros((self.n_mels, audio_end - self.n_mel_frames))
audio_input = torch.cat([audio_input, zeros], 1)
else:
audio_input = self.mel_spectrogram[:, audio_start:audio_end]
return torch.reshape(audio_input, (1, 1, self.n_mels, window_size)) | 3,047 | 38.584416 | 218 | py |
NeuralVoicePuppetry | NeuralVoicePuppetry-master/Audio2ExpressionNet/Training Code/data/face_dataset.py | import os.path
import random
import torchvision.transforms as transforms
import torch
import numpy as np
from data.base_dataset import BaseDataset
from data.audio import Audio
#from data.image_folder import make_dataset
from PIL import Image
from util import util
#def make_dataset(dir):
# images = []
# assert os.path.isdir(dir), '%s is not a valid directory' % dir
# for root, _, fnames in sorted(os.walk(dir)):
# for fname in fnames:
# if any(fname.endswith(extension) for extension in ['.bin', '.BIN']):
# path = os.path.join(root, fname)
# images.append(path)
# return sorted(images)
def make_dataset(dir):
images = []
ids = []
assert os.path.isdir(dir), '%s is not a valid directory' % dir
for root, _, fnames in sorted(os.walk(dir)):
for fname in fnames:
if any(fname.endswith(extension) for extension in ['.npy', '.NPY']):
id_str = fname[:-15] # strip the 15-char '.deepspeech.npy' suffix
i = int(id_str)
ids.append(i)
ids = sorted(ids)
for id in ids:
fname=str(id)+'.deepspeech.npy'
path = os.path.join(root, fname)
images.append(path)
return images
def make_ids(paths, root_dir):
ids = []
for fname in paths:
l = fname.rfind('/')
id_str = fname[l+1:-15] # strip the path prefix and the '.deepspeech.npy' suffix
i = int(id_str)
#print(fname, ': ', i)
ids.append(i)
return ids
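# Example: make_ids(['<root>/audio_feature/42.deepspeech.npy'], root) -> [42];
# the 15 stripped characters are exactly len('.deepspeech.npy').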
def make_dataset_png_ids(dir):
images = []
ids = []
assert os.path.isdir(dir), '%s is not a valid directory' % dir
for root, _, fnames in sorted(os.walk(dir)):
for fname in fnames:
if any(fname.endswith(extension) for extension in ['.png', '.PNG']):
id_str = fname[:-4]
i = int(id_str)
ids.append(i)
ids = sorted(ids)
return ids
def make_dataset_exr_ids(dir):
images = []
ids = []
assert os.path.isdir(dir), '%s is not a valid directory' % dir
for root, _, fnames in sorted(os.walk(dir)):
for fname in fnames:
if any(fname.endswith(extension) for extension in ['.exr', '.EXR']):
id_str = fname[:-4]
i = int(id_str)
ids.append(i)
ids = sorted(ids)
return ids
def load_intrinsics(input_dir):
file = open(input_dir+"/intrinsics.txt", "r")
intrinsics = [float(x) for x in file.readline().split()] # only the first line is used
file.close()
return intrinsics
def load_rigids(input_dir):
file = open(input_dir+"/rigid.txt", "r")
rigid_floats = [[float(x) for x in line.split()] for line in file] # expects 4 consecutive rows per 4x4 matrix (the indexing below breaks on blank separator lines)
file.close()
all_rigids = [ [rigid_floats[4*idx + 0],rigid_floats[4*idx + 1],rigid_floats[4*idx + 2],rigid_floats[4*idx + 3]] for idx in range(0, len(rigid_floats)//4) ]
return all_rigids
def load_expressions(input_dir):
file = open(input_dir+"/expression.txt", "r")
expressions = [[float(x) for x in line.split()] for line in file]
file.close()
return expressions
def load_identity(input_dir):
file = open(input_dir+"/identities.txt", "r")
identity = [[float(x) for x in line.split()] for line in file]
file.close()
return identity
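# Per-sequence file layout expected by the loaders above (inferred from this file):
# <dataroot>/intrinsics.txt one line of camera intrinsics
# <dataroot>/rigid.txt 4 rows per 4x4 camera pose, one pose per frame
# <dataroot>/expression.txt one line of expression coefficients per frame
# <dataroot>/identities.txt one line of identity coefficients per frame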
class FaceDataset(BaseDataset):
@staticmethod
def modify_commandline_options(parser, is_train):
return parser
def initialize(self, opt):
self.opt = opt
# directories
self.dataroot = opt.dataroot
self.audio_feature_dir = os.path.join(opt.dataroot, 'audio_feature')
self.image_dir = os.path.join(opt.dataroot, 'images')
self.uvs_dir = os.path.join(opt.dataroot, 'uvs')
# debug print
print('load sequence:', self.dataroot)
print('\taudio_feature_dir:', self.audio_feature_dir)
print('\timage_dir:', self.image_dir)
print('\tuvs_dir:', self.uvs_dir)
# generate index maps
audio_ids = make_ids(make_dataset(self.audio_feature_dir), self.dataroot)
image_ids = make_dataset_png_ids(self.image_dir)
uvs_ids = make_dataset_exr_ids(self.uvs_dir)
# get model parameters
intrinsics = load_intrinsics(self.dataroot)
extrinsics = load_rigids(self.dataroot)
expressions = load_expressions(self.dataroot)
identities = load_identity(self.dataroot)
if opt.phase == 'test': # test overwrites the audio and uv files, as well as expressions
print('Test mode. Overwriting audio, uv and expressions')
print('source sequence:', opt.source_dir)
dataroot = opt.source_dir
self.audio_feature_dir = os.path.join(dataroot, 'audio_feature')
self.uvs_dir = os.path.join(dataroot, 'uvs')
audio_ids = make_ids(make_dataset(self.audio_feature_dir), dataroot)
uvs_ids = make_dataset_exr_ids(self.uvs_dir)
intrinsics = load_intrinsics(dataroot)
extrinsics = load_rigids(dataroot)
expressions = load_expressions(dataroot)
identities = load_identity(dataroot)
print('\tnum audio_ids:', len(audio_ids))
print('\tnum image_ids:', len(image_ids))
print('\tnum uvs_ids:', len(uvs_ids))
# set data
min_len = min(len(audio_ids), len(image_ids), len(uvs_ids), len(extrinsics), len(expressions))
self.audio_ids = audio_ids[:min_len]
self.image_ids = image_ids[:min_len]
self.uvs_ids = uvs_ids[:min_len]
self.intrinsics = intrinsics
self.extrinsics = extrinsics[:] #extrinsics[:min_len]
self.expressions = expressions[:] #expressions[:min_len]
self.identities = identities[:] #identities[:min_len]
self.n_frames_total = min_len
print('\tnum frames:', self.n_frames_total)
opt.nTrainObjects = 1
opt.nValObjects = 1
opt.nTestObjects = 1
opt.test_sequence_names = [[opt.dataroot.split("/")[-1], 'train']]
if opt.phase == 'test':
opt.test_sequence_names = [[opt.source_dir.split("/")[-1], 'test']]
print('test:', opt.test_sequence_names)
assert(opt.resize_or_crop == 'resize_and_crop')
def getSampleWeights(self):
weights = np.ones((self.n_frames_total))
return weights
def getExtrinsics(self, idx):
return self.extrinsics[self.uvs_ids[idx % self.n_frames_total]]
def getIntrinsics(self, idx):
return self.intrinsics
def getIdentities(self, idx):
return self.identities[self.uvs_ids[idx % self.n_frames_total]]
def getExpressions(self, idx):
return self.expressions[self.uvs_ids[idx % self.n_frames_total]]
def getAudioFilename(self):
return os.path.join(self.dataroot, 'audio.wav')
def getImageFilename(self, idx):
image_id = self.image_ids[idx % self.n_frames_total]
img_fname = os.path.join(self.image_dir, str(image_id).zfill(5) + '.png')
return img_fname
#img_numpy = np.asarray(Image.open(img_fname))
#TARGET = 2.0 * transforms.ToTensor()(img_numpy.astype(np.float32))/255.0 - 1.0
def getAudioFeatureFilename(self, idx):
#return self.frame_paths[idx % len(self.frame_paths)]
audio_id = self.audio_ids[idx]
return os.path.join(self.audio_feature_dir, str(audio_id) + '.deepspeech.npy')
def computeCrop(self, mask, MULTIPLE_OF=64, random_size=False):
IMG_DIM_X = mask.shape[2]
IMG_DIM_Y = mask.shape[1]
if random_size:
# random dimensions
new_dim_x = np.random.randint(int(IMG_DIM_X * 0.75), IMG_DIM_X+1)
new_dim_y = np.random.randint(int(IMG_DIM_Y * 0.75), IMG_DIM_Y+1)
new_dim_x = int(np.floor(new_dim_x / float(MULTIPLE_OF)) * MULTIPLE_OF )
new_dim_y = int(np.floor(new_dim_y / float(MULTIPLE_OF)) * MULTIPLE_OF )
else:
new_dim_x = 3 * MULTIPLE_OF
new_dim_y = 3 * MULTIPLE_OF
# check dims
if new_dim_x > IMG_DIM_X: new_dim_x -= MULTIPLE_OF
if new_dim_y > IMG_DIM_Y: new_dim_y -= MULTIPLE_OF
# random pos
mask_indices = torch.nonzero(mask)
_, bb_mid_point_y, bb_mid_point_x = mask_indices[np.random.randint(0, mask_indices.shape[0])].data.cpu()
#print('bb_mid_point', bb_mid_point_x, bb_mid_point_y)
offset_x = bb_mid_point_x - new_dim_x/2
offset_y = bb_mid_point_y - new_dim_y/2
if IMG_DIM_X == new_dim_x: offset_x = 0
if offset_x < 0: offset_x = 0
if offset_x+new_dim_x >= IMG_DIM_X: offset_x = IMG_DIM_X-new_dim_x
if IMG_DIM_Y == new_dim_y: offset_y = 0
if offset_y < 0: offset_y = 0
if offset_y+new_dim_y >= IMG_DIM_Y: offset_y = IMG_DIM_Y-new_dim_y
return np.array([int(offset_x),int(offset_y),int(new_dim_x), int(new_dim_y)])
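# computeCrop returns [offset_x, offset_y, new_dim_x, new_dim_y]: a window
# centred on a random foreground (valid-mask) pixel and snapped to multiples
# of MULTIPLE_OF, so the crop stays compatible with the 6-level network.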
def __getitem__(self, global_index):
# select frame from sequence
index = global_index
# get data ids
audio_id = self.audio_ids[index]
image_id = self.image_ids[index]
uv_id = self.uvs_ids[index]
#print('GET ITEM: ', index)
img_fname = os.path.join(self.image_dir, str(image_id).zfill(5) + '.png')
img_numpy = np.asarray(Image.open(img_fname))
TARGET = 2.0 * transforms.ToTensor()(img_numpy.astype(np.float32))/255.0 - 1.0
uv_fname = os.path.join(self.uvs_dir, str(uv_id).zfill(5) + '.exr')
uv_numpy = util.load_exr(uv_fname)
UV = transforms.ToTensor()(uv_numpy.astype(np.float32))
UV = torch.where(UV > 1.0, torch.zeros_like(UV), UV)
UV = torch.where(UV < 0.0, torch.zeros_like(UV), UV)
UV = 2.0 * UV - 1.0
#print('img_fname:', img_fname)
#print('uv_fname:', uv_fname)
# intrinsics and extrinsics
intrinsics = self.intrinsics
extrinsics = self.extrinsics[uv_id]
# expressions
expressions = np.asarray(self.expressions[audio_id], dtype=np.float32)
#print('expressions:', expressions.shape)
expressions[32] *= 0.0 # remove eye brow movements
expressions[41] *= 0.0 # remove eye brow movements
expressions[71:75] *= 0.0 # remove eye brow movements
expressions = torch.tensor(expressions)
# identity
identity = torch.tensor(self.identities[audio_id])
# load deepspeech feature
dsf_fname = os.path.join(self.audio_feature_dir, str(audio_id) + '.deepspeech.npy')
feature_array = np.load(dsf_fname)
dsf_np = np.resize(feature_array, (16,29,1))
dsf = transforms.ToTensor()(dsf_np.astype(np.float32))
# load sequence data if necessary
if self.opt.look_ahead:# use prev and following frame infos
r = self.opt.seq_len//2
last_valid_idx = audio_id
for i in range(1,r): # prev frames
index_seq = index - i
if index_seq < 0: index_seq = 0
audio_id_seq = self.audio_ids[index_seq]
if audio_id_seq == audio_id - i: last_valid_idx = audio_id_seq
else: audio_id_seq = last_valid_idx
dsf_fname = os.path.join(self.audio_feature_dir, str(audio_id_seq) + '.deepspeech.npy')
feature_array = np.load(dsf_fname)
dsf_np = np.resize(feature_array, (16,29,1))
dsf_seq = transforms.ToTensor()(dsf_np.astype(np.float32)) # 1 x 16 x 29
dsf = torch.cat([dsf_seq, dsf], 0) # seq_len x 16 x 29
# note the ordering [old ... current]
last_valid_idx = audio_id
for i in range(1,self.opt.seq_len - r + 1): # following frames
index_seq = index + i
max_idx = len(self.audio_ids)-1
if index_seq > max_idx: index_seq = max_idx
audio_id_seq = self.audio_ids[index_seq]
if audio_id_seq == audio_id + i: last_valid_idx = audio_id_seq
else: audio_id_seq = last_valid_idx
dsf_fname = os.path.join(self.audio_feature_dir, str(audio_id_seq) + '.deepspeech.npy')
feature_array = np.load(dsf_fname)
dsf_np = np.resize(feature_array, (16,29,1))
dsf_seq = transforms.ToTensor()(dsf_np.astype(np.float32)) # 1 x 16 x 29
dsf = torch.cat([dsf, dsf_seq], 0) # seq_len x 16 x 29
# note the ordering [old ... current ... future]
else:
last_valid_idx = audio_id
for i in range(1,self.opt.seq_len):
index_seq = index - i
if index_seq < 0: index_seq = 0
audio_id_seq = self.audio_ids[index_seq]
if audio_id_seq == audio_id - i: last_valid_idx = audio_id_seq
else: audio_id_seq = last_valid_idx
dsf_fname = os.path.join(self.audio_feature_dir, str(audio_id_seq) + '.deepspeech.npy')
feature_array = np.load(dsf_fname)
dsf_np = np.resize(feature_array, (16,29,1))
dsf_seq = transforms.ToTensor()(dsf_np.astype(np.float32)) # 1 x 16 x 29
dsf = torch.cat([dsf_seq, dsf], 0) # seq_len x 16 x 29
# note the ordering [old ... current]
#################################
####### apply augmentation ######
#################################
crop = np.array([0,0,UV.shape[2],UV.shape[1]])
if not self.opt.no_augmentation:
INVALID_UV = -1
mask = ( (UV[0:1,:,:] != INVALID_UV) | (UV[1:2,:,:] != INVALID_UV) )
crop = self.computeCrop(mask, MULTIPLE_OF=64) # << dependent on the network structure !! 64 => 6 layers
offset_x,offset_y,new_dim_x, new_dim_y = crop
# select subwindow
TARGET = TARGET[:, offset_y:offset_y+new_dim_y, offset_x:offset_x+new_dim_x]
UV = UV[:, offset_y:offset_y+new_dim_y, offset_x:offset_x+new_dim_x]
# compute new intrinsics
# TODO: atm not needed but maybe later
#################################
weight = 1.0 / self.n_frames_total
return {'TARGET': TARGET, 'UV': UV,
'paths': dsf_fname, #img_path,
'intrinsics': np.array(intrinsics),
'extrinsics': np.array(extrinsics),
'expressions': expressions,
'identity': identity,
'audio_deepspeech': dsf, # deepspeech feature
'target_id': -1,
'crop': crop,
'internal_id': 0,
'weight': np.array([weight]).astype(np.float32)}
def __len__(self):
return self.n_frames_total
def name(self):
return 'FaceDataset'
| 14,941 | 38.013055 | 160 | py |
NeuralVoicePuppetry | NeuralVoicePuppetry-master/Audio2ExpressionNet/Training Code/data/aligned_dataset.py | import os.path
import random
import torchvision.transforms as transforms
import torch
import numpy as np
from data.base_dataset import BaseDataset
from data.audio import Audio
#from data.image_folder import make_dataset
from PIL import Image
#def make_dataset(dir):
# images = []
# assert os.path.isdir(dir), '%s is not a valid directory' % dir
# for root, _, fnames in sorted(os.walk(dir)):
# for fname in fnames:
# if any(fname.endswith(extension) for extension in ['.bin', '.BIN']):
# path = os.path.join(root, fname)
# images.append(path)
# return sorted(images)
def make_dataset(dir):
images = []
ids = []
assert os.path.isdir(dir), '%s is not a valid directory' % dir
for root, _, fnames in sorted(os.walk(dir)):
for fname in fnames:
if any(fname.endswith(extension) for extension in ['.bin', '.BIN']):
id_str = fname[:-4]
i = int(id_str)
ids.append(i)
ids = sorted(ids)
for id in ids:
fname=str(id)+'.bin'
path = os.path.join(root, fname)
images.append(path)
return images
def load_intrinsics(input_dir):
file = open(input_dir+"/intrinsics.txt", "r")
intrinsics = [float(x) for x in file.readline().split()] # only the first line is used
file.close()
return intrinsics
def load_rigids(input_dir):
file = open(input_dir+"/rigid.txt", "r")
rigid_floats = [[float(x) for x in line.split()] for line in file] # expects 4 consecutive rows per 4x4 matrix (the indexing below breaks on blank separator lines)
file.close()
all_rigids = [ [rigid_floats[4*idx + 0],rigid_floats[4*idx + 1],rigid_floats[4*idx + 2],rigid_floats[4*idx + 3]] for idx in range(0, len(rigid_floats)//4) ]
return all_rigids
def load_expressions(input_dir):
file = open(input_dir+"/expression.txt", "r")
expressions = [[float(x) for x in line.split()] for line in file]
file.close()
return expressions
def load_audio(input_dir):
audio = Audio(input_dir + '/audio.mp3', write_mel_spectogram = False)
#audio = Audio(input_dir + '/audio.mp3', write_mel_spectogram = True)
return audio
def make_ids(paths, root_dir):
ids = []
for fname in paths:
l = fname.rfind('/')
id_str = fname[l+1:-4]
i = int(id_str)
#print(fname, ': ', i)
ids.append(i)
return ids
class AlignedDataset(BaseDataset):
@staticmethod
def modify_commandline_options(parser, is_train):
return parser
def initialize(self, opt):
self.opt = opt
self.root = opt.dataroot
self.data_dir = os.path.join(opt.dataroot, opt.phase)
self.frame_paths = make_dataset(self.data_dir)
self.frame_ids = make_ids(self.frame_paths, self.root)
self.intrinsics = load_intrinsics(self.data_dir)
self.extrinsics = load_rigids(self.data_dir)
self.expressions = load_expressions(self.data_dir)
self.audio = load_audio(self.data_dir)
self.audio_window_size = opt.audio_window_size
opt.nObjects = 1
assert(opt.resize_or_crop == 'resize_and_crop')
def __getitem__(self, index):
# get video data
frame_id = index
#print('GET ITEM: ', index)
img_path = self.frame_paths[index]
frame_id = self.frame_ids[index]
# intrinsics and extrinsics
intrinsics = self.intrinsics
extrinsics = self.extrinsics[frame_id]
# get audio mel sample window
frame_rate = 24#29.97
#frame_rate = len(self.expressions) / self.audio.time_total
mel_frame_idx = int((frame_id / frame_rate) * self.audio.mel_sample_rate)
mels = self.audio.getWindow(mel_frame_idx, self.audio_window_size)
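# Converts the video frame index to the matching mel-spectrogram column:
# frame_id / frame_rate gives seconds, times mel columns per second.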
# expressions
expressions = torch.tensor(self.expressions[frame_id])
# default image dimensions
IMG_DIM_X = 512
IMG_DIM_Y = 512
# load image data
#assert(IMG_DIM == self.opt.fineSize)
img_array = np.memmap(img_path, dtype='float32', mode='r').__array__()
if img_array.size != IMG_DIM_X * IMG_DIM_Y * 5:
IMG_DIM_X = int(img_array[0])
IMG_DIM_Y = int(img_array[1])
img_array = img_array[2:]
intrinsics = img_array[0:4]
img_array = img_array[4:]
img_array = np.clip(img_array, 0.0, 1.0)
img = np.resize(img_array, (IMG_DIM_Y, IMG_DIM_X, 5))
A = img[:,:,0:3]
B = img[:,:,3:5]
B = np.concatenate((B, np.zeros((IMG_DIM_Y, IMG_DIM_X, 1))), axis=2)
TARGET = transforms.ToTensor()(A.astype(np.float32))
UV = transforms.ToTensor()(B.astype(np.float32))
TARGET = 2.0 * TARGET - 1.0
UV = 2.0 * UV - 1.0
# load deepspeech feature
dsf_fname = img_path[:-4] + '.deepspeech.npy'
# print('dsf_fname:', dsf_fname)
feature_array = np.load(dsf_fname)
#feature_array = np.memmap(dsf_fname, dtype='float32', mode='r').__array__()
# print('feature_array shape: ', feature_array.shape)
dsf_np = np.resize(feature_array, (16,29,1))
dsf = transforms.ToTensor()(dsf_np.astype(np.float32))
#################################
####### apply augmentation ######
#################################
if not self.opt.no_augmentation:
# random dimensions
new_dim_x = np.random.randint(int(IMG_DIM_X * 0.75), IMG_DIM_X+1)
new_dim_y = np.random.randint(int(IMG_DIM_Y * 0.75), IMG_DIM_Y+1)
new_dim_x = int(np.floor(new_dim_x / 64.0) * 64 ) # << dependent on the network structure !! 64 => 6 layers
new_dim_y = int(np.floor(new_dim_y / 64.0) * 64 )
if new_dim_x > IMG_DIM_X: new_dim_x -= 64
if new_dim_y > IMG_DIM_Y: new_dim_y -= 64
# random pos
if IMG_DIM_X == new_dim_x: offset_x = 0
else: offset_x = np.random.randint(0, IMG_DIM_X-new_dim_x)
if IMG_DIM_Y == new_dim_y: offset_y = 0
else: offset_y = np.random.randint(0, IMG_DIM_Y-new_dim_y)
# select subwindow
TARGET = TARGET[:, offset_y:offset_y+new_dim_y, offset_x:offset_x+new_dim_x]
UV = UV[:, offset_y:offset_y+new_dim_y, offset_x:offset_x+new_dim_x]
# compute new intrinsics
# TODO: atm not needed but maybe later
else:
new_dim_x = int(np.floor(IMG_DIM_X / 64.0) * 64 ) # << dependent on the network structure !! 64 => 6 layers
new_dim_y = int(np.floor(IMG_DIM_Y / 64.0) * 64 )
offset_x = 0
offset_y = 0
# select subwindow
TARGET = TARGET[:, offset_y:offset_y+new_dim_y, offset_x:offset_x+new_dim_x]
UV = UV[:, offset_y:offset_y+new_dim_y, offset_x:offset_x+new_dim_x]
# compute new intrinsics
# TODO: atm not needed but maybe later
#################################
return {'TARGET': TARGET, 'UV': UV,
'paths': self.frame_paths[index],#img_path,
'intrinsics': intrinsics,
'extrinsics': extrinsics,
'expressions': expressions,
'audio_mels': mels,
'audio_deepspeech': dsf, # deepspeech feature
'object_id':0}
def __len__(self):
return len(self.frame_paths)
def name(self):
return 'AlignedDataset'
| 7,488 | 34.832536 | 160 | py |
NeuralVoicePuppetry | NeuralVoicePuppetry-master/Audio2ExpressionNet/Training Code/data/__init__.py | import importlib
import torch.utils.data
from data.base_data_loader import BaseDataLoader
from data.base_dataset import BaseDataset
def find_dataset_using_name(dataset_name):
# Given the option --dataset_mode [datasetname],
# the file "data/datasetname_dataset.py"
# will be imported.
dataset_filename = "data." + dataset_name + "_dataset"
datasetlib = importlib.import_module(dataset_filename)
# In the file, the class called DatasetNameDataset() will
# be instantiated. It has to be a subclass of BaseDataset,
# and it is case-insensitive.
dataset = None
target_dataset_name = dataset_name.replace('_', '') + 'dataset'
for name, cls in datasetlib.__dict__.items():
if name.lower() == target_dataset_name.lower() \
and issubclass(cls, BaseDataset):
dataset = cls
if dataset is None:
print("In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase." % (dataset_filename, target_dataset_name))
exit(1) # non-zero exit: the requested dataset class was not found
return dataset
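# Example: find_dataset_using_name('face') imports data/face_dataset.py and
# returns the FaceDataset class (the 'facedataset' match is case-insensitive).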
def get_option_setter(dataset_name):
dataset_class = find_dataset_using_name(dataset_name)
return dataset_class.modify_commandline_options
def create_dataset(opt):
dataset = find_dataset_using_name(opt.dataset_mode)
instance = dataset()
instance.initialize(opt)
print("dataset [%s] was created" % (instance.name()))
return instance
def CreateDataLoader(opt):
data_loader = CustomDatasetDataLoader()
data_loader.initialize(opt)
return data_loader
# Wrapper class of Dataset class that performs
# multi-threaded data loading
class CustomDatasetDataLoader(BaseDataLoader):
def name(self):
return 'CustomDatasetDataLoader'
def initialize(self, opt):
BaseDataLoader.initialize(self, opt)
self.dataset = create_dataset(opt)
if opt.serial_batches:
self.dataloader = torch.utils.data.DataLoader(
self.dataset,
batch_size=opt.batch_size,
shuffle=not opt.serial_batches,
num_workers=int(opt.num_threads))
else:
#weights = make_weights_for_balanced_classes(dataset_train.imgs, len(dataset_train.classes))
weights = self.dataset.getSampleWeights()
weights = torch.DoubleTensor(weights)
sampler = torch.utils.data.sampler.WeightedRandomSampler(weights, len(weights))
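# WeightedRandomSampler draws len(weights) indices per epoch, with
# replacement, proportionally to the per-frame weights from the dataset.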
self.dataloader = torch.utils.data.DataLoader(
self.dataset,
batch_size=opt.batch_size,
#shuffle=True,
sampler=sampler,
pin_memory=True,
num_workers=int(opt.num_threads))
def load_data(self):
return self
def __len__(self):
return min(len(self.dataset), self.opt.max_dataset_size)
def __iter__(self):
for i, data in enumerate(self.dataloader):
if i * self.opt.batch_size >= self.opt.max_dataset_size:
break
yield data
| 3,157 | 33.703297 | 168 | py |
NeuralVoicePuppetry | NeuralVoicePuppetry-master/Audio2ExpressionNet/Inference/transfer.py | import os
import os.path
from options.transfer_options import TransferOptions
from data import CreateDataLoader
from data.face_dataset import FaceDataset
from data.audio_dataset import AudioDataset
from models import create_model
from util.visualizer import save_images
from util import html
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import numpy as np
from PIL import Image
import time
import random
import progressbar
import copy
from shutil import copyfile
from BaselModel.basel_model import *
def load_model(opt):
opt.output_audio_expressions = True
opt.nTrainObjects = 116
print('#train objects = %d' % opt.nTrainObjects)
print('>>> create model <<<')
model = create_model(opt)
print('>>> setup model <<<')
model.setup(opt)
return model
def load_target_sequence(opt):
opt_target = copy.copy(opt) # create a clone
opt_target.dataroot = opt.target_actor # overwrite root directory
opt_target.dataset_mode = 'face'
opt_target.phase = 'train'
dataset_target = FaceDataset()
dataset_target.initialize(opt_target)
dataloader = torch.utils.data.DataLoader(
dataset_target,
batch_size=opt.batch_size,
shuffle=not opt.serial_batches,
num_workers=int(opt.num_threads))
return dataset_target, dataloader
def load_source_sequence(opt):
opt_source = copy.copy(opt) # create a clone
opt_source.dataroot = opt.source_actor # overwrite root directory
opt_source.dataset_mode = 'audio'
opt_source.phase = 'train'
dataset_source = AudioDataset()
dataset_source.initialize(opt_source)
dataloader = torch.utils.data.DataLoader(
dataset_source,
batch_size=opt.batch_size,
shuffle=not opt.serial_batches,
num_workers=int(opt.num_threads))
return dataset_source, dataloader
if __name__ == '__main__':
# read options
opt = TransferOptions().parse()
target_name = opt.target_actor.split("/")[-1]
# hard-code some parameters for test
opt.num_threads = 1 # test code only supports num_threads = 1
opt.batch_size = 1 # test code only supports batch_size = 1
opt.serial_batches = True # no shuffle
opt.no_augmentation = True # no flip
opt.display_id = -1 # no visdom display
# load model
model = load_model(opt)
print('model version:', opt.name)
# # load face model
# morphable_model = MorphableModel()
# mask = morphable_model.LoadMask()
# mask = mask + 0.1 * torch.ones_like(mask)
# read target sequence
dataset_target, data_loader_target = load_target_sequence(opt)
dataset_target_size = len(dataset_target)
print('#target_actor frames = %d' % dataset_target_size)
##################################
####### create mapping #######
##################################
os.makedirs('./mappings/'+opt.name, exist_ok=True)
mapping_fn = './mappings/'+opt.name+'/'+'mapping_'+target_name
#not_exists = True
not_exists = not os.path.exists(mapping_fn+'.npy')
if not_exists:
# collect data
print('collect data')
audio_expressions = None
gt_expressions = None
with progressbar.ProgressBar(max_value=len(dataset_target)) as bar:
for i, data in enumerate(data_loader_target):
bar.update(i)
model.set_input(data)
model.test()
ae = model.fake_expressions.data[:,:,0]
if audio_expressions is None:
audio_expressions = ae
e = model.expressions.data
gt_expressions = e
else:
audio_expressions = torch.cat([audio_expressions,ae],dim=0)
e = model.expressions.data
gt_expressions = torch.cat([gt_expressions,e],dim=0)
# solve for mapping
print('solve for mapping')
optimize_in_parameter_space = True #False
if optimize_in_parameter_space:
# A = audio_expressions
# B = gt_expressions
# # solve lstsq ||AX - B||
# X, _ = torch.gels(B, A, out=None)
# #X, _ = torch.lstsq(B, A) # requires pytorch 1.2
# X = X[0:A.shape[1],:]
# mapping = X.t()
# use gradient descent method
n = audio_expressions.shape[0]
subspace_dim = 32
X = torch.nn.Parameter(torch.randn(N_EXPRESSIONS, subspace_dim, requires_grad=True).cuda())
optimizer = torch.optim.Adam([X], lr=0.01)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.1)
num_epochs = 90
random_range = [k for k in range(0,n)]
with progressbar.ProgressBar(max_value=num_epochs) as bar:
for ep in range(0,num_epochs):
bar.update(ep)
random.shuffle(random_range)
for j in random_range:
expressions = gt_expressions[j]
fake_expressions = 10.0 * torch.matmul(X, audio_expressions[j])
diff_expression = fake_expressions - expressions
loss = torch.mean(diff_expression * diff_expression) # L2
optimizer.zero_grad()
loss.backward()
optimizer.step()
lr_scheduler.step()
mapping = X.data
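# The result is an (N_EXPRESSIONS x subspace_dim) linear map: person-specific
# expressions are reconstructed as 10.0 * mapping @ audio_expression, matching
# the scaling used in the optimization above.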
# else: # optimize in vertex space
# # use gradient descent method
# n = audio_expressions.shape[0]
# subspace_dim = 32
# X = torch.nn.Parameter(torch.randn(N_EXPRESSIONS, subspace_dim, requires_grad=True).cuda())
# optimizer = torch.optim.Adam([X], lr=0.01)
# lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.1)
# num_epochs = 90
# random_range = [k for k in range(0,n)]
# with progressbar.ProgressBar(max_value=num_epochs) as bar:
# for ep in range(0,num_epochs):
# bar.update(ep)
# random.shuffle(random_range)
# for j in random_range:
# expressions = gt_expressions[j]
# fake_expressions = 10.0 * torch.matmul(X, audio_expressions[j])
# diff_expression = fake_expressions - expressions
# diff_vertices = torch.matmul(morphable_model.expression_basis, diff_expression)
# #loss = torch.sqrt(torch.mean(mask * diff_vertices * diff_vertices)) # RMS
# loss = torch.mean(mask * diff_vertices * diff_vertices) # L2
# #
# optimizer.zero_grad()
# loss.backward()
# optimizer.step()
# lr_scheduler.step()
#
# mapping = X.data
map_cpu = mapping.data.cpu().numpy()
file_out=open(mapping_fn+'.txt', 'w')
np.savetxt(file_out, map_cpu, delimiter=' ')
file_out.close()
np.save(mapping_fn+'.npy', map_cpu)
else:
# load mapping from file
map_cpu = np.load(mapping_fn+'.npy')
mapping = torch.tensor(map_cpu.astype(np.float32)).cuda()
print('loaded mapping from file', mapping.shape)
# process source sequence (opt.source_actor)
source_actors = [
'./datasets/SOURCES/greta_1',
]
os.makedirs('./datasets/TRANSFERS/'+opt.name, exist_ok=True)
list_transfer = open('./datasets/TRANSFERS/'+opt.name+'/list_transfer.txt', "a")
target_actor_offset = 0 # default
expression_multiplier = 1.0 # default
if target_actor_offset != 0.0:
target_name = target_name + '--offset'
if expression_multiplier != 1.0:
target_name = target_name + '-X'
for source_actor in source_actors:
opt.source_actor = source_actor
source_name = opt.source_actor.split("/")[-1]
# read source sequence
dataset_source, data_loader_source = load_source_sequence(opt)
dataset_source_size = len(dataset_source)
print('#source_actor frames = %d' % dataset_source_size)
list_transfer.write(source_name+'--'+target_name+'\n')
out_dir = './datasets/TRANSFERS/'+opt.name+'/'+source_name+'--'+target_name
os.makedirs(out_dir, exist_ok=True)
result_expressions_file=open(out_dir+'/'+'expression.txt', 'w')
result_expressions = []
with progressbar.ProgressBar(max_value=len(dataset_source)) as bar:
for i, data in enumerate(data_loader_source):
bar.update(i)
model.set_input(data)
model.test()
audio_expression = model.fake_expressions.data[0,:,0]
expression = expression_multiplier * 10.0 * torch.matmul(mapping, audio_expression)
expression = expression[None,:]
result_expressions.append(expression)
np.savetxt(result_expressions_file, expression.cpu().numpy(), delimiter=' ')
result_expressions_file.close()
list_transfer.close()
exit()
| 9,356 | 36.729839 | 104 | py |
NeuralVoicePuppetry | NeuralVoicePuppetry-master/Audio2ExpressionNet/Inference/options/base_options.py | import argparse
import os
from util import util
import torch
import models
import data
class BaseOptions():
def __init__(self):
self.initialized = False
def initialize(self, parser):
parser.add_argument('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
parser.add_argument('--batch_size', type=int, default=1, help='input batch size')
parser.add_argument('--seq_len', type=int, default=1, help='sequence length (if applicable)')
parser.add_argument('--fineSize', type=int, default=512, help='then crop to this size')
parser.add_argument('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML')
parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels')
parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels')
parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in first conv layer')
parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer')
parser.add_argument('--netD', type=str, default='basic', help='selects model to use for netD')
parser.add_argument('--netG', type=str, default='resnet_9blocks', help='selects model to use for netG')
parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
parser.add_argument('--renderer', type=str, default='no_renderer', help='name of the renderer to load the models from')
parser.add_argument('--fix_renderer', action='store_true', help='renderer is fixed')
parser.add_argument('--dataset_mode', type=str, default='aligned', help='chooses how datasets are loaded. [aligned | multi]')
parser.add_argument('--model', type=str, default='cycle_gan', help='chooses which model to use. cycle_gan, pix2pix, test')
parser.add_argument('--direction', type=str, default='AtoB', help='AtoB or BtoA')
parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
parser.add_argument('--load_iter', type=int, default=0, help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')
parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data')
parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization')
parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')
parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
parser.add_argument('--resize_or_crop', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop|crop|scale_width|scale_width_and_crop|none]')
parser.add_argument('--no_augmentation', action='store_true', help='if specified, no data augmentation')
#parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]')
parser.add_argument('--init_type', type=str, default='xavier', help='network initialization [normal|xavier|kaiming|orthogonal]')
parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')
parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}')
parser.add_argument('--tex_dim', type=int, default=256, help='neural texture dimensions')
parser.add_argument('--tex_features_intermediate', type=int, default=16, help='# intermediate neural texture features when using dynamic textures')
parser.add_argument('--tex_features', type=int, default=16, help='# neural texture features')
parser.add_argument('--textureModel', type=str, default='DynamicNeuralTextureAudio', help='texture model')
parser.add_argument('--rendererType', type=str, default='UNET_5_level', help='neural renderer network')
parser.add_argument('--lossType', type=str, default='L1', help='loss type for the final output')
parser.add_argument('--hierarchicalTex', action='store_true', help='if specified, hierachical neural textures are used')
parser.add_argument('--output_audio_expressions', action='store_true', help='if specified, no sh layers are used')
parser.add_argument('--erosionFactor', type=float, default=1.0, help='scaling factor for erosion of the background.')
parser.add_argument('--audio_window_size', type=int, default=16, help='audio window size = #mel feature bins')
parser.add_argument('--look_ahead', action='store_true', help='also use audio features of future frames (window centered at the current frame)')
parser.add_argument('--cached_images', action='store_true', help='cache images in numpy format')
self.initialized = True
return parser
def gather_options(self):
# initialize parser with basic options
if not self.initialized:
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser = self.initialize(parser)
# get the basic options
opt, _ = parser.parse_known_args()
# modify model-related parser options
model_name = opt.model
model_option_setter = models.get_option_setter(model_name)
parser = model_option_setter(parser, self.isTrain)
opt, _ = parser.parse_known_args() # parse again with the new defaults
# modify dataset-related parser options
dataset_name = opt.dataset_mode
dataset_option_setter = data.get_option_setter(dataset_name)
parser = dataset_option_setter(parser, self.isTrain)
self.parser = parser
return parser.parse_args()
def print_options(self, opt):
message = ''
message += '----------------- Options ---------------\n'
for k, v in sorted(vars(opt).items()):
comment = ''
default = self.parser.get_default(k)
if v != default:
comment = '\t[default: %s]' % str(default)
message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
message += '----------------- End -------------------'
print(message)
# save to the disk
expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
util.mkdirs(expr_dir)
file_name = os.path.join(expr_dir, 'opt.txt')
with open(file_name, 'wt') as opt_file:
opt_file.write(message)
opt_file.write('\n')
def parse(self):
opt = self.gather_options()
opt.isTrain = self.isTrain # train or test
# process opt.suffix
if opt.suffix:
suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''
opt.name = opt.name + suffix
self.print_options(opt)
# set gpu ids
str_ids = opt.gpu_ids.split(',')
opt.gpu_ids = []
for str_id in str_ids:
id = int(str_id)
if id >= 0:
opt.gpu_ids.append(id)
if len(opt.gpu_ids) > 0:
torch.cuda.set_device(opt.gpu_ids[0])
self.opt = opt
return self.opt
| 8,300 | 58.719424 | 223 | py |
NeuralVoicePuppetry | NeuralVoicePuppetry-master/Audio2ExpressionNet/Inference/models/base_model.py | import os
import torch
import torch.nn as nn
from collections import OrderedDict
from . import networks
import numpy as np
from PIL import Image
def save_tensor_image(input_image, image_path):
if isinstance(input_image, torch.Tensor):
image_tensor = input_image.data
image_numpy = image_tensor[0].cpu().float().numpy()
if image_numpy.shape[0] == 1:
image_numpy = np.tile(image_numpy, (3, 1, 1))
image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
else:
image_numpy = input_image
image_numpy = image_numpy.astype(np.uint8)
image_pil = Image.fromarray(image_numpy)
image_pil.save(image_path)
class BaseModel():
# modify parser to add command line options,
# and also change the default values if needed
@staticmethod
def modify_commandline_options(parser, is_train):
return parser
def name(self):
return 'BaseModel'
def initialize(self, opt):
self.opt = opt
self.gpu_ids = opt.gpu_ids
self.isTrain = opt.isTrain
self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
self.load_dir = os.path.join(opt.checkpoints_dir, opt.name)
self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)
if opt.resize_or_crop != 'scale_width':
torch.backends.cudnn.benchmark = True
self.loss_names = []
self.model_names = []
self.visual_names = []
self.image_paths = []
def set_input(self, input):
pass
def forward(self):
pass
# load and print networks; create schedulers
def setup(self, opt, parser=None):
if self.isTrain:
self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers]
if not self.isTrain or opt.continue_train:
load_suffix = 'iter_%d' % opt.load_iter if opt.load_iter > 0 else opt.epoch
self.load_networks(load_suffix)
self.print_networks(opt.verbose)
# load specific moudles
def loadModules(self, opt, model_name, module_names):
for name in module_names:
if isinstance(name, str):
load_dir = os.path.join(opt.checkpoints_dir, model_name)
load_filename = 'latest_%s.pth' % (name)
load_path = os.path.join(load_dir, load_filename)
net = getattr(self, name)
if isinstance(net, torch.Tensor):
print('loading the tensor from %s' % load_path)
net_loaded = torch.load(load_path, map_location=str(self.device))
net.copy_(net_loaded)
else:
# if isinstance(net, torch.nn.DataParallel):
# net = net.module
print('loading the module from %s' % load_path)
# if you are using PyTorch newer than 0.4 (e.g., built from
# GitHub source), you can remove str() on self.device
state_dict = torch.load(load_path, map_location=str(self.device))
if hasattr(state_dict, '_metadata'):
del state_dict._metadata
# patch InstanceNorm checkpoints prior to 0.4
for key in list(state_dict.keys()): # need to copy keys here because we mutate in loop
self.__patch_instance_norm_state_dict(state_dict, net, key.split('.'))
net.load_state_dict(state_dict)
# make models eval mode during test time
def eval(self):
for name in self.model_names:
if isinstance(name, str):
net = getattr(self, name)
net.eval()
# used in test time, wrapping `forward` in no_grad() so we don't save
# intermediate steps for backprop
def test(self):
with torch.no_grad():
self.forward()
# get image paths
def get_image_paths(self):
return self.image_paths
def optimize_parameters(self):
pass
# update learning rate (called once every epoch)
def update_learning_rate(self):
for scheduler in self.schedulers:
scheduler.step()
lr = self.optimizers[0].param_groups[0]['lr']
print('learning rate = %.7f' % lr)
# return visualization images. train.py will display these images, and save the images to a html
def get_current_visuals(self):
visual_ret = OrderedDict()
for name in self.visual_names:
if isinstance(name, str):
visual_ret[name] = getattr(self, name)
return visual_ret
# return traning losses/errors. train.py will print out these errors as debugging information
def get_current_losses(self):
errors_ret = OrderedDict()
for name in self.loss_names:
if isinstance(name, str):
# float(...) works for both scalar tensor and float number
errors_ret[name] = float(getattr(self, 'loss_' + name))
return errors_ret
# save models to the disk
def save_networks(self, epoch):
for name in self.model_names:
if isinstance(name, str):
save_filename = '%s_%s.pth' % (epoch, name)
save_path = os.path.join(self.save_dir, save_filename)
net = getattr(self, name)
if isinstance(net, torch.Tensor):
#torch.save(net.state_dict(), save_path)
torch.save(net, save_path)
for i in range(0, list(net.size())[0]):
save_tensor_image(net[i:i+1,0:3,:,:], save_path+str(i)+'.png')
else:
if len(self.gpu_ids) > 0 and torch.cuda.is_available():
#torch.save(net.module.cpu().state_dict(), save_path) # << original
torch.save(net.cpu().state_dict(), save_path)
net.cuda(self.gpu_ids[0])
else:
torch.save(net.cpu().state_dict(), save_path)
def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):
key = keys[i]
if i + 1 == len(keys): # at the end, pointing to a parameter/buffer
if module.__class__.__name__.startswith('InstanceNorm') and \
(key == 'running_mean' or key == 'running_var'):
if getattr(module, key) is None:
state_dict.pop('.'.join(keys))
if module.__class__.__name__.startswith('InstanceNorm') and \
(key == 'num_batches_tracked'):
state_dict.pop('.'.join(keys))
else:
self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)
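# The recursion walks the module tree along the dotted state-dict key and
# drops InstanceNorm buffers (running stats, num_batches_tracked) that the
# current module does not track, so old checkpoints still load cleanly.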
# load models from the disk
def load_networks(self, epoch):
for name in self.model_names:
if isinstance(name, str):
load_filename = '%s_%s.pth' % (epoch, name)
load_path = os.path.join(self.load_dir, load_filename)
net = getattr(self, name)
if isinstance(net, torch.Tensor):
print('loading the tensor from %s' % load_path)
net_loaded = torch.load(load_path, map_location=str(self.device))
net.copy_(net_loaded)
else:
# if isinstance(net, torch.nn.DataParallel):
# net = net.module
print('loading the module from %s' % load_path)
# if you are using PyTorch newer than 0.4 (e.g., built from
# GitHub source), you can remove str() on self.device
state_dict = torch.load(load_path, map_location=str(self.device))
if hasattr(state_dict, '_metadata'):
del state_dict._metadata
# patch InstanceNorm checkpoints prior to 0.4
for key in list(state_dict.keys()): # need to copy keys here because we mutate in loop
self.__patch_instance_norm_state_dict(state_dict, net, key.split('.'))
net.load_state_dict(state_dict)
# print network information
def print_networks(self, verbose):
print('---------- Networks initialized -------------')
for name in self.model_names:
if isinstance(name, str):
net = getattr(self, name)
if isinstance(net, torch.Tensor):
num_params = net.numel()
print('[Tensor %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))
else:
num_params = 0
for param in net.parameters():
num_params += param.numel()
if verbose:
print(net)
print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))
print('-----------------------------------------------')
# set requies_grad=False to avoid computation
def set_requires_grad(self, nets, requires_grad=False):
if not isinstance(nets, list):
nets = [nets]
for net in nets:
if net is not None:
for param in net.parameters():
param.requires_grad = requires_grad
| 9,458 | 41.227679 | 110 | py |
NeuralVoicePuppetry | NeuralVoicePuppetry-master/Audio2ExpressionNet/Inference/models/networks.py | import torch
import torch.nn as nn
from torch.nn import init
import functools
from torch.optim import lr_scheduler
###############################################################################
# Helper Functions
###############################################################################
def get_norm_layer(norm_type='instance'):
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
elif norm_type == 'none':
norm_layer = None
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
def get_scheduler(optimizer, opt):
if opt.lr_policy == 'lambda':
def lambda_rule(epoch):
lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif opt.lr_policy == 'step':
scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
elif opt.lr_policy == 'plateau':
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
elif opt.lr_policy == 'cosine':
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.niter, eta_min=0)
else:
raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
return scheduler
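# Example: with --lr_policy lambda the rate stays constant for `niter` epochs,
# then decays linearly to zero over the following `niter_decay` epochs.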
def init_weights(net, init_type='normal', gain=0.02):
def init_func(m):
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, gain)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=gain)
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1:
init.normal_(m.weight.data, 1.0, gain)
init.constant_(m.bias.data, 0.0)
print('initialize network with %s' % init_type)
net.apply(init_func)
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):
if len(gpu_ids) > 0:
assert(torch.cuda.is_available())
net.to(gpu_ids[0])
#net = torch.nn.DataParallel(net, gpu_ids)
init_weights(net, init_type, gain=init_gain)
return net
def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[]):
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netG == 'resnet_9blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9)
elif netG == 'resnet_6blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6)
elif netG == 'unet_128':
net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
elif netG == 'unet_256':
net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
else:
raise NotImplementedError('Generator model name [%s] is not recognized' % netG)
return init_net(net, init_type, init_gain, gpu_ids)
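# Usage sketch (values mirror the defaults in options/base_options.py):
# netG = define_G(3, 3, 64, 'unet_256', norm='instance', init_type='xavier')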
def define_D(input_nc, ndf, netD,
n_layers_D=3, norm='batch', use_sigmoid=False, init_type='normal', init_gain=0.02, gpu_ids=[]):
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netD == 'basic':
net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer, use_sigmoid=use_sigmoid)
elif netD == 'n_layers':
net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, use_sigmoid=use_sigmoid)
elif netD == 'pixel':
net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer, use_sigmoid=use_sigmoid)
else:
raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD)
return init_net(net, init_type, init_gain, gpu_ids)
##############################################################################
# Classes
##############################################################################
# Defines the GAN loss which uses either LSGAN or the regular GAN.
# When LSGAN is used, it is basically same as MSELoss,
# but it abstracts away the need to create the target label tensor
# that has the same size as the input
class GANLoss(nn.Module):
def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0):
super(GANLoss, self).__init__()
self.register_buffer('real_label', torch.tensor(target_real_label))
self.register_buffer('fake_label', torch.tensor(target_fake_label))
if use_lsgan:
self.loss = nn.MSELoss()
else:
self.loss = nn.BCELoss()
def get_target_tensor(self, input, target_is_real):
if target_is_real:
target_tensor = self.real_label
else:
target_tensor = self.fake_label
return target_tensor.expand_as(input)
def __call__(self, input, target_is_real):
target_tensor = self.get_target_tensor(input, target_is_real)
return self.loss(input, target_tensor)
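# Usage sketch (hypothetical netD / real / fake tensors):
# criterionGAN = GANLoss(use_lsgan=True)
# loss_D = criterionGAN(netD(real), True) + criterionGAN(netD(fake.detach()), False)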
# Defines the generator that consists of Resnet blocks between a few
# downsampling/upsampling operations.
# Code and idea originally from Justin Johnson's architecture.
# https://github.com/jcjohnson/fast-neural-style/
class ResnetGenerator(nn.Module):
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'):
assert(n_blocks >= 0)
super(ResnetGenerator, self).__init__()
self.input_nc = input_nc
self.output_nc = output_nc
self.ngf = ngf
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0,
bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling):
mult = 2**i
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3,
stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
mult = 2**n_downsampling
for i in range(n_blocks):
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
for i in range(n_downsampling):
mult = 2**(n_downsampling - i)
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input):
return self.model(input)
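# --- added sanity-check sketch (an illustration, not original code) ----------
# With the default 2 downsampling steps the generator is fully convolutional,
# so any input whose spatial size is divisible by 4 maps back to the same size.
def _resnet_generator_example():
    net = ResnetGenerator(input_nc=3, output_nc=3, ngf=64, n_blocks=6)
    x = torch.randn(2, 3, 256, 256)
    y = net(x)          # -> torch.Size([2, 3, 256, 256]), values in [-1, 1] via Tanh
    return y.shape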
# Define a resnet block
class ResnetBlock(nn.Module):
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
norm_layer(dim),
nn.ReLU(True)]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x):
out = x + self.conv_block(x)
return out
# Defines the Unet generator.
# |num_downs|: number of downsamplings in UNet. For example,
# if |num_downs| == 7, image of size 128x128 will become of size 1x1
# at the bottleneck
class UnetGenerator(nn.Module):
def __init__(self, input_nc, output_nc, num_downs, ngf=64,
norm_layer=nn.BatchNorm2d, use_dropout=False):
super(UnetGenerator, self).__init__()
# construct unet structure
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True)
for i in range(num_downs - 5):
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer)
self.model = unet_block
def forward(self, input):
return self.model(input)
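# --- added sketch (illustration only) -----------------------------------------
# num_downs ties the generator to a fixed input resolution: every skip block
# halves the spatial size, so 'unet_256' (num_downs=8) expects 256x256 inputs
# and reaches a 1x1 bottleneck.
def _unet_generator_example():
    net = UnetGenerator(input_nc=3, output_nc=3, num_downs=8)
    x = torch.randn(2, 3, 256, 256)
    return net(x).shape     # -> torch.Size([2, 3, 256, 256])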
# Defines the submodule with skip connection.
# X -------------------identity---------------------- X
# |-- downsampling -- |submodule| -- upsampling --|
class UnetSkipConnectionBlock(nn.Module):
def __init__(self, outer_nc, inner_nc, input_nc=None,
submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
super(UnetSkipConnectionBlock, self).__init__()
self.outermost = outermost
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
if input_nc is None:
input_nc = outer_nc
downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
stride=2, padding=1, bias=use_bias)
downrelu = nn.LeakyReLU(0.2, True)
downnorm = norm_layer(inner_nc)
uprelu = nn.ReLU(True)
upnorm = norm_layer(outer_nc)
if outermost:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downconv]
up = [uprelu, upconv, nn.Tanh()]
model = down + [submodule] + up
elif innermost:
upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv]
up = [uprelu, upconv, upnorm]
model = down + up
else:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv, downnorm]
up = [uprelu, upconv, upnorm]
if use_dropout:
model = down + [submodule] + up + [nn.Dropout(0.5)]
else:
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost:
return self.model(x)
else:
return torch.cat([x, self.model(x)], 1)
# Defines the PatchGAN discriminator with the specified arguments.
class NLayerDiscriminator(nn.Module):
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False):
super(NLayerDiscriminator, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
kw = 4
padw = 1
sequence = [
nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
nn.LeakyReLU(0.2, True)
]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers):
nf_mult_prev = nf_mult
nf_mult = min(2**n, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
kernel_size=kw, stride=2, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
nf_mult_prev = nf_mult
nf_mult = min(2**n_layers, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]
if use_sigmoid:
sequence += [nn.Sigmoid()]
self.model = nn.Sequential(*sequence)
def forward(self, input):
return self.model(input)
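# --- added sketch (illustration only) -----------------------------------------
# The PatchGAN discriminator is fully convolutional and emits one realism score
# per overlapping patch instead of a single score per image; with the default
# n_layers=3, a 256x256 input yields a 30x30 score map (70x70 receptive field).
def _patchgan_example():
    netD = NLayerDiscriminator(input_nc=3, ndf=64, n_layers=3)
    x = torch.randn(2, 3, 256, 256)
    return netD(x).shape    # -> torch.Size([2, 1, 30, 30])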
class PixelDiscriminator(nn.Module):
def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d, use_sigmoid=False):
super(PixelDiscriminator, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
self.net = [
nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
norm_layer(ndf * 2),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]
if use_sigmoid:
self.net.append(nn.Sigmoid())
self.net = nn.Sequential(*self.net)
def forward(self, input):
return self.net(input)
| 15,748 | 40.013021 | 151 | py |
NeuralVoicePuppetry | NeuralVoicePuppetry-master/Audio2ExpressionNet/Inference/models/audio2ExpressionsAttentionTMP4_model.py | import os
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from util.image_pool import ImagePool
from .base_model import BaseModel
from . import networks
import numpy as np
import functools
from BaselModel.basel_model import *
INVALID_UV = -1.0
from torchvision import models
from collections import namedtuple
class ExpressionEstimator_Attention(nn.Module):
def __init__(self, n_output_expressions, nIdentities, seq_len, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
super(ExpressionEstimator_Attention, self).__init__()
print('Estimator Attention')
#################################
######## audio net ##########
#################################
self.seq_len = seq_len
dropout_rate = 0.0
        if use_dropout:
            #dropout_rate = 0.5
            dropout_rate = 0.25
self.convNet = nn.Sequential(
nn.Conv2d(29, 32, kernel_size=(3,1), stride=(2,1), padding=(1,0), bias=True), # 29 x 16 x 1 => 32 x 8 x 1
nn.LeakyReLU(0.02, True),
nn.Conv2d(32, 32, kernel_size=(3,1), stride=(2,1), padding=(1,0), bias=True), # 32 x 8 x 1 => 32 x 4 x 1
nn.LeakyReLU(0.02, True),
nn.Conv2d(32, 64, kernel_size=(3,1), stride=(2,1), padding=(1,0), bias=True), # 32 x 4 x 1 => 64 x 2 x 1
nn.LeakyReLU(0.2, True),
nn.Conv2d(64, 64, kernel_size=(3,1), stride=(2,1), padding=(1,0), bias=True), # 64 x 2 x 1 => 64 x 1 x 1
nn.LeakyReLU(0.2, True),
)
fullNet_input_size = 64
self.subspace_dim = 32 # number of audio expressions
print('fullNet_input_size: ', fullNet_input_size)
self.fullNet = nn.Sequential(
nn.Linear(in_features = fullNet_input_size, out_features=128, bias = True),
nn.LeakyReLU(0.02),
nn.Linear(in_features = 128, out_features=64, bias = True),
nn.LeakyReLU(0.02),
nn.Linear(in_features = 64, out_features=self.subspace_dim, bias = True),
nn.Tanh()
)
# mapping from subspace to full expression space
self.register_parameter('mapping', torch.nn.Parameter(torch.randn(1, nIdentities, N_EXPRESSIONS, self.subspace_dim, requires_grad=True)))
# attention
self.attentionConvNet = nn.Sequential( # b x subspace_dim x seq_len
nn.Conv1d(self.subspace_dim, 16, kernel_size=3, stride=1, padding=1, bias=True),
nn.LeakyReLU(0.02, True),
nn.Conv1d(16, 8, kernel_size=3, stride=1, padding=1, bias=True),
nn.LeakyReLU(0.02, True),
nn.Conv1d(8, 4, kernel_size=3, stride=1, padding=1, bias=True),
nn.LeakyReLU(0.02, True),
nn.Conv1d(4, 2, kernel_size=3, stride=1, padding=1, bias=True),
nn.LeakyReLU(0.02, True),
nn.Conv1d(2, 1, kernel_size=3, stride=1, padding=1, bias=True),
nn.LeakyReLU(0.02, True)
)
self.attentionNet = nn.Sequential(
nn.Linear(in_features = self.seq_len, out_features=self.seq_len, bias = True),
nn.Softmax(dim=1)
)
#self.hidden2subspace = nn.Linear(self.subspace_dim,self.subspace_dim)
def forward_internal(self, audio_features_sequence, identity_id):
result_subspace, intermediate_expression = self.getAudioExpressions_internal(audio_features_sequence)
mapping = torch.index_select(self.mapping[0], dim = 0, index = identity_id)
result = 10.0 * torch.bmm(mapping, result_subspace)[:,:,0]
result_intermediate = 10.0 * torch.bmm(mapping, intermediate_expression)[:,:,0]
#exit(-1)
return result, result_intermediate
def forward(self, audio_features_sequence, identity_id):
result_subspace = self.getAudioExpressions(audio_features_sequence)
mapping = torch.index_select(self.mapping[0], dim = 0, index = identity_id)
result = torch.bmm(mapping, result_subspace)[:,:,0]
#exit(-1)
return 10.0 * result
def getAudioExpressions_internal(self, audio_features_sequence):
# audio_features_sequence: b x seq_len x 16 x 29
b = audio_features_sequence.shape[0] # batchsize
audio_features_sequence = audio_features_sequence.view(b * self.seq_len, 1, 16, 29) # b * seq_len x 1 x 16 x 29
audio_features_sequence = torch.transpose(audio_features_sequence, 1, 3) # b* seq_len x 29 x 16 x 1
conv_res = self.convNet( audio_features_sequence )
conv_res = torch.reshape( conv_res, (b * self.seq_len, 1, -1))
result_subspace = self.fullNet(conv_res)[:,0,:] # b * seq_len x subspace_dim
result_subspace = result_subspace.view(b, self.seq_len, self.subspace_dim)# b x seq_len x subspace_dim
#################
### attention ###
#################
result_subspace_T = torch.transpose(result_subspace, 1, 2) # b x subspace_dim x seq_len
intermediate_expression = result_subspace_T[:,:,(self.seq_len // 2):(self.seq_len // 2) + 1]
att_conv_res = self.attentionConvNet(result_subspace_T)
#print('att_conv_res', att_conv_res.shape)
attention = self.attentionNet(att_conv_res.view(b, self.seq_len)).view(b, self.seq_len, 1) # b x seq_len x 1
#print('attention', attention.shape)
# pooling along the sequence dimension
result_subspace = torch.bmm(result_subspace_T, attention)
#print('result_subspace', result_subspace.shape)
###
return result_subspace.view(b, self.subspace_dim, 1), intermediate_expression
def getAudioExpressions(self, audio_features_sequence):
expr, _ = self.getAudioExpressions_internal(audio_features_sequence)
return expr
def regularizer(self):
#reg = torch.norm(self.mapping)
reg_mapping = torch.mean(torch.abs(self.mapping))
# one could also enforce orthogonality here
# s_browExpressions[] = { 32, 41, 71, 72, 73, 74, 75 };
reg_eye_brow = torch.mean(torch.abs( self.mapping[0,:,[32, 41, 71, 72, 73, 74, 75],:] ))
#return 0.01 * reg_mapping + 1.0 * reg_eye_brow
return 0.0 * reg_mapping
def define_ExpressionEstimator(estimatorType='estimatorDefault', nIdentities=1, seq_len=1, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[]):
net = None
print('EstimatorType: ', estimatorType)
    if estimatorType=='estimatorAttention': net = ExpressionEstimator_Attention(N_EXPRESSIONS, nIdentities, seq_len)
    else: raise NotImplementedError('Estimator model name [%s] is not recognized' % estimatorType)
    return networks.init_net(net, init_type, init_gain, gpu_ids)
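# --- added shape walk-through (illustration; tensors are random placeholders) --
# The estimator maps a window of seq_len DeepSpeech frames (16x29 each) to
# per-frame audio-expression codes, attention-pools them over time and projects
# the pooled code into the N_EXPRESSIONS-dim space via a per-identity mapping.
def _expression_estimator_example(seq_len=8, nIdentities=4, batch=2):
    net = ExpressionEstimator_Attention(N_EXPRESSIONS, nIdentities, seq_len)
    audio = torch.randn(batch, seq_len, 16, 29)         # b x seq_len x 16 x 29
    identity_id = torch.zeros(batch, dtype=torch.long)  # target identity per sample
    expr = net(audio, identity_id)                      # -> b x N_EXPRESSIONS
    return expr.shape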
class Audio2ExpressionsAttentionTMP4Model(BaseModel):
def name(self):
return 'Audio2ExpressionsAttentionTMP4Model'
@staticmethod
def modify_commandline_options(parser, is_train=True):
# changing the default values to match the pix2pix paper
# (https://phillipi.github.io/pix2pix/)
#parser.set_defaults(norm='batch', netG='unet_256')
parser.set_defaults(norm='instance', netG='unet_256')
parser.set_defaults(dataset_mode='aligned')
if is_train:
parser.set_defaults(pool_size=0, no_lsgan=True)
parser.add_argument('--lambda_L1', type=float, default=100.0, help='weight for L1 loss')
return parser
def initialize(self, opt):
BaseModel.initialize(self, opt)
self.isTrain = opt.isTrain
self.trainRenderer = not opt.fix_renderer
# specify the training losses you want to print out. The program will call base_model.get_current_losses
self.loss_names = ['G_L1','G_L1_ABSOLUTE','G_L1_RELATIVE', 'G_Regularizer']
# specify the images you want to save/display. The program will call base_model.get_current_visuals
#self.visual_names = ['input_uv', 'fake', 'target']
self.visual_names = ['zeros']
self.zeros = torch.zeros(1,3,2,2)
# specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks
if self.isTrain:
self.model_names = ['netG']
else: # during test time, only load Gs
self.model_names = ['netG']
self.fake_expressions = None
self.fake_expressions_prv = None
self.fake_expressions_nxt = None
if self.isTrain:
self.morphable_model = MorphableModel()
self.mask = self.morphable_model.LoadMask()
nIdentities=opt.nTrainObjects
# load/define networks
self.netG = define_ExpressionEstimator(estimatorType=opt.rendererType, nIdentities=nIdentities, seq_len=opt.seq_len, gpu_ids=self.gpu_ids)
if self.isTrain:
use_sigmoid = opt.no_lsgan
self.fake_AB_pool = ImagePool(opt.pool_size)
# define loss functions
self.criterionL1 = torch.nn.L1Loss()
self.criterionL1Smooth = torch.nn.SmoothL1Loss()
self.criterionL2 = torch.nn.MSELoss()
# initialize optimizers
self.optimizers = []
#self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999), weight_decay=0.1 )#10.0)
self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999), weight_decay=0.0 )#10.0)
self.optimizers.append(self.optimizer_G)
def set_input(self, input):
self.image_paths = input['paths']
self.expressions = input['expressions'].cuda()
self.audio_features = input['audio_deepspeech'].cuda() # b x seq_len x 16 x 29
if self.isTrain:
self.expressions_prv = input['expressions_prv'].cuda()
self.audio_features_prv = input['audio_deepspeech_prv'].cuda() # b x seq_len x 16 x 29
self.expressions_nxt = input['expressions_nxt'].cuda()
self.audio_features_nxt = input['audio_deepspeech_nxt'].cuda() # b x seq_len x 16 x 29
self.target_id = input['target_id'].cuda()
def forward(self):
# estimate expressions
if self.opt.output_audio_expressions: #self.opt.dataset_mode=='audio':
self.fake_expressions = self.netG.getAudioExpressions(self.audio_features)
if self.isTrain:
self.fake_expressions_prv = self.netG.getAudioExpressions(self.audio_features_prv)
self.fake_expressions_nxt = self.netG.getAudioExpressions(self.audio_features_nxt)
else:
self.fake_expressions, self.fake_expressions_intermediate = self.netG.forward_internal(self.audio_features, self.target_id)
if self.isTrain:
self.fake_expressions_prv = self.netG(self.audio_features_prv, self.target_id)
self.fake_expressions_nxt = self.netG(self.audio_features_nxt, self.target_id)
def backward_G(self, epoch):
# Second, G(A) = B
#self.loss_G_L1 = self.criterionL1(self.fake_expressions, self.expressions)
# difference in vertex space
mask = torch.cat([self.mask[:,None],self.mask[:,None],self.mask[:,None]], 1)
mask = mask + 0.1 * torch.ones_like(mask) # priority for the mask region, but other region should also be constrained
# absolute (single timesteps)
diff_expression = self.fake_expressions - self.expressions
diff_vertices = self.morphable_model.compute_expression_delta(diff_expression)
diff_expression_intermediate = self.fake_expressions_intermediate - self.expressions
diff_vertices_intermediate = self.morphable_model.compute_expression_delta(diff_expression_intermediate)
diff_expression_prv = self.fake_expressions_prv - self.expressions_prv
diff_vertices_prv = self.morphable_model.compute_expression_delta(diff_expression_prv)
diff_expression_nxt = self.fake_expressions_nxt - self.expressions_nxt
diff_vertices_nxt = self.morphable_model.compute_expression_delta(diff_expression_nxt)
# relative (temporal 1 timestep) cur - nxt and prv - cur
diff_expression_tmp_cur_nxt = (self.fake_expressions - self.fake_expressions_nxt) - (self.expressions - self.expressions_nxt)
diff_vertices_tmp_cur_nxt = self.morphable_model.compute_expression_delta(diff_expression_tmp_cur_nxt)
diff_expression_tmp_prv_cur = (self.fake_expressions_prv - self.fake_expressions) - (self.expressions_prv - self.expressions)
diff_vertices_tmp_prv_cur = self.morphable_model.compute_expression_delta(diff_expression_tmp_prv_cur)
# relative (temporal 2 timesteps) nxt - prv
diff_expression_tmp_nxt_prv = (self.fake_expressions_nxt - self.fake_expressions_prv) - (self.expressions_nxt - self.expressions_prv)
diff_vertices_tmp_nxt_prv = self.morphable_model.compute_expression_delta(diff_expression_tmp_nxt_prv)
#print('mask: ', mask.shape)
#print('diff_vertices: ', diff_vertices.shape)
self.loss_G_L1_ABSOLUTE = 0.0
self.loss_G_L1_RELATIVE = 0.0
if self.opt.lossType == 'L1':
self.loss_G_L1_ABSOLUTE += 1000.0 * torch.mean(mask * torch.abs(diff_vertices))
self.loss_G_L1_ABSOLUTE += 1000.0 * torch.mean(mask * torch.abs(diff_vertices_prv))
self.loss_G_L1_ABSOLUTE += 1000.0 * torch.mean(mask * torch.abs(diff_vertices_nxt))
self.loss_G_L1_ABSOLUTE += 3000.0 * torch.mean(mask * torch.abs(diff_vertices_intermediate))
self.loss_G_L1_RELATIVE += 20000.0 * torch.mean(mask * torch.abs(diff_vertices_tmp_cur_nxt))
self.loss_G_L1_RELATIVE += 20000.0 * torch.mean(mask * torch.abs(diff_vertices_tmp_prv_cur))
self.loss_G_L1_RELATIVE += 20000.0 * torch.mean(mask * torch.abs(diff_vertices_tmp_nxt_prv))
elif self.opt.lossType == 'L2':
self.loss_G_L1_ABSOLUTE += 1000.0 * torch.mean(mask * diff_vertices * diff_vertices)
self.loss_G_L1_ABSOLUTE += 1000.0 * torch.mean(mask * diff_vertices_prv * diff_vertices_prv)
self.loss_G_L1_ABSOLUTE += 1000.0 * torch.mean(mask * diff_vertices_nxt * diff_vertices_nxt)
self.loss_G_L1_ABSOLUTE += 3000.0 * torch.mean(mask * diff_vertices_intermediate * diff_vertices_intermediate)
self.loss_G_L1_RELATIVE += 20000.0 * torch.mean(mask * diff_vertices_tmp_cur_nxt * diff_vertices_tmp_cur_nxt)
self.loss_G_L1_RELATIVE += 20000.0 * torch.mean(mask * diff_vertices_tmp_prv_cur * diff_vertices_tmp_prv_cur)
self.loss_G_L1_RELATIVE += 20000.0 * torch.mean(mask * diff_vertices_tmp_nxt_prv * diff_vertices_tmp_nxt_prv)
elif self.opt.lossType == 'RMS':
self.loss_G_L1_ABSOLUTE += 1000.0 * torch.sqrt(torch.mean(mask * diff_vertices * diff_vertices))
self.loss_G_L1_ABSOLUTE += 1000.0 * torch.sqrt(torch.mean(mask * diff_vertices_prv * diff_vertices_prv))
self.loss_G_L1_ABSOLUTE += 1000.0 * torch.sqrt(torch.mean(mask * diff_vertices_nxt * diff_vertices_nxt))
self.loss_G_L1_ABSOLUTE += 3000.0 * torch.sqrt(torch.mean(mask * diff_vertices_intermediate * diff_vertices_intermediate))
self.loss_G_L1_RELATIVE += 20000.0 * torch.sqrt(torch.mean(mask * diff_vertices_tmp_cur_nxt * diff_vertices_tmp_cur_nxt))
self.loss_G_L1_RELATIVE += 20000.0 * torch.sqrt(torch.mean(mask * diff_vertices_tmp_prv_cur * diff_vertices_tmp_prv_cur))
self.loss_G_L1_RELATIVE += 20000.0 * torch.sqrt(torch.mean(mask * diff_vertices_tmp_nxt_prv * diff_vertices_tmp_nxt_prv))
else: # L1
self.loss_G_L1_ABSOLUTE += 1000.0 * torch.mean(mask * torch.abs(diff_vertices))
self.loss_G_L1_ABSOLUTE += 1000.0 * torch.mean(mask * torch.abs(diff_vertices_prv))
self.loss_G_L1_ABSOLUTE += 1000.0 * torch.mean(mask * torch.abs(diff_vertices_nxt))
self.loss_G_L1_ABSOLUTE += 3000.0 * torch.mean(mask * torch.abs(diff_vertices_intermediate))
self.loss_G_L1_RELATIVE += 20000.0 * torch.mean(mask * torch.abs(diff_vertices_tmp_cur_nxt))
self.loss_G_L1_RELATIVE += 20000.0 * torch.mean(mask * torch.abs(diff_vertices_tmp_prv_cur))
self.loss_G_L1_RELATIVE += 20000.0 * torch.mean(mask * torch.abs(diff_vertices_tmp_nxt_prv))
self.loss_G_L1 = self.loss_G_L1_ABSOLUTE + self.loss_G_L1_RELATIVE
self.loss_G_Regularizer = self.netG.regularizer()
self.loss_G = self.loss_G_L1 + self.loss_G_Regularizer #self.loss_G_GAN + self.loss_G_L1 + self.regularizerTex
self.loss_G.backward()
def optimize_parameters(self, epoch_iter):
self.forward()
# update Generator
self.optimizer_G.zero_grad()
self.backward_G(epoch_iter)
self.optimizer_G.step()
| 16,974 | 47.5 | 172 | py |
NeuralVoicePuppetry | NeuralVoicePuppetry-master/Audio2ExpressionNet/Inference/util/image_pool.py | import random
import torch
class ImagePool():
def __init__(self, pool_size):
self.pool_size = pool_size
if self.pool_size > 0:
self.num_imgs = 0
self.images = []
def query(self, images):
if self.pool_size == 0:
return images
return_images = []
for image in images:
image = torch.unsqueeze(image.data, 0)
if self.num_imgs < self.pool_size:
self.num_imgs = self.num_imgs + 1
self.images.append(image)
return_images.append(image)
else:
p = random.uniform(0, 1)
if p > 0.5:
random_id = random.randint(0, self.pool_size - 1) # randint is inclusive
tmp = self.images[random_id].clone()
self.images[random_id] = image
return_images.append(tmp)
else:
return_images.append(image)
return_images = torch.cat(return_images, 0)
return return_images
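# --- added usage sketch (not part of the original file) -----------------------
# The pool keeps up to pool_size previously generated images; once full, each
# query returns the new image or a random historical one with probability 0.5,
# which decorrelates consecutive discriminator updates.
def _image_pool_example():
    pool = ImagePool(pool_size=50)
    fake = torch.randn(4, 3, 64, 64)
    fake_for_D = pool.query(fake)   # same shape, but entries may come from history
    return fake_for_D.shape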
| 1,072 | 31.515152 | 93 | py |
NeuralVoicePuppetry | NeuralVoicePuppetry-master/Audio2ExpressionNet/Inference/util/util.py | from __future__ import print_function
import torch
import numpy as np
from PIL import Image
import os
import sys
import array
import OpenEXR
import Imath
def load_exr(image_path):
# Open the input file
file = OpenEXR.InputFile(image_path)
# Compute the size
dw = file.header()['dataWindow']
w, h = (dw.max.x - dw.min.x + 1, dw.max.y - dw.min.y + 1)
# Read the three color channels as 32-bit floats
FLOAT = Imath.PixelType(Imath.PixelType.FLOAT)
    #(R,G,B) = [np.array(array.array('f', file.channel(Chan, FLOAT)).tolist()).reshape((h, w, 1)) for Chan in ("R", "G", "B") ]
    (r, g, b) = file.channels("RGB")
    # EXR channel data is stored in scanline order: h rows of w pixels each
    R = np.array(array.array('f', r).tolist()).reshape((h, w, 1))
    G = np.array(array.array('f', g).tolist()).reshape((h, w, 1))
    B = np.array(array.array('f', b).tolist()).reshape((h, w, 1))
return np.concatenate((R, G, B), axis=2)
# Converts a Tensor into an image array (numpy)
# |imtype|: the desired type of the converted numpy array
def tensor2im(input_image, imtype=np.uint8):
if isinstance(input_image, torch.Tensor):
input_image = torch.clamp(input_image, -1.0, 1.0)
image_tensor = input_image.data
else:
return input_image
image_numpy = image_tensor[0].cpu().float().numpy()
if image_numpy.shape[0] == 1:
image_numpy = np.tile(image_numpy, (3, 1, 1))
image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
return image_numpy.astype(imtype)
def diagnose_network(net, name='network'):
mean = 0.0
count = 0
for param in net.parameters():
if param.grad is not None:
mean += torch.mean(torch.abs(param.grad.data))
count += 1
if count > 0:
mean = mean / count
print(name)
print(mean)
def save_image(image_numpy, image_path):
image_pil = Image.fromarray(image_numpy)
image_pil.save(image_path)
def print_numpy(x, val=True, shp=False):
x = x.astype(np.float64)
if shp:
print('shape,', x.shape)
if val:
x = x.flatten()
print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))
def mkdirs(paths):
if isinstance(paths, list) and not isinstance(paths, str):
for path in paths:
mkdir(path)
else:
mkdir(paths)
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
| 2,454 | 28.578313 | 127 | py |
NeuralVoicePuppetry | NeuralVoicePuppetry-master/Audio2ExpressionNet/Inference/BaselModel/basel_model.py | import os
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import numpy as np
#########################
N_EXPRESSIONS=76 # <<<<<< NEEDS TO BE SPECIFIED ACCORDING TO THE USED FACE MODEL
#########################
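# --- added illustration (the MorphableModel below ships commented out, since it
# depends on the soft_renderer package): the face model is a linear blendshape
# rig, vertices = average + expression_basis @ expression_coefficients, with the
# basis pre-transposed so that matmul broadcasts cleanly over the batch.
def _blendshape_example(num_vertices=1000):
    average = torch.zeros(1, num_vertices, 3)            # placeholder mean face
    basis = torch.randn(3, num_vertices, N_EXPRESSIONS)  # placeholder basis (transposed)
    expr = torch.zeros(1, N_EXPRESSIONS)                 # neutral expression coefficients
    delta = torch.transpose(torch.matmul(basis, torch.transpose(expr, 0, 1)), 0, 2)
    return average + delta                               # -> 1 x num_vertices x 3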
#import soft_renderer as sr
#
#class MorphableModel(nn.Module):
# def __init__(self, filename_average=''):
# super(MorphableModel, self).__init__()
#
# print('Load Morphable Model (Basel)')
#
# #filename_mesh = os.path.join(opt.dataroot, opt.phase + '/average_model.obj')
# filename_mesh = filename_average
# if filename_average=='':
# print('use default identity')
# filename_mesh = './BaselModel/average.obj'
# mesh = sr.Mesh.from_obj(filename_mesh, normalization=False, load_texture=True)
# self.average_vertices = mesh.vertices[0]
# self.faces = mesh.faces[0]
# self.average_vertices = self.average_vertices[None, :, :] # [num_vertices, XYZ] -> [batch_size=1, num_vertices, XYZ]
# self.faces = self.faces[None, :, :] # [num_faces, 3] -> [batch_size=1, num_faces, 3]
# self.textures = mesh.textures
#
# self.num_vertices = self.average_vertices.shape[1]
# self.num_faces = self.faces.shape[1]
# print('vertices:', self.average_vertices.shape)
# print('faces:', self.faces.shape)
#
# ## basis function
# self.expression_basis = np.memmap('./BaselModel/ExpressionBasis.matrix', dtype='float32', mode='r').__array__()[1:] # first entry is the size
# self.expression_basis = np.resize(self.expression_basis, (N_EXPRESSIONS, self.num_vertices, 4))[:,:,0:3]
# self.expression_basis = torch.tensor(self.expression_basis.astype(np.float32)).cuda() # N_EXPRESSIONS x num_vertices x 3
# self.expression_basis = torch.transpose(self.expression_basis,0,2) # transpose for matmul
# print('expression_basis', self.expression_basis.shape)
#
# # default expression
# zeroExpr = torch.zeros(1, N_EXPRESSIONS, dtype=torch.float32).cuda()
# self.morph(zeroExpr)
#
#
# def save_model_to_obj_file(self, filename, mask=None):
# faces_cpu = self.faces.detach().cpu().numpy()
# vertices_cpu = self.vertices.detach().cpu().numpy()
#
# mask_cpu = None
# if not type(mask) == type(None):
# mask_cpu = mask.detach().cpu().numpy()
#
# f = open(filename, 'w')
# if type(mask) == type(None):
# for i in range(0, self.num_vertices):
# f.write('v ' + str(vertices_cpu[0, i, 0]) + ' ' + str(vertices_cpu[0, i, 1]) + ' ' + str(vertices_cpu[0, i, 2]) + '\n')
# else:
# for i in range(0, self.num_vertices):
# f.write('v ' + str(vertices_cpu[0, i, 0]) + ' ' + str(vertices_cpu[0, i, 1]) + ' ' + str(vertices_cpu[0, i, 2]) + ' ' + str(mask_cpu[i]) + ' ' + str(mask_cpu[i]) + ' ' + str(mask_cpu[i]) + ' 1'+ '\n')
#
# for i in range(0, self.num_faces):
# f.write('f ' + str(faces_cpu[0, i, 0]+1) + '// ' + str(faces_cpu[0, i, 1]+1) + '// ' + str(faces_cpu[0, i, 2]+1) + '//\n')
#
# f.close()
#
# def compute_expression_delta(self, expressions):
# return torch.transpose(torch.matmul(self.expression_basis, torch.transpose(expressions, 0,1)), 0, 2) # note that matmul wants to have this order: (a x b x c) x (c x m) => (a x b x m)
#
# def morph(self, expressions):
# self.vertices = self.average_vertices + self.compute_expression_delta(expressions)
# return self.vertices
#
#
#
# def LoadMask(self, filename=''):
# if filename=='':
# print('use default mask')
# filename = './BaselModel/mask/defaultMask_mouth.obj'
#
# mask = np.zeros(self.num_vertices)
# file = open(filename, 'r')
# i=0
# for line in file:
# if line[0] == 'v':
# floats = [float(x) for x in line[1:].split()]
# if floats[3] == 1.0 and floats[4] == 0.0 and floats[5] == 0.0:
# mask[i] = 1.0
# i += 1
# file.close()
# return torch.tensor(mask.astype(np.float32)).cuda() | 4,171 | 43.860215 | 222 | py |
NeuralVoicePuppetry | NeuralVoicePuppetry-master/Audio2ExpressionNet/Inference/data/audio_dataset.py | import os.path
import random
import torchvision.transforms as transforms
import torch
import numpy as np
from data.base_dataset import BaseDataset
from data.audio import Audio
#from data.image_folder import make_dataset
from PIL import Image
def make_dataset(dir):
    images = []
    ids = []
    assert os.path.isdir(dir), '%s is not a valid directory' % dir
    data_root = dir
    for root, _, fnames in sorted(os.walk(dir)):
        for fname in fnames:
            if any(fname.endswith(extension) for extension in ['.deepspeech.npy']):
                id_str = fname[:-15]  # strip the '.deepspeech.npy' suffix
                ids.append(int(id_str))
                data_root = root  # remember the directory that actually holds the features
    ids = sorted(ids)
    for id in ids:
        fname = str(id) + '.deepspeech.npy'
        path = os.path.join(data_root, fname)
        images.append(path)
    return images
def make_ids(paths, root_dir):
ids = []
for fname in paths:
l = fname.rfind('/')
#id_str = fname[l+1:-4]
id_str = fname[l+1:-15]
i = int(id_str)
#print(fname, ': ', i)
ids.append(i)
return ids
class AudioDataset(BaseDataset):
@staticmethod
def modify_commandline_options(parser, is_train):
return parser
def initialize(self, opt):
self.opt = opt
self.root = opt.dataroot
self.data_dir = os.path.join(opt.dataroot, 'audio_feature')
self.frame_paths = make_dataset(self.data_dir)
self.frame_ids = make_ids(self.frame_paths, self.root)
opt.nObjects = 1
opt.nTrainObjects = 116 # TODO
opt.nTestObjects = 1
opt.test_sequence_names = [[opt.dataroot.split("/")[-1], 'test']]
assert(opt.resize_or_crop == 'resize_and_crop')
if opt.isTrain:
print('ERROR: audio_dataset only allowed for test')
exit()
def getSampleWeights(self):
weights = np.ones((len(self.frame_paths)))
return weights
def getAudioFilename(self):
return os.path.join(self.root, 'audio.wav')
def getAudioFeatureFilename(self, idx):
return self.frame_paths[idx % len(self.frame_paths)]
def __getitem__(self, index):
#print('GET ITEM: ', index)
frame_path = self.frame_paths[index]
frame_id = self.frame_ids[index]
# load deepspeech feature
feature_array = np.load(frame_path)
dsf_np = np.resize(feature_array, (16,29,1))
dsf = transforms.ToTensor()(dsf_np.astype(np.float32))
# load sequence data if necessary
if self.opt.look_ahead:# use prev and following frame infos
r = self.opt.seq_len//2
for i in range(1,r): # prev frames
index_seq = index - i
if index_seq < 0: index_seq = 0
dsf_fname = self.frame_paths[index_seq]
feature_array = np.load(dsf_fname)
dsf_np = np.resize(feature_array, (16,29,1))
dsf_seq = transforms.ToTensor()(dsf_np.astype(np.float32)) # 1 x 16 x 29
dsf = torch.cat([dsf_seq, dsf], 0) # seq_len x 16 x 29
# note the ordering [old ... current]
for i in range(1,self.opt.seq_len - r + 1): # following frames
index_seq = index + i
max_idx = len(self.frame_paths)-1
if index_seq > max_idx: index_seq = max_idx
dsf_fname = self.frame_paths[index_seq]
feature_array = np.load(dsf_fname)
dsf_np = np.resize(feature_array, (16,29,1))
dsf_seq = transforms.ToTensor()(dsf_np.astype(np.float32)) # 1 x 16 x 29
dsf = torch.cat([dsf, dsf_seq], 0) # seq_len x 16 x 29
# note the ordering [old ... current ... future]
else:
for i in range(1,self.opt.seq_len):
index_seq = index - i
if index_seq < 0: index_seq = 0
dsf_fname = self.frame_paths[index_seq]
feature_array = np.load(dsf_fname)
dsf_np = np.resize(feature_array, (16,29,1))
dsf_seq = transforms.ToTensor()(dsf_np.astype(np.float32)) # 1 x 16 x 29
dsf = torch.cat([dsf_seq, dsf], 0) # seq_len x 16 x 29
# note the ordering [old ... current]
#################################
zeroIdentity = torch.zeros(100)
zeroExpressions = torch.zeros(76)
target_id = -1
internal_sequence_id = 0
weight = 1.0 / len(self.frame_paths)
return {'paths': frame_path,
'expressions': zeroExpressions,
'identity': zeroIdentity,
'intrinsics': np.zeros((4)),
'extrinsics': np.zeros((4,4)),
'audio_deepspeech': dsf, # deepspeech feature
'target_id':target_id,
'internal_id':internal_sequence_id,
'weight': np.array([weight]).astype(np.float32)}
def __len__(self):
return len(self.frame_paths)
def name(self):
return 'AudioDataset'
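# --- added usage sketch (opt and its paths are hypothetical) -------------------
# With opt.look_ahead the loader stacks seq_len DeepSpeech frames centered on the
# current one; without it, the window only reaches into the past.
def _audio_dataset_example(opt):
    dataset = AudioDataset()
    dataset.initialize(opt)                   # expects <opt.dataroot>/audio_feature/*.deepspeech.npy
    sample = dataset[0]
    return sample['audio_deepspeech'].shape   # -> torch.Size([opt.seq_len, 16, 29])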
| 5,048 | 33.582192 | 88 | py |
NeuralVoicePuppetry | NeuralVoicePuppetry-master/Audio2ExpressionNet/Inference/data/base_dataset.py | import torch.utils.data as data
from PIL import Image
import torchvision.transforms as transforms
class BaseDataset(data.Dataset):
def __init__(self):
super(BaseDataset, self).__init__()
def name(self):
return 'BaseDataset'
@staticmethod
def modify_commandline_options(parser, is_train):
return parser
def initialize(self, opt):
pass
def getSampleWeights(self):
return torch.ones((len(self)))
def __len__(self):
return 0
def get_transform(opt):
transform_list = []
if opt.resize_or_crop == 'resize_and_crop':
osize = [opt.loadSize, opt.loadSize]
transform_list.append(transforms.Resize(osize, Image.BICUBIC))
transform_list.append(transforms.RandomCrop(opt.fineSize))
elif opt.resize_or_crop == 'crop':
transform_list.append(transforms.RandomCrop(opt.fineSize))
elif opt.resize_or_crop == 'scale_width':
transform_list.append(transforms.Lambda(
lambda img: __scale_width(img, opt.fineSize)))
elif opt.resize_or_crop == 'scale_width_and_crop':
transform_list.append(transforms.Lambda(
lambda img: __scale_width(img, opt.loadSize)))
transform_list.append(transforms.RandomCrop(opt.fineSize))
elif opt.resize_or_crop == 'none':
transform_list.append(transforms.Lambda(
lambda img: __adjust(img)))
else:
raise ValueError('--resize_or_crop %s is not a valid option.' % opt.resize_or_crop)
if opt.isTrain and not opt.no_flip:
transform_list.append(transforms.RandomHorizontalFlip())
transform_list += [transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5),
(0.5, 0.5, 0.5))]
return transforms.Compose(transform_list)
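# --- added usage sketch (opt is a hypothetical options namespace) --------------
# get_transform assembles a torchvision pipeline from the option flags and ends
# by normalizing images to the [-1, 1] range the networks expect.
def _get_transform_example():
    from argparse import Namespace
    opt = Namespace(resize_or_crop='resize_and_crop', loadSize=286, fineSize=256,
                    isTrain=True, no_flip=False)
    transform = get_transform(opt)
    img = Image.new('RGB', (300, 300))
    return transform(img).shape               # -> torch.Size([3, 256, 256])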
# just modify the width and height to be multiple of 4
def __adjust(img):
ow, oh = img.size
# the size needs to be a multiple of this number,
# because going through generator network may change img size
# and eventually cause size mismatch error
mult = 4
if ow % mult == 0 and oh % mult == 0:
return img
w = (ow - 1) // mult
w = (w + 1) * mult
h = (oh - 1) // mult
h = (h + 1) * mult
if ow != w or oh != h:
__print_size_warning(ow, oh, w, h)
return img.resize((w, h), Image.BICUBIC)
def __scale_width(img, target_width):
ow, oh = img.size
# the size needs to be a multiple of this number,
# because going through generator network may change img size
# and eventually cause size mismatch error
mult = 4
assert target_width % mult == 0, "the target width needs to be multiple of %d." % mult
if (ow == target_width and oh % mult == 0):
return img
w = target_width
target_height = int(target_width * oh / ow)
m = (target_height - 1) // mult
h = (m + 1) * mult
if target_height != h:
__print_size_warning(target_width, target_height, w, h)
return img.resize((w, h), Image.BICUBIC)
def __print_size_warning(ow, oh, w, h):
if not hasattr(__print_size_warning, 'has_printed'):
print("The image size needs to be a multiple of 4. "
"The loaded image size was (%d, %d), so it was adjusted to "
"(%d, %d). This adjustment will be done to all images "
"whose sizes are not multiples of 4" % (ow, oh, w, h))
__print_size_warning.has_printed = True
| 3,469 | 31.735849 | 91 | py |
NeuralVoicePuppetry | NeuralVoicePuppetry-master/Audio2ExpressionNet/Inference/data/multi_face_audio_eq_tmp_cached_dataset.py | import os.path
import random
import torchvision.transforms as transforms
import torch
import numpy as np
from data.base_dataset import BaseDataset
from data.audio import Audio
#from data.image_folder import make_dataset
from PIL import Image
import progressbar
#def make_dataset(dir):
# images = []
# assert os.path.isdir(dir), '%s is not a valid directory' % dir
# for root, _, fnames in sorted(os.walk(dir)):
# for fname in fnames:
# if any(fname.endswith(extension) for extension in ['.bin', '.BIN']):
# path = os.path.join(root, fname)
# images.append(path)
# return sorted(images)
def make_dataset(dir):
    images = []
    ids = []
    assert os.path.isdir(dir), '%s is not a valid directory' % dir
    data_root = dir
    for root, _, fnames in sorted(os.walk(dir)):
        for fname in fnames:
            if any(fname.endswith(extension) for extension in ['.npy', '.NPY']):
                # filenames are expected to look like '<frame_id>.deepspeech.npy'
                id_str = fname[:-15]
                ids.append(int(id_str))
                data_root = root  # remember the directory that actually holds the features
    ids = sorted(ids)
    for id in ids:
        fname = str(id) + '.deepspeech.npy'
        path = os.path.join(data_root, fname)
        images.append(path)
    return images
def make_ids(paths, root_dir):
ids = []
for fname in paths:
l = fname.rfind('/')
id_str = fname[l+1:-15]#4]
i = int(id_str)
#print(fname, ': ', i)
ids.append(i)
return ids
def make_dataset_ids_png(dir):
images = []
ids = []
assert os.path.isdir(dir), '%s is not a valid directory' % dir
for root, _, fnames in sorted(os.walk(dir)):
for fname in fnames:
            if any(fname.endswith(extension) for extension in ['.png', '.PNG']):
id_str = fname[:-4]
i = int(id_str)
ids.append(i)
ids = sorted(ids)
return ids
def load_intrinsics(input_dir):
    file = open(input_dir+"/intrinsics.txt", "r")
    intrinsics = [float(x) for x in file.readline().split()]  # only the first line holds the intrinsics
    file.close()
    return intrinsics
def load_rigids(input_dir):
file = open(input_dir+"/rigid.txt", "r")
rigid_floats = [[float(x) for x in line.split()] for line in file] # note that it stores 5 lines per matrix (blank line)
file.close()
all_rigids = [ [rigid_floats[4*idx + 0],rigid_floats[4*idx + 1],rigid_floats[4*idx + 2],rigid_floats[4*idx + 3]] for idx in range(0, len(rigid_floats)//4) ]
return all_rigids
def load_expressions(input_dir):
file = open(input_dir+"/expression.txt", "r")
expressions = [[float(x) for x in line.split()] for line in file]
file.close()
return expressions
def load_identity(input_dir):
file = open(input_dir+"/identities.txt", "r")
identities = [[float(x) for x in line.split()] for line in file]
file.close()
return identities
class MultiFaceAudioEQTmpCachedDataset(BaseDataset):
@staticmethod
def modify_commandline_options(parser, is_train):
return parser
def initialize(self, opt):
self.opt = opt
self.root = opt.dataroot
# read dataset file that contains the filenames for the train, val and test lists
file = open(self.root+"/dataset.txt", "r")
self.filename_train_list, self.filename_val_list, self.filename_test_list = [str(line) for line in file]
file.close()
if self.filename_train_list[-1] == '\n': self.filename_train_list = self.filename_train_list[:-1]
if self.filename_val_list[-1] == '\n': self.filename_val_list = self.filename_val_list[:-1]
if self.filename_test_list[-1] == '\n': self.filename_test_list = self.filename_test_list[:-1]
# get list of train sequences
file = open(self.root+"/" + self.filename_train_list, "r")
self.train_sequence_names = [str(line) for line in file]
file.close()
for i in range(0,len(self.train_sequence_names)):
if self.train_sequence_names[i][-1] == '\n':
self.train_sequence_names[i] = self.train_sequence_names[i][:-1]
# get list of val sequences
file = open(self.root+"/" + self.filename_val_list, "r")
self.val_sequence_names = [[str(w) for w in line.split()] for line in file]
file.close()
for i in range(0,len(self.val_sequence_names)):
if self.val_sequence_names[i][0][-1] == '\n': self.val_sequence_names[i][0] = self.val_sequence_names[i][0][:-1]
if self.val_sequence_names[i][1][-1] == '\n': self.val_sequence_names[i][1] = self.val_sequence_names[i][1][:-1]
# get list of test sequences
file = open(self.root+"/" + self.filename_test_list, "r")
self.test_sequence_names = [[str(w) for w in line.split()] for line in file]
if opt.output_audio_expressions: self.test_sequence_names = self.test_sequence_names[0:1]
file.close()
for i in range(0,len(self.test_sequence_names)):
if self.test_sequence_names[i][0][-1] == '\n': self.test_sequence_names[i][0] = self.test_sequence_names[i][0][:-1]
if self.test_sequence_names[i][1][-1] == '\n': self.test_sequence_names[i][1] = self.test_sequence_names[i][1][:-1]
# print some stats
print('filename_train_list:', self.filename_train_list)
print('\tnum_seq:', len(self.train_sequence_names))
print('filename_val_list: ', self.filename_val_list)
print('\tnum_seq:', len(self.val_sequence_names))
print('filename_test_list: ', self.filename_test_list)
print('\tnum_seq:', len(self.test_sequence_names))
opt.train_sequence_names = self.train_sequence_names
opt.val_sequence_names = self.val_sequence_names
opt.test_sequence_names = self.test_sequence_names
# search mapping from val, test to train sequences that are used as targets
self.val_sequence_targets = []
for i in range(0,len(self.val_sequence_names)):
target_name = self.val_sequence_names[i][1]
target_id = -1
for j in range(0,len(self.train_sequence_names)):
if self.train_sequence_names[j] == target_name:
target_id = j
break
if target_id == -1:
print('Target sequence not in train set! ', target_name)
exit()
self.val_sequence_targets.append(target_id)
self.test_sequence_targets = []
for i in range(0,len(self.test_sequence_names)):
target_name = self.test_sequence_names[i][1]
target_id = -1
for j in range(0,len(self.train_sequence_names)):
if self.train_sequence_names[j] == target_name:
target_id = j
break
if target_id == -1:
print('Target sequence not in train set! ', target_name)
exit()
self.test_sequence_targets.append(target_id)
print('test: ', self.test_sequence_names[i])
print('\t target:', target_id)
# store len values
opt.nTrainObjects = len(self.train_sequence_names)
opt.nValObjects = len(self.val_sequence_names)
opt.nTestObjects = len(self.test_sequence_names)
################################################
################################################
################################################
# prepare dataloader paths / data
self.audio_feature_dir = []
self.image_dir = []
self.uvs_dir = []
self.audio_ids = []
self.image_ids = []
self.intrinsics = []
self.extrinsics = []
self.expressions = []
self.identities = []
self.target_id = []
self.n_frames_total = 0
if opt.phase == 'train':
self.sequence_names = self.train_sequence_names
for i in range(0,len(self.train_sequence_names)):
dataroot = os.path.join(opt.dataroot, self.train_sequence_names[i])
audio_feature_dir = os.path.join(opt.dataroot, self.train_sequence_names[i], 'audio_feature')
image_dir = os.path.join(opt.dataroot, self.train_sequence_names[i], 'images')
uvs_dir = os.path.join(opt.dataroot, self.train_sequence_names[i], 'uvs')
print('load train sequence:', self.train_sequence_names[i])
print('\tidentity_dir:', dataroot)
print('\taudio_feature_dir:', audio_feature_dir)
print('\timage_dir:', image_dir)
print('\tuvs_dir:', uvs_dir)
audio_ids = make_ids(make_dataset(audio_feature_dir), dataroot)
image_ids = make_dataset_ids_png(image_dir) # [-1] * len(audio_ids) #make_ids(make_dataset(image_dir), dataroot)
intrinsics = load_intrinsics(dataroot)
extrinsics = load_rigids(dataroot)
expressions = load_expressions(dataroot)
identity = load_identity(dataroot)
min_len = min(len(audio_ids), len(image_ids), len(extrinsics), len(expressions))
self.audio_feature_dir.append(audio_feature_dir)
self.image_dir.append(image_dir)
self.uvs_dir.append(uvs_dir)
self.audio_ids.append(audio_ids[:min_len])
self.image_ids.append(image_ids[:min_len])
self.intrinsics.append(intrinsics)
self.extrinsics.append(extrinsics[:min_len])
self.expressions.append(expressions[:min_len])
self.identities.append(identity[:min_len])
self.target_id.append(i)
self.n_frames_total += min_len
elif opt.phase == 'val':
for i in range(0,len(self.val_sequence_names)):
target_id = self.val_sequence_targets[i]
dataroot = os.path.join(opt.dataroot, self.train_sequence_names[target_id])
audio_feature_dir = os.path.join(opt.dataroot, self.val_sequence_names[i][0], 'audio_feature')
image_dir = os.path.join(opt.dataroot, self.train_sequence_names[target_id], 'images')
uvs_dir = os.path.join(opt.dataroot, self.train_sequence_names[target_id], 'uvs')
print('load val sequence:', self.val_sequence_names[i])
print('\tidentity_dir:', dataroot)
print('\taudio_feature_dir:', audio_feature_dir)
print('\timage_dir:', image_dir)
print('\tuvs_dir:', uvs_dir)
audio_ids = make_ids(make_dataset(audio_feature_dir), os.path.join(opt.dataroot, self.val_sequence_names[i][0]))
image_ids = make_dataset_ids_png(image_dir) # [-1] * len(audio_ids) #make_ids(make_dataset(image_dir), dataroot)
intrinsics = load_intrinsics(dataroot)
extrinsics = load_rigids(dataroot)
expressions = load_expressions(os.path.join(opt.dataroot, self.val_sequence_names[i][0]))
identity = load_identity(dataroot)
min_len = min(len(audio_ids), len(image_ids), len(extrinsics), len(expressions))
self.audio_feature_dir.append(audio_feature_dir)
self.image_dir.append(image_dir)
self.uvs_dir.append(uvs_dir)
self.audio_ids.append(audio_ids[:min_len])
self.image_ids.append(image_ids[:min_len])
self.intrinsics.append(intrinsics)
self.extrinsics.append(extrinsics[:min_len])
self.expressions.append(expressions[:min_len])
self.identities.append(identity[:min_len])
self.target_id.append(target_id)
self.n_frames_total += min_len
else: # test
for i in range(0,len(self.test_sequence_names)):
target_id = self.test_sequence_targets[i]
dataroot = os.path.join(opt.dataroot, self.train_sequence_names[target_id])
audio_feature_dir = os.path.join(opt.dataroot, self.test_sequence_names[i][0], 'audio_feature')
image_dir = os.path.join(opt.dataroot, self.train_sequence_names[target_id], 'images')
uvs_dir = os.path.join(opt.dataroot, self.train_sequence_names[target_id], 'uvs')
print('load test sequence:', self.test_sequence_names[i])
print('\tidentity_dir:', dataroot)
print('\taudio_feature_dir:', audio_feature_dir)
print('\timage_dir:', image_dir)
print('\tuvs_dir:', uvs_dir)
audio_ids = make_ids(make_dataset(audio_feature_dir), os.path.join(opt.dataroot, self.test_sequence_names[i][0]))
image_ids = make_dataset_ids_png(image_dir) # [-1] * len(audio_ids) #make_ids(make_dataset(image_dir), dataroot)
intrinsics = load_intrinsics(dataroot)
extrinsics = load_rigids(dataroot)
expressions = load_expressions(os.path.join(opt.dataroot, self.test_sequence_names[i][0]))
identity = load_identity(dataroot)
min_len = min(len(audio_ids), len(image_ids), len(extrinsics), len(expressions))
self.audio_feature_dir.append(audio_feature_dir)
self.image_dir.append(image_dir)
self.uvs_dir.append(uvs_dir)
self.audio_ids.append(audio_ids[:min_len])
self.image_ids.append(image_ids[:min_len])
self.intrinsics.append(intrinsics)
self.extrinsics.append(extrinsics[:min_len])
self.expressions.append(expressions[:min_len])
self.identities.append(identity[:min_len])
self.target_id.append(target_id)
self.n_frames_total += min_len
print('frames_total:', self.n_frames_total)
#global_target_ids = []
#for i in range(0,len(self.audio_ids)):
# for j in range(0,len(self.audio_ids[i])):
# global_target_ids.append(self.target_id[i])
#global_target_ids=np.array(global_target_ids)
#self.weights = np.where(global_target_ids==2, 1.0 * np.ones((self.n_frames_total)), 0.01 * np.ones((self.n_frames_total)) )
self.weights = []
for i in range(0,len(self.audio_ids)):
l = len(self.audio_ids[i])
for j in range(0,l):
self.weights.append(1.0 / l)
self.weights = np.array(self.weights)
assert(opt.resize_or_crop == 'resize_and_crop')
# mapping global to internal
self.mapping_global2internal = []
self.mapping_global2internal_offset = []
self.dsf = []
offset = 0
with progressbar.ProgressBar(max_value=self.n_frames_total) as bar:
for i in range(0,len(self.audio_ids)):
l = len(self.audio_ids[i])
dsf_seq = []
for k in range(0,l):
self.mapping_global2internal.append(i)
self.mapping_global2internal_offset.append(offset)
dsf_fname = os.path.join(self.audio_feature_dir[i], str(self.audio_ids[i][k]) + '.deepspeech.npy')
feature_array = np.load(dsf_fname)
dsf_np = np.resize(feature_array, (16,29,1))
dsf_seq.append(dsf_np.astype(np.float32))
bar.update(offset + k)
self.dsf.append(dsf_seq)
offset += l
def getSampleWeights(self):
return self.weights
def getitem(self, global_index):
# select sequence
internal_sequence_id = self.mapping_global2internal[global_index]
sum_frames = self.mapping_global2internal_offset[global_index]
# select frame from sequence
index = (global_index-sum_frames) % len(self.audio_ids[internal_sequence_id])
# get data ids
audio_id = self.audio_ids[internal_sequence_id][index]
image_id = self.image_ids[internal_sequence_id][index]
#print('GET ITEM: ', index)
#img_path = self.frame_paths[sequence_id][index]
# intrinsics and extrinsics
intrinsics = self.intrinsics[internal_sequence_id]
extrinsics = self.extrinsics[internal_sequence_id][image_id]
# expressions
expressions = np.asarray(self.expressions[internal_sequence_id][audio_id], dtype=np.float32)
#print('expressions:', expressions.shape)
expressions[32] *= 0.0 # remove eye brow movements
expressions[41] *= 0.0 # remove eye brow movements
expressions[71:75] *= 0.0 # remove eye brow movements
expressions = torch.tensor(expressions)
# identity
identity = torch.tensor(self.identities[internal_sequence_id][image_id])
target_id = self.target_id[internal_sequence_id] # sequence id refers to the target sequence (of the training corpus)
# load deepspeech feature
#print('audio_id', audio_id)
dsf_fname = os.path.join(self.audio_feature_dir[internal_sequence_id], str(audio_id) + '.deepspeech.npy')
dsf_np = self.dsf[internal_sequence_id][index]
dsf = transforms.ToTensor()(dsf_np)
# load sequence data if necessary
if self.opt.look_ahead:# use prev and following frame infos
r = self.opt.seq_len//2
for i in range(1,r): # prev frames
index_seq = index - i
if index_seq < 0: index_seq = 0
dsf_np = self.dsf[internal_sequence_id][index_seq]
dsf_seq = transforms.ToTensor()(dsf_np) # 1 x 16 x 29
dsf = torch.cat([dsf_seq, dsf], 0) # seq_len x 16 x 29
# note the ordering [old ... current]
for i in range(1,self.opt.seq_len - r + 1): # following frames
index_seq = index + i
max_idx = len(self.audio_ids[internal_sequence_id])-1
if index_seq > max_idx: index_seq = max_idx
dsf_np = self.dsf[internal_sequence_id][index_seq]
dsf_seq = transforms.ToTensor()(dsf_np) # 1 x 16 x 29
dsf = torch.cat([dsf, dsf_seq], 0) # seq_len x 16 x 29
# note the ordering [old ... current ... future]
else:
for i in range(1,self.opt.seq_len):
index_seq = index - i
if index_seq < 0: index_seq = 0
dsf_np = self.dsf[internal_sequence_id][index_seq]
dsf_seq = transforms.ToTensor()(dsf_np) # 1 x 16 x 29
dsf = torch.cat([dsf_seq, dsf], 0) # seq_len x 16 x 29
# note the ordering [old ... current]
#weight = 1.0 / len(self.audio_feature_dir[internal_sequence_id])
weight = self.weights[global_index]
return {'paths': dsf_fname, #img_path,
'intrinsics': np.array(intrinsics),
'extrinsics': np.array(extrinsics),
'expressions': expressions,
'identity': identity,
'audio_deepspeech': dsf, # deepspeech feature
'target_id':target_id,
'internal_id':internal_sequence_id,
'weight': np.array([weight]).astype(np.float32)}
def __getitem__(self, global_index):
# select frame from sequence
index = global_index
current = self.getitem(index)
prv = self.getitem(max(index-1, 0))
nxt = self.getitem(min(index+1, self.n_frames_total-1))
return {
'paths': current['paths'], #img_path,
'target_id': current['target_id'],
'internal_id': current['internal_id'],
'weight': current['weight'],
'identity': current['identity'],
'intrinsics': current['intrinsics'],
'extrinsics': current['extrinsics'],
'expressions': current['expressions'],
'audio_deepspeech': current['audio_deepspeech'],
'extrinsics_prv': prv['extrinsics'],
'expressions_prv': prv['expressions'],
'audio_deepspeech_prv': prv['audio_deepspeech'],
'extrinsics_nxt': nxt['extrinsics'],
'expressions_nxt': nxt['expressions'],
'audio_deepspeech_nxt': nxt['audio_deepspeech'],
}
def __len__(self):
return self.n_frames_total #len(self.frame_paths[0])
def name(self):
return 'MultiFaceAudioEQTmpCachedDataset'
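# --- added sketch (assumes an initialized dataset instance) --------------------
# Each sample bundles the current frame with its temporal neighbours; the
# *_prv/*_nxt entries feed the relative (temporal) losses of the model.
def _triplet_keys_example(dataset):
    sample = dataset[0]
    return sorted(k for k in sample if k.endswith(('_prv', '_nxt')))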
| 20,838 | 44.400871 | 160 | py |
NeuralVoicePuppetry | NeuralVoicePuppetry-master/Audio2ExpressionNet/Inference/data/audio.py | import time
import random
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
import torchaudio
import torchaudio.transforms
import librosa
import scipy.signal
import librosa.display
import matplotlib.pyplot as plt
class Audio():
def name(self):
return 'Audio'
def __init__(self, filename, write_mel_spectogram = False):
self.n_mels=128
self.fmax=8000
self.hop_length_ms = 20
sound, sample_rate = librosa.load(filename)#torchaudio.load(filename)
self.raw_audio = sound
self.sample_rate = sample_rate
print('sample_rate = %d' % self.sample_rate)
self.n_samples = sound.shape[0]
self.time_total = self.n_samples / self.sample_rate
print('length = %ds' % self.time_total)
print('compute mel spectrogram...')
self.hop_length = int(sample_rate / 1000.0 * self.hop_length_ms)
print('hop_length: ', self.hop_length)
self.mel_spectrogram = librosa.feature.melspectrogram(y=self.raw_audio, sr=self.sample_rate, hop_length=self.hop_length, n_mels=self.n_mels, fmax=self.fmax)
if write_mel_spectogram:
print('write spectrogram to file')
plt.figure(figsize=(100, 15))
librosa.display.specshow(librosa.power_to_db(self.mel_spectrogram, ref=np.max), y_axis='mel', fmax=self.fmax, x_axis='time')
plt.colorbar(format='%+2.0f dB')
plt.title('Mel spectrogram')
plt.tight_layout()
            plt.savefig('mel_features.png', dpi=None, facecolor='w', edgecolor='w', orientation='portrait', format=None, transparent=False, bbox_inches=None, pad_inches=0.1)  # only kwargs still supported by current matplotlib
print('mel: ', self.mel_spectrogram.shape) # (128, 18441)
self.n_mel_frames = self.mel_spectrogram.shape[1]
self.mel_sample_rate = self.mel_spectrogram.shape[1] / self.time_total
print('n_mel_frames: ', self.n_mel_frames)
print('mel_sample_rate: ', self.mel_sample_rate)
# convert to torch
self.mel_spectrogram = torch.FloatTensor(self.mel_spectrogram)
def getWindow(self, mel_frame_idx, window_size):
# get audio mel sample window
audio_start = mel_frame_idx - (window_size//2)
audio_end = mel_frame_idx + (window_size//2)
if audio_start < 0:
audio_input = self.mel_spectrogram[0:self.n_mels, 0:audio_end]
zeros = torch.zeros((self.n_mels,-audio_start))
audio_input = torch.cat([zeros, audio_input], 1)
elif audio_end >= self.n_mel_frames:
            audio_input = self.mel_spectrogram[:, audio_start:]
            zeros = torch.zeros((self.n_mels, audio_end - self.n_mel_frames))
audio_input = torch.cat([audio_input, zeros], 1)
else:
audio_input = self.mel_spectrogram[:, audio_start:audio_end]
return torch.reshape(audio_input, (1, 1, self.n_mels, window_size)) | 3,047 | 38.584416 | 218 | py |
NeuralVoicePuppetry | NeuralVoicePuppetry-master/Audio2ExpressionNet/Inference/data/face_dataset.py | import os.path
import random
import torchvision.transforms as transforms
import torch
import numpy as np
from data.base_dataset import BaseDataset
from data.audio import Audio
from PIL import Image
from util import util
def make_dataset(dir):
    images = []
    ids = []
    assert os.path.isdir(dir), '%s is not a valid directory' % dir
    data_root = dir
    for root, _, fnames in sorted(os.walk(dir)):
        for fname in fnames:
            if any(fname.endswith(extension) for extension in ['.npy', '.NPY']):
                # filenames are expected to look like '<frame_id>.deepspeech.npy'
                id_str = fname[:-15]
                ids.append(int(id_str))
                data_root = root  # remember the directory that actually holds the features
    ids = sorted(ids)
    for id in ids:
        fname = str(id) + '.deepspeech.npy'
        path = os.path.join(data_root, fname)
        images.append(path)
    return images
def make_ids(paths, root_dir):
ids = []
for fname in paths:
l = fname.rfind('/')
id_str = fname[l+1:-15]#4]
i = int(id_str)
#print(fname, ': ', i)
ids.append(i)
return ids
def load_expressions(input_dir):
file = open(input_dir+"/expression.txt", "r")
expressions = [[float(x) for x in line.split()] for line in file]
file.close()
return expressions
class FaceDataset(BaseDataset):
@staticmethod
def modify_commandline_options(parser, is_train):
return parser
def initialize(self, opt):
self.opt = opt
# directories
self.dataroot = opt.dataroot
self.audio_feature_dir = os.path.join(opt.dataroot, 'audio_feature')
# debug print
print('load sequence:', self.dataroot)
print('\taudio_feature_dir:', self.audio_feature_dir)
# generate index maps
audio_ids = make_ids(make_dataset(self.audio_feature_dir), self.dataroot)
# get model parameters
expressions = load_expressions(self.dataroot)
# set data
min_len = min(len(audio_ids), len(expressions))
self.audio_ids = audio_ids[:min_len]
self.expressions = expressions[:]
self.n_frames_total = min_len
print('\tnum frames:', self.n_frames_total)
opt.nTrainObjects = 1
opt.nValObjects = 1
opt.nTestObjects = 1
opt.test_sequence_names = [[opt.dataroot.split("/")[-1], 'train']]
assert(opt.resize_or_crop == 'resize_and_crop')
def getSampleWeights(self):
weights = np.ones((self.n_frames_total))
return weights
def getExpressions(self, idx):
        return self.expressions[self.audio_ids[idx % self.n_frames_total]]
def getAudioFilename(self):
return os.path.join(self.dataroot, 'audio.wav')
def getAudioFeatureFilename(self, idx):
#return self.frame_paths[idx % len(self.frame_paths)]
audio_id = self.audio_ids[idx]
return os.path.join(self.audio_feature_dir, str(audio_id) + '.deepspeech.npy')
def __getitem__(self, global_index):
# select frame from sequence
index = global_index
# get data ids
audio_id = self.audio_ids[index]
# intrinsics and extrinsics
intrinsics = np.zeros((4)) # not used
extrinsics = np.zeros((4,4))# not used
# expressions
expressions = np.asarray(self.expressions[audio_id], dtype=np.float32)
expressions = torch.tensor(expressions)
# identity
identity = torch.zeros(100) # not used
# load deepspeech feature
dsf_fname = os.path.join(self.audio_feature_dir, str(audio_id) + '.deepspeech.npy')
feature_array = np.load(dsf_fname)
dsf_np = np.resize(feature_array, (16,29,1))
dsf = transforms.ToTensor()(dsf_np.astype(np.float32))
# load sequence data if necessary
        if self.opt.look_ahead:  # use prev and following frame infos
r = self.opt.seq_len//2
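            # e.g. (hypothetical) seq_len=8 gives r=4: 3 past windows, the current
            # window and 4 future windows are stacked into an (8, 16, 29) tensor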
last_valid_idx = audio_id
for i in range(1,r): # prev frames
index_seq = index - i
if index_seq < 0: index_seq = 0
audio_id_seq = self.audio_ids[index_seq]
if audio_id_seq == audio_id - i: last_valid_idx = audio_id_seq
else: audio_id_seq = last_valid_idx
dsf_fname = os.path.join(self.audio_feature_dir, str(audio_id_seq) + '.deepspeech.npy')
feature_array = np.load(dsf_fname)
dsf_np = np.resize(feature_array, (16,29,1))
dsf_seq = transforms.ToTensor()(dsf_np.astype(np.float32)) # 1 x 16 x 29
dsf = torch.cat([dsf_seq, dsf], 0) # seq_len x 16 x 29
# note the ordering [old ... current]
last_valid_idx = audio_id
for i in range(1,self.opt.seq_len - r + 1): # following frames
index_seq = index + i
max_idx = len(self.audio_ids)-1
if index_seq > max_idx: index_seq = max_idx
audio_id_seq = self.audio_ids[index_seq]
if audio_id_seq == audio_id + i: last_valid_idx = audio_id_seq
else: audio_id_seq = last_valid_idx
dsf_fname = os.path.join(self.audio_feature_dir, str(audio_id_seq) + '.deepspeech.npy')
feature_array = np.load(dsf_fname)
dsf_np = np.resize(feature_array, (16,29,1))
dsf_seq = transforms.ToTensor()(dsf_np.astype(np.float32)) # 1 x 16 x 29
dsf = torch.cat([dsf, dsf_seq], 0) # seq_len x 16 x 29
# note the ordering [old ... current ... future]
else:
last_valid_idx = audio_id
for i in range(1,self.opt.seq_len):
index_seq = index - i
if index_seq < 0: index_seq = 0
audio_id_seq = self.audio_ids[index_seq]
if audio_id_seq == audio_id - i: last_valid_idx = audio_id_seq
else: audio_id_seq = last_valid_idx
dsf_fname = os.path.join(self.audio_feature_dir, str(audio_id_seq) + '.deepspeech.npy')
feature_array = np.load(dsf_fname)
dsf_np = np.resize(feature_array, (16,29,1))
dsf_seq = transforms.ToTensor()(dsf_np.astype(np.float32)) # 1 x 16 x 29
dsf = torch.cat([dsf_seq, dsf], 0) # seq_len x 16 x 29
# note the ordering [old ... current]
#################################
weight = 1.0 / self.n_frames_total
        return {
                'paths': dsf_fname,
'intrinsics': np.array(intrinsics),
'extrinsics': np.array(extrinsics),
'expressions': expressions,
'identity': identity,
'audio_deepspeech': dsf, # deepspeech feature
'target_id': -1,
'internal_id': 0,
'weight': np.array([weight]).astype(np.float32)}
def __len__(self):
return self.n_frames_total
def name(self):
return 'FaceDataset'
| 7,029 | 34.505051 | 103 | py |
NeuralVoicePuppetry | NeuralVoicePuppetry-master/Audio2ExpressionNet/Inference/data/__init__.py | import importlib
import torch.utils.data
from data.base_data_loader import BaseDataLoader
from data.base_dataset import BaseDataset
def find_dataset_using_name(dataset_name):
# Given the option --dataset_mode [datasetname],
# the file "data/datasetname_dataset.py"
# will be imported.
dataset_filename = "data." + dataset_name + "_dataset"
datasetlib = importlib.import_module(dataset_filename)
# In the file, the class called DatasetNameDataset() will
# be instantiated. It has to be a subclass of BaseDataset,
# and it is case-insensitive.
dataset = None
target_dataset_name = dataset_name.replace('_', '') + 'dataset'
for name, cls in datasetlib.__dict__.items():
if name.lower() == target_dataset_name.lower() \
and issubclass(cls, BaseDataset):
dataset = cls
if dataset is None:
print("In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase." % (dataset_filename, target_dataset_name))
exit(0)
return dataset
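# e.g. find_dataset_using_name('face') imports data.face_dataset and returns the
# FaceDataset class (the 'facedataset' name match is case-insensitive).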
def get_option_setter(dataset_name):
dataset_class = find_dataset_using_name(dataset_name)
return dataset_class.modify_commandline_options
def create_dataset(opt):
dataset = find_dataset_using_name(opt.dataset_mode)
instance = dataset()
instance.initialize(opt)
print("dataset [%s] was created" % (instance.name()))
return instance
def CreateDataLoader(opt):
data_loader = CustomDatasetDataLoader()
data_loader.initialize(opt)
return data_loader
# Wrapper class of Dataset class that performs
# multi-threaded data loading
class CustomDatasetDataLoader(BaseDataLoader):
def name(self):
return 'CustomDatasetDataLoader'
def initialize(self, opt):
BaseDataLoader.initialize(self, opt)
self.dataset = create_dataset(opt)
        if opt.serial_batches:
            self.dataloader = torch.utils.data.DataLoader(
                self.dataset,
                batch_size=opt.batch_size,
                shuffle=False,  # serial batches: keep dataset order
                num_workers=int(opt.num_threads))
else:
            # balance sampling via per-sample weights provided by the dataset
            weights = self.dataset.getSampleWeights()
weights = torch.DoubleTensor(weights)
sampler = torch.utils.data.sampler.WeightedRandomSampler(weights, len(weights))
self.dataloader = torch.utils.data.DataLoader(
self.dataset,
batch_size=opt.batch_size,
#shuffle=True,
sampler=sampler,
pin_memory=True,
num_workers=int(opt.num_threads))
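            # note: WeightedRandomSampler draws len(weights) samples per epoch with
            # replacement, so an "epoch" is a reweighted resampling of the dataset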
def load_data(self):
return self
def __len__(self):
return min(len(self.dataset), self.opt.max_dataset_size)
def __iter__(self):
for i, data in enumerate(self.dataloader):
if i * self.opt.batch_size >= self.opt.max_dataset_size:
break
yield data
| 3,157 | 33.703297 | 168 | py |
z2fsl | z2fsl-main/setup.py | '''Setup script
Usage: pip install .
To install development dependencies too, run: pip install .[dev]
'''
from setuptools import setup, find_packages
setup(
name='z2fsl',
version='v1',
packages=find_packages(),
    url='https://github.com/gchochla/z2fsl',
author='Georgios Chochlakis',
scripts=[],
install_requires=[],
extras_require={
'dev': [
'torch',
'torchvision',
'sklearn',
'numpy'
],
},
)
| 495 | 19.666667 | 64 | py |
z2fsl | z2fsl-main/z2fsl/z2fsl_vaegan.py | """Z2FSL(VAEGAN, PN) trainer class and script."""
import os
import argparse
from copy import deepcopy
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.model_selection import ParameterGrid
from z2fsl.modules.classifiers import PrototypicalNet
from z2fsl.utils.losses import EpisodeCrossEntropyLoss
from z2fsl.utils.submodules import CVAE, ConcatMLP
from z2fsl.utils.datasets import FeatureEpisodeFactory
from z2fsl.utils.losses import kl_divergence, gradient_penalty
from z2fsl.utils.general import (
str2bool,
set_parameter_requires_grad,
configuration_filename,
dataset_name,
manual_seed,
)
class Z2FSL_VAEGAN_Trainer:
"""Z2FSL(VAEGAN, PN) trainer class.
Attributes:
vae (`nn.Module`): CVAE.
critic (`nn.Module`): Discriminator.
fsl_classifier (`nn.Module`): Prototypical Net.
episode_factory_kwargs (`dict`): episode factory keywords.
train_split (`str`): which split to train on (basically
`'train'` or `'trainval'`).
eval_split (`str`): which split to evaluate on (basically
`'val'` or `'test'`).
generalized (`bool`): whether to train and evaluate on GZSL.
way (`int`): way of training episodes.
shot (`int`): shot of training episodes (provided by generator).
queries_per (`int`): number of samples per class in query set.
device (`str`): device to run on.
noise_dim (`int`): dimension of generator noise / VAE's latent dim.
lambda_z2fsl (`float`): FSL classifier loss coefficient
(\gamma in paper).
lambda_wgan (`float`): WGAN loss coefficient (\\beta in paper).
        lambda_vae (`float`): VAE loss coefficient (basically used
to turn off the VAE).
lambda_pen (`float`): WGAN regularization term coefficient.
train_fsl (`bool`): whether to train the FSL classifier jointly
with the generator.
"""
def __init__(
self,
dataset_dir,
feature_dir='features',
proposed_splits=True,
l2_attr_norm=True,
augmentation=True,
feature_norm=True,
train_split='train',
eval_split='val',
generalized=False,
feature_dim=2048,
metadata_dim=312,
noise_dim=None,
generator_hidden_layers=None,
classifier_hidden_layers=None,
embedding_dim=128,
critic_hidden_layers=None,
way=20,
shot=5,
queries_per=10,
pretrained_fsl_dir='',
lambda_z2fsl=1e2,
lambda_pen=10,
lambda_wgan=1e2,
lambda_vae=1,
device='cuda:0',
**kwargs
):
"""Init.
Args:
dataset_dir (`str`): root dir of dataset.
feature_dir (`str`, optional): directory of features relative to `dataset_dir`.
proposed_splits (`bool`, optional): whether to use proposed splits,
default `True`.
l2_attr_norm (`bool`, optional): whether to L2 normalize attributes,
default `True`.
            augmentation (`bool`, optional): whether to use augmentation, default `True`.
feature_norm (`bool`, optional): whether to minmax normalize features,
default `True`.
train_split (`str`, optional): which split to train on (basically
`'train'` or `'trainval'`), default `'train'`.
eval_split (`str`, optional): which split to evaluate on (basically
`'val'` or `'test'`), default `'val'`.
generalized (`bool`, optional): whether to train and evaluate on GZSL,
default `False`.
feature_dim (`int`, optional): dimension of input features, default `2048`.
metadata_dim (`int`, optional): dimension of auxiliary descriptions.
noise_dim (`int`, optional): dimension of generator's noise dimension,
default is same as `metadata_dim`.
generator_hidden_layers (`int` | `list` of `int`s | `None`, optional):
generator hidden layers, default `None`.
classifier_hidden_layers (`int` | `list` of `int`s | `None`, optional):
classifier hidden layers, default `None`.
embedding_dim (`int`, optional): dimension of PN output, default `128`.
critic_hidden_layers (`int` | `list` of `int`s | `None`, optional):
discriminator hidden layers, default `None`.
way (`int`, optional): way of training episodes, default `20`.
shot (`int`, optional): shot of training episodes (provided by generator),
default `5`.
queries_per (`int`, optional): number of samples per class in query set,
default `10`.
pretrained_fsl_dir (`str`, optional): directory where pre-trained FSL
classifier is stored, default `''` (no pretrained classifier,
classifiers are names a certain way based on hyperparameters
and benchmarks, so script picks them up automatically).
lambda_z2fsl (`float`, optional): FSL classifier loss coefficient
(\gamma in paper), default `100`.
lambda_pen (`float`, optional): WGAN regularization term coefficient,
default `10`.
lambda_wgan (`float`, optional): WGAN loss coefficient (\\beta in paper),
default `100`.
            lambda_vae (`float`, optional): VAE loss coefficient (basically used
to turn off the VAE), default `1`.
device (`str`, optional): device to run on, default `'cuda:0'`.
            train_fsl (`bool`, optional): whether to train the FSL classifier.
"""
if noise_dim is None:
noise_dim = metadata_dim
self.vae = CVAE(
feature_dim=feature_dim,
latent_dim=noise_dim,
cond_dim=metadata_dim,
hidden_layers=generator_hidden_layers,
).to(device)
input_dim = metadata_dim + feature_dim
self.critic = ConcatMLP(
in_features=input_dim,
out_features=1,
hidden_layers=critic_hidden_layers,
output_actf=nn.Identity(),
).to(device)
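        # the CVAE decoder doubles as the conditional generator: decode(attributes, z)
        # with z ~ N(0, I) synthesizes class-conditional features for both the critic
        # and the FSL classifier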
self.episode_factory_kwargs = dict(
dataset_dir=dataset_dir,
feature_dir=feature_dir,
proposed_splits=proposed_splits,
l2_attr_norm=l2_attr_norm,
augmentation=augmentation,
feature_norm=feature_norm,
)
self.train_split = train_split
self.eval_split = eval_split
self.generalized = generalized
self.way = way
self.shot = shot
self.queries_per = queries_per
self.device = device
self.noise_dim = noise_dim
self.lambda_z2fsl = lambda_z2fsl
self.lambda_wgan = lambda_wgan
self.lambda_vae = lambda_vae
self.lambda_pen = lambda_pen
self.fsl_classifier = PrototypicalNet(
in_features=feature_dim,
out_features=embedding_dim,
hidden_layers=classifier_hidden_layers,
)
self.train_fsl = lambda_z2fsl > 0
if pretrained_fsl_dir:
ds_name = dataset_name(dataset_dir)
fsl_fn = 'fsl_{}{}_{}'.format(
ds_name,
'_noaug' if not augmentation else '',
configuration_filename(feature_dir, proposed_splits, train_split, generalized),
)
fsl_fn = os.path.join(pretrained_fsl_dir, fsl_fn)
state_dict = torch.load(fsl_fn)
self.fsl_classifier.load_state_dict(state_dict)
self.train_fsl = kwargs.get('train_fsl', self.train_fsl)
if not self.train_fsl:
set_parameter_requires_grad(self.fsl_classifier, False)
self.fsl_classifier.to(device)
def train(
self,
episodes,
gen_lr,
gen_beta1=0.5,
fsl_lr=None,
fsl_beta1=None,
val_epochs=25,
val_samples=350,
train_val_samples=None,
val_lr=1e-3,
val_beta1=0.5,
print_interval=25,
eval_interval=None,
n_fsl=1,
n_critic=5,
mix=False,
real_support=True,
softmax=True,
):
"""Trains all components (FSL classifier, generator, encoder, discriminator)
and provides evaluation metrics.
Args:
episodes (`int`): number of training iterations.
gen_lr (`float`): generator (and all backbone VAEGAN components)
learning rate.
gen_beta1 (`float`): beta1 of Adam optimizer for VAEGAN backbone,
default `0.5`.
fsl_lr (`float` | `None`, optional): FSL classifier's learning rate,
default is the same as `gen_lr`.
fsl_beta1 (`float` | `None`, optional): FSL classifier's beta1 for Adam,
default is the same as `gen_beta1`.
val_epochs (`int`, optional): epochs of finetuning the FSL classifier on
unseen classes or linear classifier's training, default `25`.
val_samples (`int`, optional): number of samples to produce per unseen class
during evaluation, default `350`.
train_val_samples (`list` of `int`s | `None`, optional): number of samples to
produce per seen class during evaluation, default is same as
`val_samples`.
val_lr (`float`, optional): learning rate of linear classifier, default `1e-3`.
val_beta1 (`float`, optional): beta1 of linear classifier's Adam, default `0.5`.
print_interval (`int`, optional): episodes to wait before printing several training
losses, default `25`.
eval_interval (`int` | `None`, optional): episodes to wait before printing evaluation
metrics, default is at the end of training.
n_fsl (`int`, optional): number of FSL classifier updates per training iteration,
default `1`.
            n_critic (`int`, optional): number of critic (discriminator) updates per training iteration,
default `5`.
mix (`bool`, optional): mix support and query sets during training, default `False`.
real_support (`bool`, optional): if GZSL, whether to use real or synthetic support,
default `True`.
softmax (`bool`, optional): whether to evaluate with a linear classifier as well,
default `True`.
Returns:
`dict` of `dict`, indexed by the number of training iterations, that includes
evaluation method `str`: accuracy key-value pairs, e.g.
{
1000: {
'normal': 0.45, # if shot remains the same as training
'more-shots': 0.58,
'finetune': 0.59,
'softmax': 0.55,
},
2000: {
'normal': 0.48,
'more-shots': 0.61,
'finetune': 0.62,
'softmax': 0.56,
}
}
"""
if eval_interval is None:
eval_interval = episodes
if fsl_lr is None:
fsl_lr = gen_lr
if fsl_beta1 is None:
fsl_beta1 = gen_beta1
evals = {}
optimizer = optim.Adam(self.vae.parameters(), lr=gen_lr, betas=(gen_beta1, 0.999))
c_optimizer = optim.Adam(self.critic.parameters(), lr=gen_lr, betas=(gen_beta1, 0.999))
if self.train_fsl:
fsl_optimizer = optim.Adam(
self.fsl_classifier.parameters(), lr=fsl_lr, betas=(fsl_beta1, 0.999)
)
episode_factory = FeatureEpisodeFactory(
split=self.train_split,
inner_split='train',
generalized=self.generalized,
**self.episode_factory_kwargs
)
criterion = EpisodeCrossEntropyLoss()
_bce = nn.BCELoss(reduction='sum')
bce = lambda pred, tar: _bce(pred, tar) / pred.size(0)
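        # reconstruction BCE summed over feature dims but averaged over the batch,
        # giving a per-sample reconstruction term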
for episode in range(episodes):
for i in range(n_critic + n_fsl + 1):
features, attributes, _ = episode_factory(
queries_per=self.queries_per, way=self.way
)
if i < n_critic:
#######################
### critic training ###
features = torch.cat(features)
attributes = torch.stack(attributes)
attributes = attributes.repeat_interleave(self.queries_per, dim=0)
features, attributes = features.to(self.device), attributes.to(self.device)
noise = torch.randn(attributes.size(0), self.noise_dim, device=self.device)
with torch.no_grad():
gen_features = self.vae.decode(attributes, noise)
gen_critic_outs = self.critic(gen_features, attributes)
real_critic_outs = self.critic(features, attributes)
grad_pen = gradient_penalty(self.critic, features, gen_features, attributes)
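                    # WGAN-GP critic objective (assuming gradient_penalty implements
                    # the usual penalty term):
                    #   E[D(G(z))] - E[D(x)] + lambda_pen * E[(||grad D(x_hat)||_2 - 1)^2]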
c_loss = (
gen_critic_outs.mean(dim=0)
- real_critic_outs.mean(dim=0)
+ self.lambda_pen * grad_pen
)
c_optimizer.zero_grad()
c_loss.backward()
c_optimizer.step()
### critic training ###
#######################
elif n_critic <= i < n_critic + n_fsl and self.train_fsl:
#########################
### fsl algo training ###
features = [class_features.to(self.device) for class_features in features]
support = []
for class_attributes in attributes:
class_attributes = class_attributes.repeat(self.shot, 1).to(self.device)
noise = torch.randn(self.shot, self.noise_dim, device=self.device)
with torch.no_grad():
support.append(self.vae.decode(class_attributes, noise))
logits = self.fsl_classifier(support, features, mix=mix)
clsf_loss = criterion(logits)
fsl_optimizer.zero_grad()
clsf_loss.backward()
fsl_optimizer.step()
### fsl algo training ###
#########################
else:
#######################
### vaegan training ###
### classification
features = [class_features.to(self.device) for class_features in features]
support = []
for class_attributes in attributes:
class_attributes = class_attributes.repeat(self.shot, 1).to(self.device)
noise = torch.randn(self.shot, self.noise_dim, device=self.device)
support.append(self.vae.decode(class_attributes, noise))
if self.lambda_z2fsl > 0:
logits = self.fsl_classifier(support, features, mix=mix)
clsf_loss = criterion(logits)
else:
clsf_loss = torch.tensor(0.0)
### vae
attributes = torch.stack(attributes).to(self.device)
if self.lambda_vae > 0:
features = torch.cat(features)
rec_features, mean, logvar = self.vae(
features, attributes.repeat_interleave(self.queries_per, dim=0)
)
bce_loss = bce(rec_features, features)
kl_loss = kl_divergence(mean, logvar)
vae_loss = bce_loss + kl_loss
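                        # negative ELBO up to constants, assuming kl_divergence
                        # returns KL(q(z|x) || N(0, I))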
else:
vae_loss = torch.tensor(0.0)
### wgan
if self.lambda_wgan > 0:
gen_features = torch.cat(support)
critic_outs = self.critic(
gen_features, attributes.repeat_interleave(self.shot, dim=0)
)
wgan_loss = -critic_outs.mean(dim=0)
else:
wgan_loss = torch.tensor(0.0)
if torch.isnan(wgan_loss).any():
print('WGAN nan, episode {}'.format(episode))
if torch.isnan(vae_loss).any():
print('VAE nan, episode {}'.format(episode))
if torch.isnan(bce_loss).any():
print('BCE')
if torch.isnan(kl_loss).any():
print('KL')
if torch.isnan(clsf_loss).any():
print('Classification nan, episode {}'.format(episode))
loss = (
self.lambda_vae * vae_loss
+ self.lambda_wgan * wgan_loss
+ self.lambda_z2fsl * clsf_loss
)
if torch.isnan(loss):
break
optimizer.zero_grad()
loss.backward()
nn.utils.clip_grad_value_(self.vae.parameters(), 5)
optimizer.step()
### vaegan training ###
#######################
break # to avoid repeating if train_fsl is false
if torch.isnan(loss):
self.vae.to('cpu')
dire = 'models'
if not os.path.exists(dire):
os.makedirs(dire)
torch.save(self.vae.to('cpu').state_dict(), os.path.join(dire, 'vae.pt'))
break
if episode == 0 or (episode + 1) % print_interval == 0:
##########################
### renew measurements ###
features, attributes, _ = episode_factory(
queries_per=self.queries_per, way=self.way
)
features = [class_features.to(self.device) for class_features in features]
support = []
for class_attributes in attributes:
class_attributes = class_attributes.repeat(self.shot, 1).to(self.device)
noise = torch.randn(self.shot, self.noise_dim, device=self.device)
with torch.no_grad():
support.append(self.vae.decode(class_attributes, noise))
# get classification accuracy
with torch.no_grad():
logits = self.fsl_classifier(support, features)
c_loss = criterion(logits)
accs = []
for i, class_logits in enumerate(logits):
preds = class_logits.cpu().argmax(dim=-1)
corr = (preds == i).sum().item()
tot = preds.size(0)
accs.append(corr / tot)
acc = sum(accs) / len(accs)
# get vae metrics
real_features = torch.cat(features)
attributes = torch.stack(attributes).to(self.device)
with torch.no_grad():
rec_features, mean, logvar = self.vae(
real_features, attributes.repeat_interleave(self.queries_per, dim=0)
)
bce_loss = bce(rec_features, real_features)
kl_loss = kl_divergence(mean, logvar)
# get wasserstein distance proxy
gen_features = torch.cat(support)
with torch.no_grad():
gen_critic_outs = self.critic(
gen_features, attributes.repeat_interleave(self.shot, dim=0)
)
real_critic_outs = self.critic(
real_features, attributes.repeat_interleave(self.queries_per, dim=0)
)
w_dist = real_critic_outs.mean(dim=0) - gen_critic_outs.mean(dim=0)
### renew measurements ###
##########################
epoch_msg = (
'Episode {} - Rec loss = {:.3f} KL = {:.3f} W dist = {:.3f} '
'Clsf loss = {:.3f} Clsf acc = {:.1f}%'
).format(
episode + 1,
bce_loss.item(),
kl_loss.item(),
w_dist.item(),
c_loss.item(),
acc * 100,
)
print(epoch_msg)
if (episode + 1) % eval_interval == 0:
evals[episode + 1] = {}
if self.generalized:
if train_val_samples is None:
train_val_samples = [val_samples]
acc = self.eval()
acc_ms = {}
for train_samples in train_val_samples:
acc_ms[train_samples] = self.eval(
shot=val_samples,
train_shot=train_samples,
real_support=real_support,
)
max_train_samples = max(acc_ms.keys(), key=(lambda key: acc_ms[key]))
acc_ms_finetune = self.eval(
shot=val_samples,
train_shot=max_train_samples,
episodes=val_epochs,
fsl_lr=fsl_lr,
fsl_beta1=fsl_beta1,
real_support=real_support,
)
accs = {
'normal': acc,
'more-shots-{}'.format(max_train_samples): acc_ms[max_train_samples],
'finetune': acc_ms_finetune,
}
if softmax:
softmax_acc = self.eval_softmax(
epochs=val_epochs, lr=val_lr, beta1=val_beta1, per_class=val_samples
)
accs['softmax'] = softmax_acc
else:
acc = self.eval()
acc_ms = self.eval(shot=val_samples)
acc_ms_finetune = self.eval(
shot=val_samples, episodes=val_epochs, fsl_lr=fsl_lr, fsl_beta1=fsl_beta1
)
accs = {
'normal': acc,
'more-shots': acc_ms,
'finetune': acc_ms_finetune,
}
if softmax:
softmax_acc = self.eval_softmax(
epochs=val_epochs, lr=val_lr, beta1=val_beta1, per_class=val_samples
)
softmax_mapped_acc = self.eval_softmax(
epochs=val_epochs,
lr=val_lr,
beta1=val_beta1,
per_class=val_samples,
use_mapper=True,
)
accs['softmax'] = softmax_acc
accs['mapped-softmax'] = softmax_mapped_acc
print('Evaluation: ', end='')
print(
' '.join(
[
'{} {}'.format(
key.title().replace('-', ' '),
' '.join(['{:.1f}%'.format(acc * 100) for acc in accs[key]]),
)
for key in accs
]
)
)
evals[episode + 1] = accs
return evals
def eval(
self, episodes=0, shot=None, train_shot=None, fsl_lr=5e-5, fsl_beta1=0.5, real_support=True
):
"""Evaluates generator in the desired ZSL setting
within the Z2FSL framework.
Args:
episodes (`int`, optional): number of episodes to finetune FSL classifier,
default `0`.
shot (`int` | `None`, optional): shot in test episode of unseen classes,
default is same as training shot.
train_shot (`int` | `None`, optional): shot in test episode of seen classes,
default is same as `shot`.
fsl_lr (`float`, optional): learning rate of FSL classifier, default `5e-5`.
fsl_beta1 (`float`, optional): beta1 of FSL classifier, default `0.5`.
real_support (`bool`, optional): whether to use real support in GZSL.
Returns:
            `list` of `float`s: `[accuracy]` in ZSL, `[harmonic, seen, unseen]` in GZSL.
"""
self.vae.eval()
set_parameter_requires_grad(self.vae, False)
if shot is None:
shot = self.shot
if train_shot is None:
train_shot = shot
eval_episode_factory = FeatureEpisodeFactory(
split=self.eval_split,
stat_split=self.train_split,
dataset_dir=self.episode_factory_kwargs['dataset_dir'],
feature_dir=self.episode_factory_kwargs['feature_dir'],
proposed_splits=self.episode_factory_kwargs['proposed_splits'],
l2_attr_norm=self.episode_factory_kwargs['l2_attr_norm'],
feature_norm=self.episode_factory_kwargs['feature_norm'],
augmentation=False,
)
features, attributes, _ = eval_episode_factory()
if self.generalized:
train_episode_factory = FeatureEpisodeFactory(
split=self.train_split,
inner_split='test',
dataset_dir=self.episode_factory_kwargs['dataset_dir'],
feature_dir=self.episode_factory_kwargs['feature_dir'],
proposed_splits=self.episode_factory_kwargs['proposed_splits'],
l2_attr_norm=self.episode_factory_kwargs['l2_attr_norm'],
feature_norm=self.episode_factory_kwargs['feature_norm'],
augmentation=False,
generalized=True,
)
seen_test_features, train_attributes, train_ids = train_episode_factory()
if real_support:
episode_factory = FeatureEpisodeFactory(
split=self.train_split,
inner_split='train',
generalized=True,
augmentation=False,
dataset_dir=self.episode_factory_kwargs['dataset_dir'],
feature_dir=self.episode_factory_kwargs['feature_dir'],
proposed_splits=self.episode_factory_kwargs['proposed_splits'],
l2_attr_norm=self.episode_factory_kwargs['l2_attr_norm'],
feature_norm=self.episode_factory_kwargs['feature_norm'],
)
seen_train_features, _, train_ids_perm = episode_factory()
# align train and test seen-classes features
seen_train_features = [
seen_train_features[train_ids_perm.index(tid)][:train_shot] for tid in train_ids
]
fsl_classifier = deepcopy(self.fsl_classifier) # copy to finetune
set_parameter_requires_grad(fsl_classifier, True)
###########################
### finetuning fsl algo ###
fsl_optimizer = optim.Adam(fsl_classifier.parameters(), lr=fsl_lr, betas=(fsl_beta1, 0.999))
criterion = EpisodeCrossEntropyLoss()
for _ in range(episodes):
rand_inds = torch.randperm(len(attributes)).numpy()[: self.way]
support = []
query = []
for i in rand_inds:
class_attributes = attributes[i].repeat(self.shot, 1).to(self.device)
noise = torch.randn(self.shot, self.noise_dim, device=self.device)
support.append(self.vae.decode(class_attributes, noise))
class_attributes = attributes[i].repeat(self.queries_per, 1).to(self.device)
noise = torch.randn(self.queries_per, self.noise_dim, device=self.device)
query.append(self.vae.decode(class_attributes, noise))
logits = fsl_classifier(support, query)
clsf_loss = criterion(logits)
fsl_optimizer.zero_grad()
clsf_loss.backward()
fsl_optimizer.step()
### finetuning fsl algo ###
###########################
fsl_classifier.eval()
set_parameter_requires_grad(fsl_classifier, False)
support = []
for class_attributes in attributes:
class_attributes = class_attributes.repeat(shot, 1).to(self.device)
noise = torch.randn(shot, self.noise_dim, device=self.device)
support.append(self.vae.decode(class_attributes, noise))
if self.generalized:
features.extend(seen_test_features)
if real_support:
seen_train_features = [
class_features.to(self.device) for class_features in seen_train_features
]
support.extend(seen_train_features)
else:
for class_attributes in train_attributes:
class_attributes = class_attributes.repeat(train_shot, 1).to(self.device)
noise = torch.randn(train_shot, self.noise_dim, device=self.device)
support.append(self.vae.decode(class_attributes, noise))
features = [class_features.to(self.device) for class_features in features]
logits = fsl_classifier(support, features)
accs = []
for i, class_logits in enumerate(logits):
preds = class_logits.cpu().argmax(dim=-1)
corr = (preds == i).sum().item()
tot = preds.size(0)
accs.append(corr / tot)
self.vae.train()
set_parameter_requires_grad(self.vae, True)
if self.generalized:
split_dim = len(features) - len(seen_test_features)
acc_u = sum(accs[:split_dim]) / len(accs[:split_dim])
acc_s = sum(accs[split_dim:]) / len(accs[split_dim:])
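            # GZSL harmonic mean: H = 2 * acc_s * acc_u / (acc_s + acc_u)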
acc = 2 * acc_s * acc_u / (acc_s + acc_u)
return [acc, acc_s, acc_u]
return [sum(accs) / len(accs)]
def eval_softmax(self, epochs, lr, beta1, per_class, weight_decay=0, use_mapper=False):
"""Evaluates generator in the desired ZSL setting
within the Z2FSL framework.
Args:
epochs (`int`): number of epochs to finetune FSL classifier.
lr (`float`): learning rate of linear classifier.
            beta1 (`float`): beta1 of linear classifier's Adam optimizer.
per_class (`int`): number of generations for unseen classes.
weight_decay (`float`, optional): weight decay (L2 regularization), default `0`.
use_mapper (`bool`, optional): use FSL classifier's net to map features to
its space, default `False`.
Returns:
            `list` of `float`s: `[accuracy]` in ZSL, `[harmonic, seen, unseen]` in GZSL.
"""
if use_mapper:
mapper = deepcopy(self.fsl_classifier.mapper)
mapper.eval()
set_parameter_requires_grad(mapper, False)
else:
mapper = nn.Identity().to(self.device)
self.vae.eval()
set_parameter_requires_grad(self.vae, False)
eval_episode_factory = FeatureEpisodeFactory(
split=self.eval_split,
stat_split=self.train_split,
dataset_dir=self.episode_factory_kwargs['dataset_dir'],
feature_dir=self.episode_factory_kwargs['feature_dir'],
proposed_splits=self.episode_factory_kwargs['proposed_splits'],
l2_attr_norm=self.episode_factory_kwargs['l2_attr_norm'],
feature_norm=self.episode_factory_kwargs['feature_norm'],
augmentation=False,
generalized=self.generalized,
)
features, attributes, class_ids = eval_episode_factory()
if self.generalized:
train_episode_factory = FeatureEpisodeFactory(
split=self.train_split,
inner_split='test',
dataset_dir=self.episode_factory_kwargs['dataset_dir'],
feature_dir=self.episode_factory_kwargs['feature_dir'],
proposed_splits=self.episode_factory_kwargs['proposed_splits'],
l2_attr_norm=self.episode_factory_kwargs['l2_attr_norm'],
feature_norm=self.episode_factory_kwargs['feature_norm'],
augmentation=False,
generalized=True,
)
seen_test_features, _, train_class_ids = train_episode_factory()
train_class_ids = [cid + len(class_ids) for cid in train_class_ids]
episode_factory = FeatureEpisodeFactory(
split=self.train_split,
inner_split='train',
dataset_dir=self.episode_factory_kwargs['dataset_dir'],
feature_dir=self.episode_factory_kwargs['feature_dir'],
proposed_splits=self.episode_factory_kwargs['proposed_splits'],
l2_attr_norm=self.episode_factory_kwargs['l2_attr_norm'],
feature_norm=self.episode_factory_kwargs['feature_norm'],
augmentation=False,
generalized=True,
)
seen_train_features, _, train_class_ids_perm = episode_factory()
train_class_ids_perm = [cid + len(class_ids) for cid in train_class_ids_perm]
# align train and test seen-classes features
seen_train_features = [
seen_train_features[train_class_ids_perm.index(tid)] for tid in train_class_ids
]
features.extend(seen_test_features)
attributes = torch.stack(attributes).to(self.device)
attributes = attributes.repeat_interleave(per_class, dim=0)
noise = torch.randn(attributes.size(0), self.noise_dim, device=self.device)
gen_features = self.vae.decode(attributes, noise)
gen_features = mapper(gen_features)
gen_ids = torch.tensor(class_ids).repeat_interleave(per_class, dim=0)
train_features = gen_features.to('cpu')
train_ids = gen_ids
n_classes = eval_episode_factory.n_classes
if self.generalized:
n_classes += train_episode_factory.n_classes
seen_ids = torch.cat(
[
torch.tensor([sid]).repeat(feats.size(0))
for sid, feats in zip(train_class_ids, seen_train_features)
]
)
train_ids = torch.cat([train_ids, seen_ids])
seen_train_features = mapper(torch.cat(seen_train_features).to(self.device)).to('cpu')
train_features = torch.cat([train_features, seen_train_features])
train_dl = torch.utils.data.DataLoader(
torch.utils.data.TensorDataset(train_features, train_ids),
batch_size=128,
shuffle=True,
)
classifier = nn.Linear(train_features.size(1), n_classes).to(self.device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(
classifier.parameters(), lr=lr, weight_decay=weight_decay, betas=(beta1, 0.999)
)
for _ in range(epochs):
for feats, lbls in train_dl:
feats, lbls = feats.to(self.device), lbls.to(self.device)
logits = classifier(feats)
loss = criterion(logits, lbls)
optimizer.zero_grad()
loss.backward()
optimizer.step()
classifier.eval()
set_parameter_requires_grad(classifier, False)
test_ids = []
test_features = []
if self.generalized:
class_ids.extend(train_class_ids)
for feats, cids in zip(features, class_ids):
test_ids.append(torch.tensor([cids] * feats.size(0)))
test_features.append(mapper(feats.to(self.device)).to('cpu'))
accs = []
for feats, lbls in zip(test_features, test_ids):
feats, lbls = feats.to(self.device), lbls.to(self.device)
preds = classifier(feats).argmax(dim=-1)
accs.append((preds == lbls).sum().item() / feats.size(0))
self.vae.train()
set_parameter_requires_grad(self.vae, True)
if self.generalized:
split_dim = len(features) - len(seen_test_features)
acc_u = sum(accs[:split_dim]) / len(accs[:split_dim])
acc_s = sum(accs[split_dim:]) / len(accs[split_dim:])
acc = 2 * acc_s * acc_u / (acc_s + acc_u)
return [acc, acc_s, acc_u]
return [sum(accs) / len(accs)]
def test_setting(
generalized,
pretrained_fsl,
pretrained_dir,
dataset_dir,
feature_dir,
proposed_splits,
l2_attr_norm,
feature_norm,
augmentation,
gen_learning_rates,
fsl_learning_rates,
metadata_dim,
gen_beta1s,
fsl_beta1s,
generator_hidden_layers,
critic_hidden_layers,
classifier_hidden_layers,
embedding_dims,
mix,
ways,
shots,
queries_pers,
lambda_z2fsl,
lambda_wgan,
lambda_vae,
device,
val_epochs,
val_samples,
train_val_samples,
episodes,
eval_interval,
train_fsl,
real_support,
testing,
val_lr=1e-3,
val_beta1=0.5,
log_fn=None,
grid_search=True,
softmax=True,
):
"""Performs (optional) grid search and (optional) evaluation.
For description of args, see `Z2FSL_VAEGAN_Trainer` and its methods.
`pretrained_fsl`, `generalized`, `gen_learning_rates`, `gen_beta1s`,
`fsl_learning_rates`, `fsl_beta1s`, `generator_hidden_layers`,
`classifier_hidden_layers`, `critic_hidden_layers`, `lambda_z2fsl`,
`l2_attr_norm`, `proposed_splits`, `augmentation`, `ways`, `shots`, `queries_pers`,
`embedding_dims`, `train_fsl`, `mix`, `lambda_wgan`, `lambda_vae`, `real_support`
should be lists so that the grid search can search through."""
if log_fn is not None:
dire = os.path.split(log_fn)[0]
if dire and not os.path.exists(dire):
os.makedirs(dire)
search_params = dict(
pretrained_fsl=pretrained_fsl,
generalized=generalized,
gen_lr=gen_learning_rates,
gen_beta1=gen_beta1s,
fsl_lr=fsl_learning_rates,
fsl_beta1=fsl_beta1s,
generator_hidden_layers=generator_hidden_layers,
classifier_hidden_layers=classifier_hidden_layers,
critic_hidden_layers=critic_hidden_layers,
lambda_z2fsl=lambda_z2fsl,
l2_attr_norm=l2_attr_norm,
proposed_splits=proposed_splits,
augmentation=augmentation,
way=ways,
shot=shots,
queries_per=queries_pers,
embedding_dim=embedding_dims,
train_fsl=train_fsl,
mix=mix,
feature_norm=[feature_norm],
lambda_wgan=lambda_wgan,
lambda_vae=lambda_vae,
real_support=real_support,
)
max_accs = dict()
best_eps = dict()
best_params = dict()
best_train_samples = train_val_samples[0] # only for more-shots, no need for dict
param_grid = ParameterGrid(search_params)
if grid_search or len(param_grid) > 1:
for i, params in enumerate(param_grid):
init_msg = 'Running {}/{} {}'.format(i + 1, len(param_grid), params)
print('#' * len(init_msg) + '\n' + init_msg + '\n')
n_critic = 5 if params['lambda_wgan'] > 0 else 0
trainer = Z2FSL_VAEGAN_Trainer(
train_split='train',
eval_split='val',
metadata_dim=metadata_dim,
generator_hidden_layers=params['generator_hidden_layers'],
classifier_hidden_layers=params['classifier_hidden_layers'],
critic_hidden_layers=params['critic_hidden_layers'],
embedding_dim=params['embedding_dim'],
lambda_wgan=params['lambda_wgan'],
way=params['way'],
shot=params['shot'],
queries_per=params['queries_per'],
pretrained_fsl_dir=pretrained_dir if params['pretrained_fsl'] else '',
lambda_z2fsl=params['lambda_z2fsl'],
lambda_vae=params['lambda_vae'],
dataset_dir=dataset_dir,
feature_dir=feature_dir,
feature_norm=feature_norm,
proposed_splits=params['proposed_splits'],
l2_attr_norm=params['l2_attr_norm'],
device=device,
train_fsl=params['train_fsl'],
augmentation=params['augmentation'],
generalized=params['generalized'],
)
evals = trainer.train(
episodes=episodes,
eval_interval=eval_interval,
print_interval=50,
gen_lr=params['gen_lr'],
gen_beta1=params['gen_beta1'],
fsl_lr=params['fsl_lr'],
fsl_beta1=params['fsl_beta1'],
val_epochs=val_epochs,
val_samples=val_samples,
train_val_samples=train_val_samples,
val_beta1=val_beta1,
val_lr=val_lr,
mix=params['mix'],
real_support=params['real_support'],
softmax=softmax,
n_critic=n_critic,
)
# renew best measurements
for eps in evals:
# check if best result in every category
for method in evals[eps]:
try:
method_splits = method.split('-')
# check if method name has train_val_samples at the end
train_samples = int(method_splits[-1])
actual_method = '-'.join(method_splits[:-1])
                    except ValueError:
                        train_samples = best_train_samples  # make sure max value doesn't change
actual_method = method
if evals[eps][method][0] > max_accs.get(actual_method, [-1])[0]:
max_accs[actual_method] = evals[eps][method]
best_eps[actual_method] = eps
best_params[actual_method] = params
best_train_samples = train_samples
if log_fn is not None and evals:
with open(log_fn, 'a') as lfp:
lfp.write('#' * len(str(params)) + '\n' + str(params) + '\n')
lfp.write(','.join(['eps'] + list(max_accs)) + '\n') # get eval method names
for eps in evals:
lfp.write(str(eps) + ',')
lfp.write(
','.join(
[
'-'.join(['{:.5f}'.format(acc) for acc in evals[eps][method]])
for method in evals[eps]
]
)
)
lfp.write('\n')
lfp.write('#' * len(str(params)) + '\n')
best_val_msg = []
for method in max_accs:
best_msg_params = {'eps': best_eps[method]}
if generalized:
best_msg_params.update(
{'train_samples': best_train_samples, 'val_samples': val_samples}
)
best_msg_params.update(best_params[method])
best_val_msg.append(
'{}:{}={}'.format(
method,
best_msg_params,
'-'.join(['{:.5f}'.format(acc) for acc in max_accs[method]]),
)
)
delim = '^' * len(best_val_msg[0])
best_val_msg = [delim] + best_val_msg + [delim]
best_val_msg = '\n'.join(best_val_msg) + '\n'
print(best_val_msg)
if log_fn is not None:
with open(log_fn, 'a') as lfp:
lfp.write(best_val_msg)
else:
best_params = {testing: next(iter(param_grid))}
best_eps = {testing: episodes}
if testing is not None or (not grid_search and len(param_grid) == 1):
params = best_params[testing]
eps = best_eps[testing]
best_msg_params = {'eps': eps}
if generalized:
best_msg_params.update(
{'train_samples': best_train_samples, 'val_samples': val_samples}
)
best_msg_params.update(params)
best_msg = 'Running best params {}'.format(best_msg_params)
print(best_msg + '\n' + '$' * len(best_msg))
n_critic = 5 if params['lambda_wgan'] > 0 else 0
trainer = Z2FSL_VAEGAN_Trainer(
train_split='trainval',
eval_split='test',
metadata_dim=metadata_dim,
generator_hidden_layers=params['generator_hidden_layers'],
classifier_hidden_layers=params['classifier_hidden_layers'],
critic_hidden_layers=params['critic_hidden_layers'],
embedding_dim=params['embedding_dim'],
lambda_wgan=params['lambda_wgan'],
way=params['way'],
shot=params['shot'],
queries_per=params['queries_per'],
pretrained_fsl_dir=pretrained_dir if params['pretrained_fsl'] else '',
lambda_z2fsl=params['lambda_z2fsl'],
lambda_vae=params['lambda_vae'],
dataset_dir=dataset_dir,
feature_dir=feature_dir,
feature_norm=feature_norm,
proposed_splits=params['proposed_splits'],
l2_attr_norm=params['l2_attr_norm'],
device=device,
train_fsl=params['train_fsl'],
augmentation=params['augmentation'],
generalized=params['generalized'],
)
evals = trainer.train(
episodes=eps,
eval_interval=eps,
print_interval=50,
gen_lr=params['gen_lr'],
gen_beta1=params['gen_beta1'],
fsl_lr=params['fsl_lr'],
fsl_beta1=params['fsl_beta1'],
val_epochs=val_epochs,
val_samples=val_samples,
train_val_samples=[best_train_samples],
val_beta1=val_beta1,
val_lr=val_lr,
mix=params['mix'],
real_support=params['real_support'],
softmax=softmax,
n_critic=n_critic,
)
if log_fn is not None:
eval_msg = 'Evaluation {}={}\n'.format(
best_msg_params, ','.join([str(acc) for acc in evals[eps].values()])
)
with open(log_fn, 'a') as lfp:
lfp.write(eval_msg)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-ms', '--manual_seed', type=int, help='set random seed')
parser.add_argument(
'-dt',
'--direct_testing',
default=False,
action='store_true',
help='whether to directly run test if only one set of params is given',
)
parser.add_argument(
'-test',
'--testing',
type=str,
choices=['normal', 'more-shots', 'finetune', 'softmax', 'mapped-softmax'],
help='if set, evaluates best configuration on test',
)
parser.add_argument(
'-gen',
'--generalized',
type=str2bool,
nargs='+',
default=[False],
help='whether to run GZSL',
)
parser.add_argument(
'-pfsl',
'--pretrained_fsl',
type=str2bool,
nargs='+',
default=[True],
help='whether to use pretrained prototypical net',
)
parser.add_argument(
'-fsld',
'--pretrained_fsl_dir',
type=str,
default='models',
help='directory of pretrained fsls.',
)
parser.add_argument('-md', '--metadata_dim', type=int, help='dimensionality of metadata')
parser.add_argument(
'-tfsl',
'--train_fsl',
type=str2bool,
nargs='+',
default=[False],
help='whether to train the fsl algo',
)
parser.add_argument(
'-dd', '--dataset_dir', type=str, default='CUB_200_2011', help='dataset root directory'
)
parser.add_argument(
'-fd',
'--feature_dir',
type=str,
default='features',
help='feature directory wrt dataset_dir',
)
parser.add_argument(
'-fn',
'--feature_norm',
default=False,
action='store_true',
help='whether to normalize features',
)
parser.add_argument(
'-ps',
'--proposed_splits',
type=str2bool,
nargs='+',
default=[True],
help='whether to use proposed splits',
)
parser.add_argument(
'-l2',
'--l2_attr_norm',
type=str2bool,
nargs='+',
default=[True],
help='whether to perform l2 norm on individual attributes',
)
parser.add_argument(
'-rs',
'--real_support',
type=str2bool,
nargs='+',
default=[True],
help='whether support of seen classes is real or generated',
)
parser.add_argument(
'-aug',
'--augmentation',
type=str2bool,
nargs='+',
default=[True],
help=('whether to use data augmentation ' '(not available for proposed features)'),
)
parser.add_argument('-l', '--log_fn', type=str, help='file to log val metrics')
parser.add_argument(
'-glr',
'--gen_learning_rates',
type=float,
nargs='+',
default=[1e-4],
help='wgan learning rates to search over',
)
parser.add_argument(
'-flr',
'--fsl_learning_rates',
type=float,
nargs='+',
default=[1e-4],
help='fsl learning rates to search over',
)
parser.add_argument(
'-gb1',
'--gen_beta1s',
type=float,
nargs='+',
default=[
0.5,
],
help='wgan beta_1 coefficients of Adam to search over',
)
parser.add_argument(
'-fb1',
'--fsl_beta1s',
type=float,
nargs='+',
default=[
0.5,
],
help='fsl beta_1 coefficients of Adam to search over',
)
parser.add_argument(
'-chl',
'--critic_hidden_layers',
type=str,
nargs='+',
default=['4096'],
help=(
'per layer hidden neurons for the critic to search over'
', integers split by dashes for multiple hidden layers'
),
)
parser.add_argument(
'-ghl',
'--generator_hidden_layers',
type=str,
nargs='+',
default=['4096'],
help=(
'per layer hidden neurons for the generator to search over'
', integers split by dashes for multiple hidden layers'
),
)
parser.add_argument(
'-clsfhl',
'--classifier_hidden_layers',
type=str,
nargs='+',
default=['2048'],
help=(
'per layer hidden neurons for the fsl algo to search over'
', integers split by dashes for multiple hidden layers'
),
)
parser.add_argument(
'-emb',
'--embedding_dims',
type=int,
nargs='+',
default=[2048],
help='embedding dim for protonet (fsl algo)',
)
parser.add_argument(
'-mix',
dest='mix',
type=str2bool,
nargs='+',
default=[False],
help='whether to mix support and queries during training',
)
parser.add_argument(
'-s',
'--shots',
type=int,
nargs='+',
default=[
10,
],
help='samples per class during training',
)
parser.add_argument(
'-w',
'--ways',
type=int,
nargs='+',
default=[
20,
],
help='number of classes per episode',
)
parser.add_argument(
'-qp',
'--queries_pers',
type=int,
nargs='+',
default=[
10,
],
help='number of samples to fetch per class',
)
parser.add_argument(
'-z2f',
'--lambda_z2fsl',
type=float,
nargs='+',
default=[0],
help='classifier loss coefficients to search over',
)
parser.add_argument(
'-wgan',
'--lambda_wgan',
type=float,
nargs='+',
default=[1000],
help='wgan loss coefficients to search over',
)
parser.add_argument(
'-vae',
'--lambda_vae',
type=float,
nargs='+',
default=[1],
help='vae loss coefficients to search over',
)
parser.add_argument(
'-vs',
'--val_samples',
type=int,
default=500,
help='number of samples per class to generate during testing',
)
parser.add_argument(
'-tvs',
'--train_val_samples',
type=int,
nargs='+',
help='number of samples per class for seen classes during testing',
)
parser.add_argument(
'-ve',
'--val_epochs',
type=int,
default=25,
help='number of epochs to train on synthetic data',
)
parser.add_argument(
'-vlr', '--val_lr', type=float, default=1e-3, help='learning rate of synthetic classifier'
)
parser.add_argument(
'-vb1',
'--val_beta1',
type=float,
default=0.5,
help='beta1 coefficient of Adam for synthetic classifier',
)
parser.add_argument('-dvc', '--device', type=str, default='cuda:0', help='device to run on')
parser.add_argument('-eps', '--episodes', type=int, default=15000, help='episodes to run')
parser.add_argument(
'-eval',
'--eval_interval',
type=int,
default=1000,
help='per how many episodes to evaluate model',
)
parser.add_argument(
'-soft',
'--softmax',
default=False,
action='store_true',
help='whether to run softmax or not',
)
args = parser.parse_args()
if args.manual_seed is not None:
manual_seed(args.manual_seed)
ds_name = dataset_name(args.dataset_dir)
if args.metadata_dim is None:
if ds_name == 'cub':
args.metadata_dim = 312
elif ds_name == 'awa2':
args.metadata_dim = 85
elif ds_name == 'sun':
args.metadata_dim = 102
if args.train_val_samples is None:
args.train_val_samples = [args.val_samples]
    # deduplicate if more options than necessary are provided
args.proposed_splits = list(set(args.proposed_splits))
args.l2_attr_norm = list(set(args.l2_attr_norm))
args.train_fsl = list(set(args.train_fsl))
args.generalized = list(set(args.generalized))
args.pretrained_fsl = list(set(args.pretrained_fsl))
args.real_support = list(set(args.real_support))
# just turn into lists
args.critic_hidden_layers = [
[int(n_h) for n_h in chl.split('-') if chl] for chl in args.critic_hidden_layers
]
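    # e.g. ['4096-2048'] -> [[4096, 2048]]; an empty string maps to [] (no hidden layers)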
args.generator_hidden_layers = [
[int(n_h) for n_h in chl.split('-') if chl] for chl in args.generator_hidden_layers
]
args.classifier_hidden_layers = [
[int(n_h) for n_h in chl.split('-') if chl] for chl in args.classifier_hidden_layers
]
if args.testing == 'softmax':
args.softmax = True
test_setting(
pretrained_fsl=args.pretrained_fsl,
pretrained_dir=args.pretrained_fsl_dir,
dataset_dir=args.dataset_dir,
grid_search=not args.direct_testing,
metadata_dim=args.metadata_dim,
feature_dir=args.feature_dir,
proposed_splits=args.proposed_splits,
l2_attr_norm=args.l2_attr_norm,
augmentation=args.augmentation,
feature_norm=args.feature_norm,
gen_learning_rates=args.gen_learning_rates,
fsl_beta1s=args.fsl_beta1s,
fsl_learning_rates=args.fsl_learning_rates,
gen_beta1s=args.gen_beta1s,
generator_hidden_layers=args.generator_hidden_layers,
classifier_hidden_layers=args.classifier_hidden_layers,
critic_hidden_layers=args.critic_hidden_layers,
embedding_dims=args.embedding_dims,
mix=args.mix,
ways=args.ways,
shots=args.shots,
queries_pers=args.queries_pers,
lambda_z2fsl=args.lambda_z2fsl,
lambda_vae=args.lambda_vae,
lambda_wgan=args.lambda_wgan,
device=args.device,
val_epochs=args.val_epochs,
val_samples=args.val_samples,
train_val_samples=args.train_val_samples,
episodes=args.episodes,
eval_interval=args.eval_interval,
val_lr=args.val_lr,
val_beta1=args.val_beta1,
log_fn=args.log_fn,
train_fsl=args.train_fsl,
generalized=args.generalized,
testing=args.testing,
real_support=args.real_support,
softmax=args.softmax,
)
| 57,772 | 36.248872 | 100 | py |
z2fsl | z2fsl-main/z2fsl/modules/feature_extractors.py | """Pre-trained visual feature extractors."""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models import (
googlenet,
inception_v3,
resnet18,
resnet34,
resnet50,
resnet101,
resnet152,
vgg16_bn,
vgg11_bn,
vgg13_bn,
vgg19_bn,
)
class ImagenetNormalize(nn.Module):
"""Normalizes images of datasets from [-1, 1] to
ImageNet. Operates on batches rather than on individual
images. Make into a nn.Module so as to use in forward().
Note that values are solely based on ImageNet and Module
has no trainable parameters.
Attributes:
        mean (`torch.nn.parameter.Parameter`): channel-wise mean to subtract.
        std (`torch.nn.parameter.Parameter`): channel-wise std to divide by.
"""
def __init__(self):
"""Init."""
super().__init__()
# Parameter to follow module's device
# but require_grad False to keep const
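        # the constants are the ImageNet statistics remapped for inputs in [-1, 1]:
        # with m = (0.485, 0.456, 0.406) and s = (0.229, 0.224, 0.225),
        # (x - (2m - 1)) / (2s) equals ((x + 1) / 2 - m) / s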
self.mean = nn.Parameter(
torch.tensor((-0.03, -0.088, -0.188))[None, :, None, None],
requires_grad=False,
)
self.std = nn.Parameter(
torch.tensor((0.458, 0.448, 0.450))[None, :, None, None],
requires_grad=False,
)
def forward(self, x):
"""Forward propagation.
Args:
x(nn.Tensor): input batch.
Returns:
normalized x.
"""
return (x - self.mean) / self.std
GoogleNet_dim = 1024
ResNet_dim = 2048 # NOTE: only for default
VGG_dim = 25088  # 512 * 7 * 7 out of VGG's adaptive average pooling
Inception_dim = 2048
class ResNetAvgpool(nn.Module):
"""ResNet feature extractor.
Features are extracted by Average Pooling layer of ResNets.
Note that requires_grad is kept as `True` (because we use
feature extractors as submodules in classifiers) and
torch.no_grad() should be used to disable differentiation.
Attributes:
features (`nn.Module`): net doing the feature extraction.
inp_trans (`nn.Module`): input transformation from dataset to
ImageNet.
"""
def __init__(self, resnet_id=101):
"""Init.
Args:
            resnet_id (`int`, optional): ResNet version to load,
                default=`101` (i.e. ResNet101).
"""
super().__init__()
assert resnet_id in (18, 34, 50, 101, 152), 'Invalid ResNet id'
resnet = globals()['resnet{}'.format(resnet_id)](pretrained=True, progress=False)
self.features = nn.Sequential(*list(resnet.children())[:-1])
self.inp_trans = ImagenetNormalize()
def forward(self, x):
"""Forward propagation.
Args:
x (`torch.Tensor`): input image.
Returns:
512-dimensional representation if ResNet{18, 34},
            2048-dimensional representation otherwise.
"""
x = self.inp_trans(x)
x = self.features(x)
        x = x.view(x.size(0), -1)  # 512 for ResNet18/34, 2048 for ResNet50/101/152
return x
class VGGAvgpool(nn.Module):
"""VGG feature extractor.
    Features are extracted from the average pooling layer of VGGs.
Note that requires_grad is kept as `True` (because we use
feature extractors as submodules in classifiers) and
torch.no_grad() should be used to disable differentiation.
Attributes:
features (`nn.Module`): net doing the feature extraction.
inp_trans (`nn.Module`): input transformation.
"""
def __init__(self, vgg_year=16):
"""Init.
Args:
            vgg_year (int, optional): VGG variant (number of weight layers),
                default=`16` (i.e. VGG-16).
"""
super().__init__()
assert vgg_year in (11, 13, 16, 19), 'Invalid VGG year'
vgg = globals()['vgg{}_bn'.format(vgg_year)](pretrained=True, progress=False)
self.features = nn.Sequential(*list(vgg.children())[:2])
self.inp_trans = ImagenetNormalize()
def forward(self, x):
"""Forward propagation.
Args:
x (`torch.Tensor`): input image.
Returns:
            25088-dimensional representation.
"""
x = self.inp_trans(x)
x = self.features(x)
        x = x.view(x.size(0), -1)  # 25088 = 512 * 7 * 7
return x
class Inception3Avgpool(nn.Module):
"""Inception v3 feature extractor.
    Features are extracted from the average pooling layer of Inception v3.
Note that requires_grad is kept as `True` (because we use
feature extractors as submodules in classifiers) and
torch.no_grad() should be used to disable differentiation.
Attributes:
        features (`nn.Module`): the whole Inception v3 model is kept because
            the original forward pass is re-implemented here with functional pooling.
        inp_trans (`nn.Module`): input transformation.
"""
def __init__(self):
"""Init."""
super().__init__()
self.features = inception_v3(pretrained=True, progress=False)
self.inp_trans = ImagenetNormalize()
def forward(self, x):
"""Forward propagation.
Args:
x (`torch.Tensor`): input image.
Returns:
2048-dimensional representation.
"""
x = self.inp_trans(x)
x = self.features.Conv2d_1a_3x3(x)
x = self.features.Conv2d_2a_3x3(x)
x = self.features.Conv2d_2b_3x3(x)
x = F.max_pool2d(x, kernel_size=3, stride=2)
x = self.features.Conv2d_3b_1x1(x)
x = self.features.Conv2d_4a_3x3(x)
x = F.max_pool2d(x, kernel_size=3, stride=2)
x = self.features.Mixed_5b(x)
x = self.features.Mixed_5c(x)
x = self.features.Mixed_5d(x)
x = self.features.Mixed_6a(x)
x = self.features.Mixed_6b(x)
x = self.features.Mixed_6c(x)
x = self.features.Mixed_6d(x)
x = self.features.Mixed_6e(x)
x = self.features.Mixed_7a(x)
x = self.features.Mixed_7b(x)
x = self.features.Mixed_7c(x)
x = F.adaptive_avg_pool2d(x, (1, 1))
x = x.view(x.size(0), -1) # 2048
return x
class GoogleNetAvgpool(nn.Module):
"""GoogleNet feature extractor.
Features are extracted from Average pooling layer of GoogleNet.
Note that requires_grad is kept as `True` (because we use
feature extractors as submodules in classifiers) and
torch.no_grad() should be used to disable differentiation.
Attributes:
features (`nn.Module`): net doing the feature extraction.
inp_trans (`nn.Module`): input transformation.
"""
def __init__(self):
"""Init."""
super().__init__()
glenet = googlenet(pretrained=True, progress=False)
self.features = nn.Sequential(*list(glenet.children())[:-2])
self.inp_trans = ImagenetNormalize()
def forward(self, x):
"""Forward propagation.
Args:
x (`torch.Tensor`): input image.
Returns:
1024-dimensional representation.
"""
x = self.inp_trans(x)
x = self.features(x)
x = x.view(x.size(0), -1) # 1024
return x
| 7,014 | 27.40081 | 89 | py |
z2fsl | z2fsl-main/z2fsl/modules/classifiers.py | """Supervised Classifiers."""
import torch
import torch.nn as nn
from z2fsl.utils.submodules import MLP
class PrototypicalNet(nn.Module):
"""Classifies examples based on distance metric
from class prototypes. FSL setting.
Attributes:
mapper (`nn.Module`): mapper from feature to
embedding space.
dist (function): distance function. Accepts
2D torch.Tensor prototypes and 2D
torch.Tensor queries and returns a 2D torch.Tensor
whose [i, j] element is the distance between
            queries[i] and prototypes[j].
"""
def __init__(
self,
in_features=512,
out_features=512,
hidden_layers=None,
dist='euclidean',
init_diagonal=False,
):
"""Init.
Args:
in_features (`int`, optional): input features dimension,
default=`512`.
out_features (`int`, optional): final output dimension,
default=`512`.
hidden_layers (`list` | `None`, optional): number of neurons
in hidden layers, default is one hidden
layer with units same as input.
dist (function | `str`, optional): distance metric. If str,
predefined distance is used accordingly.
If function is passed, it should accept
2D torch.Tensor prototypes and 2D
torch.Tensor queries and return a 2D torch.Tensor
whose [i, j] element is the distance between
                queries[i] and prototypes[j].
init_diagonal (`bool`, optional): whether to init linear layers
with diagonal weights and zero biases, default=`False`.
"""
super().__init__()
if hidden_layers is None:
hidden_layers = [in_features]
self.mapper = MLP(in_features, out_features, hidden_layers, hidden_actf=nn.ReLU())
if init_diagonal:
self.mapper.init_diagonal()
if isinstance(dist, str):
self.dist = self.__getattribute__(dist)
else:
self.dist = dist
@staticmethod
def cosine(prototypes, queries):
"""Computes cosine distance between prototypes
and set of queries.
Args:
prototypes (`torch.Tensor`): prototypes of size
(way, embedding_dim).
queries (`torch.Tensor`): queries of size
(n_queries, embedding_dim).
Returns:
A torch.Tensor of size (n_queries, way) where
element [i,j] contains distance between queries[i]
and prototypes[j].
"""
inner_prod = queries.matmul(prototypes.T)
norm_i = queries.norm(dim=1, keepdim=True)
norm_j = prototypes.norm(dim=1, keepdim=True).T
return 1 - inner_prod / norm_i / norm_j
@staticmethod
def euclidean(prototypes, queries):
"""Computes euclidean distance between prototypes
and set of queries.
Args:
prototypes (`torch.Tensor`): prototypes of size
(way, embedding_dim).
queries (`torch.Tensor`): queries of size
(n_queries, embedding_dim).
Returns:
A torch.Tensor of size (n_queries, way) where
element [i,j] contains distance between queries[i]
and prototypes[j].
"""
way = prototypes.size(0)
n_queries = queries.size(0)
prototypes = prototypes.repeat(n_queries, 1)
queries = queries.repeat_interleave(way, 0)
# after the repeats, prototypes have way classes after way classes after ...
# and queries have way repeats of 1st query, way repeats of 2nd query, ...
# so initial dist vector has distance of first query to all way classes
        # then the distance of the second query to all way classes, etc.
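        # e.g. way=3, n_queries=2: prototypes -> [p0 p1 p2 p0 p1 p2] and
        # queries -> [q0 q0 q0 q1 q1 q1], so row i of the (2, 3) result holds
        # the distances from query i to every prototype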
return torch.norm(prototypes - queries, dim=1).view(n_queries, way)
def forward(self, support, query, mix=False):
"""Episodic forward propagation.
Computes prototypes given the support set of an episode
and then makes inference on the corresponding query set.
Args:
support (`list` of `torch.Tensor`s): support set list
whose every element is tensor of size
(shot, feature_dim), i.e. shot image features
belonging to the same class.
query(`list` of `torch.Tensor`s): query set list
whose every element is tensor of size
(n_queries, feature_dim), i.e. n_queries image
features belonging to the same class (for consistency
purposes with support).
mix(`bool`, optional): mix support and query, default `False`.
Returns:
A list of torch.Tensor of size (n_queries, way) logits
whose i-th element consists of logits of queries belonging
to the i-th class.
"""
if mix:
mix_support = []
mix_query = []
shot = support[0].size(0)
for sup_feats, que_feats in zip(support, query):
feats = torch.cat((sup_feats, que_feats))
rand_idc = torch.randperm(len(feats)).numpy()
mix_support.append(feats[rand_idc[:shot]])
mix_query.append(feats[rand_idc[shot:]])
support = mix_support
query = mix_query
prototypes = []
for class_features in support:
# class_features are (shot, feature_dim)
prototypes.append(self.mapper(class_features).mean(dim=0))
prototypes = torch.stack(prototypes)
logits = []
for class_features in query:
# class_features are (n_queries, feature_dim)
logits.append(-self.dist(prototypes, self.mapper(class_features)))
return logits
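# Minimal usage sketch in comments (the way/shot/feature sizes below are
# illustrative assumptions, not values fixed by this repo):
#
#   net = PrototypicalNet(in_features=2048, out_features=512)
#   support = [torch.randn(3, 2048) for _ in range(5)]  # 5-way, 3-shot support
#   query = [torch.randn(4, 2048) for _ in range(5)]    # 4 queries per class
#   logits = net(support, query)  # list of 5 tensors, each of shape (4, 5)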
| 5,942 | 34.586826 | 90 | py |
z2fsl | z2fsl-main/z2fsl/pretraining/fsl_diagonal.py | """Prototypical Net training."""
import os
import argparse
import torch
import torch.optim as optim
import torch.nn as nn
from sklearn.model_selection import ParameterGrid
from z2fsl.utils.losses import EpisodeCrossEntropyLoss
from z2fsl.modules.classifiers import PrototypicalNet
from z2fsl.utils.datasets import FeatureEpisodeFactory
from z2fsl.utils.general import (
configuration_filename,
str2bool,
set_parameter_requires_grad,
dataset_name,
)
from z2fsl.utils.submodules import GaussianNoiseAugmentation
class FSLTrainer:
"""PN trainer class.
Attributes:
fsl_classifier (`nn.Module`): Prototypical Net.
episode_factory_kwargs (`dict`): episode factory keywords.
train_split (`str`): which split to train on (basically
`'train'` or `'trainval'`).
eval_split (`str`): which split to evaluate on (basically
`'val'` or `'test'`).
generalized (`bool`): whether to train and evaluate on GZSL.
way (`int`): way of training episodes.
shot (`int`): shot of training episodes (provided by generator).
queries_per (`int`): number of samples per class in query set.
device (`str`): device to run on.
noise_aug (`nn.Module`): Gaussian noise augmentation.
"""
def __init__(
self,
dataset_dir,
feature_dir='features',
train_split='traintrain',
eval_split='valtrain',
proposed_splits=True,
augmentation=False,
feature_norm=True,
feature_dim=2048,
way=20,
shot=10,
queries_per=10,
init_diagonal=True,
n_hidden=0,
device='cuda:0',
noise_std=0,
generalized=False,
):
"""Init.
Args:
dataset_dir (`str`): root dir of dataset.
feature_dir (`str`, optional): directory of features relative to `dataset_dir`.
proposed_splits (`bool`, optional): whether to use proposed splits,
default `True`.
            augmentation (`bool`, optional): whether to use augmentation, default `False`.
feature_norm (`bool`, optional): whether to minmax normalize features,
default `True`.
            train_split (`str`, optional): which split to train on (basically
                `'train'` or `'trainval'`), default `'traintrain'`.
            eval_split (`str`, optional): which split to evaluate on (basically
                `'val'` or `'test'`), default `'valtrain'`.
generalized (`bool`, optional): whether to train and evaluate on GZSL,
default `False`.
feature_dim (`int`, optional): dimension of input features, default `2048`.
n_hidden (`int`, optional): number of hidden layers, default `0`.
way (`int`, optional): way of training episodes, default `20`.
            shot (`int`, optional): shot of training episodes (provided by generator),
                default `10`.
queries_per (`int`, optional): number of samples per class in query set,
default `10`.
init_diagonal (`bool`, optional): whether to use PN intialization trick,
default `True`.
device (`str`, optional): device to run on, default `'cuda:0'`.
noise_std (`float`, optional): Gaussian noise std, default `0` (no noise).
"""
self.fsl_classifier = PrototypicalNet(
in_features=feature_dim,
out_features=feature_dim,
hidden_layers=n_hidden * [feature_dim],
init_diagonal=init_diagonal,
).to(device)
if noise_std > 0:
self.noise_aug = GaussianNoiseAugmentation(std=noise_std)
else:
self.noise_aug = nn.Identity()
self.noise_aug.to(device)
self.episode_factory_kwargs = dict(
dataset_dir=dataset_dir,
feature_dir=feature_dir,
feature_norm=feature_norm,
generalized=generalized,
proposed_splits=proposed_splits,
augmentation=augmentation,
)
self.train_split = train_split
self.eval_split = eval_split
self.generalized = generalized
self.way = way
self.shot = shot
self.queries_per = queries_per
self.device = device
def train(
self,
episodes=15000,
lr=1e-4,
beta1=0.5,
l2_coef=0,
lr_decay=False,
print_interval=30,
eval_interval=None,
):
"""Trains all components (FSL classifier, generator, encoder, discriminator)
and provides evaluation metrics.
Args:
episodes (`int`): number of training iterations.
lr (`float` | `None`, optional): FSL classifier's learning rate,
default `1e-4`.
beta1 (`float` | `None`, optional): FSL classifier's beta1 for Adam,
default `0.5`.
l2_coef (`float`, optional): weight decay, default `0`.
lr_decay (`bool`, optional): whether to apply learning rate decay,
default `False`.
print_interval (`int`, optional): episodes to wait before printing several
training losses, default `30`.
eval_interval (`int` | `None`, optional): episodes to wait before printing
evaluation metrics, default is at the end of training.
Returns:
`dict` iterations: accuracy key-value pairs, e.g.
{
1000: 0.34
2000: 0.54,
3000: 0.60
}
"""
if eval_interval is None:
eval_interval = episodes
episode_factory = FeatureEpisodeFactory(
split=self.train_split, inner_split='train', **self.episode_factory_kwargs
)
optimizer = optim.Adam(
self.fsl_classifier.parameters(), lr=lr, weight_decay=l2_coef, betas=(beta1, 0.999)
)
if lr_decay:
lr_decay = optim.lr_scheduler.MultiStepLR(
optimizer, milestones=range(eval_interval, episodes, eval_interval)
)
else:
lr_decay = optim.lr_scheduler.MultiplicativeLR(optimizer, lambda x: 1)
criterion = EpisodeCrossEntropyLoss()
evals = {}
for episode in range(episodes):
support, query, _ = episode_factory.fsl_episode(self.way, self.shot, self.queries_per)
support = [self.noise_aug(sup.to(self.device)) for sup in support]
query = [self.noise_aug(que.to(self.device)) for que in query]
logits = self.fsl_classifier(support, query)
loss = criterion(logits)
optimizer.zero_grad()
loss.backward()
optimizer.step()
lr_decay.step()
if episode == 0 or (episode + 1) % print_interval == 0:
correct = 0
total = 0
for i, class_logits in enumerate(logits):
preds = class_logits.cpu().argmax(dim=-1)
correct += (preds == i).sum().item()
total += preds.size(0)
acc = correct / total
print(
'Episode {} - Loss = {:.4f}, Accuracy = {:.2f}%'.format(
episode + 1, loss.item(), acc * 100
)
)
if (episode + 1) % eval_interval == 0:
evals[episode + 1] = self.eval()
return evals
def eval(self, shot=None, episodes=500):
"""Calculates test accuracy.
Args:
shot (`int` | `None`, optional): shot of test support, default is
same as training shot.
episodes (`int`, optional): number of episodes to average accuracy
over, default `500`.
Returns:
            `list` with [accuracy], or [harmonic_mean, unseen_acc, seen_acc]
            in the generalized setting.
"""
self.fsl_classifier.eval()
set_parameter_requires_grad(self.fsl_classifier, False)
if shot is None:
shot = self.shot
eval_episode_factory = FeatureEpisodeFactory(
split=self.eval_split,
stat_split=self.train_split,
dataset_dir=self.episode_factory_kwargs['dataset_dir'],
feature_dir=self.episode_factory_kwargs['feature_dir'],
proposed_splits=self.episode_factory_kwargs['proposed_splits'],
feature_norm=self.episode_factory_kwargs['feature_norm'],
augmentation=False,
generalized=self.generalized,
)
if self.generalized:
train_episode_factory = FeatureEpisodeFactory(
split=self.train_split,
inner_split='test',
dataset_dir=self.episode_factory_kwargs['dataset_dir'],
feature_dir=self.episode_factory_kwargs['feature_dir'],
proposed_splits=self.episode_factory_kwargs['proposed_splits'],
feature_norm=self.episode_factory_kwargs['feature_norm'],
augmentation=False,
generalized=self.generalized,
)
acc = 0
accs_s = 0
accs_u = 0
for _ in range(episodes):
if self.generalized:
n_classes = eval_episode_factory.n_classes + train_episode_factory.n_classes
                # keep the total episode way equal to the number of unseen
                # classes, split between unseen and seen proportionally to
                # their class counts
                eval_way = int(eval_episode_factory.n_classes ** 2 / n_classes)
train_way = eval_episode_factory.n_classes - eval_way
else:
n_classes = eval_episode_factory.n_classes
eval_way = n_classes
support, query, _ = eval_episode_factory.fsl_episode(
way=eval_way, shot=shot, queries_per=self.queries_per
)
if self.generalized:
_support, _query, _ = train_episode_factory.fsl_episode(
way=train_way, shot=shot, queries_per=self.queries_per
)
support.extend(_support)
query.extend(_query)
support = [sup.to(self.device) for sup in support]
query = [que.to(self.device) for que in query]
logits = self.fsl_classifier(support, query)
accs = []
for i, class_logits in enumerate(logits):
preds = class_logits.cpu().argmax(dim=-1)
accs.append((preds == i).sum().item() / len(preds))
if self.generalized:
acc_u = sum(accs[:eval_way]) / len(accs[:eval_way])
acc_s = sum(accs[eval_way:]) / len(accs[eval_way:])
accs_u += acc_u
accs_s += acc_s
                acc += 2 * acc_s * acc_u / (acc_s + acc_u)  # harmonic mean (GZSL H)
else:
acc += sum(accs) / len(accs)
self.fsl_classifier.train()
set_parameter_requires_grad(self.fsl_classifier, True)
if self.generalized:
return [acc / episodes, accs_u / episodes, accs_s / episodes]
return [acc / episodes]
def test_setting(
dataset_dir,
feature_dir,
feature_norm,
episodes,
eval_interval,
way,
shots,
queries_pers,
lrs,
beta1s,
weight_decays,
init_diagonals,
lr_decays,
n_hiddens,
proposed_splits,
device,
augmentation,
noise_stds,
generalized,
log_fn=None,
):
"""Performs (optional) grid search and (optional) evaluation.
For description of args, see `FSLTrainer` and its methods.
`shots`, `queries_pers`, `lrs`, `beta1s`, `init_diagonals`,
`lr_decays`, `n_hiddens`, `proposed_splits`, `weight_decays`,
`augmentation`, `noise_stds`, `generalized` should be lists
so that the grid search can search through."""
if log_fn is not None:
dire = os.path.split(log_fn)[0]
if dire and not os.path.exists(dire):
os.makedirs(dire)
search_params = dict(
shot=shots,
queries_per=queries_pers,
lr=lrs,
beta1=beta1s,
init_diagonal=init_diagonals,
lr_decay=lr_decays,
n_hidden=n_hiddens,
proposed_splits=proposed_splits,
weight_decay=weight_decays,
augmentation=augmentation,
feature_norm=[feature_norm],
noise_std=noise_stds,
generalized=generalized,
)
param_grid = ParameterGrid(search_params)
for i, params in enumerate(param_grid):
init_msg = 'Running {}/{}: {}'.format(i + 1, len(param_grid), params)
print(init_msg + '\n' + '#' * len(init_msg))
trainer = FSLTrainer(
dataset_dir=dataset_dir,
feature_dir=feature_dir,
way=way,
shot=params['shot'],
queries_per=params['queries_per'],
n_hidden=params['n_hidden'],
init_diagonal=params['init_diagonal'],
proposed_splits=params['proposed_splits'],
device=device,
augmentation=params['augmentation'],
feature_norm=feature_norm,
noise_std=params['noise_std'],
generalized=params['generalized'],
)
evals = trainer.train(
lr=params['lr'],
beta1=params['beta1'],
lr_decay=params['lr_decay'],
l2_coef=params['weight_decay'],
episodes=episodes,
eval_interval=eval_interval,
)
print()
for eps in evals:
print(
'Validation accuracy after {} episodes = {}'.format(
eps, ' '.join(['{:.1f}%'.format(acc * 100) for acc in evals[eps]])
)
)
print()
if log_fn is not None:
with open(log_fn, 'a') as lfp:
lfp.write(str(params) + '\n')
for eps in evals:
lfp.write(str(eps) + ',')
lfp.write(','.join(['{:.5f}'.format(acc) for acc in evals[eps]]))
lfp.write('\n')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'-dd', '--dataset_dir', type=str, default='CUB_200_2011', help='dataset root directory'
)
parser.add_argument(
'-fd',
'--feature_dir',
type=str,
default='normalized_proposed_features',
help='feature directory wrt dataset_dir',
)
parser.add_argument(
'-fn',
'--feature_norm',
default=False,
action='store_true',
help='whether to normalize features',
)
parser.add_argument(
'-gen',
'--generalized',
type=str2bool,
nargs='+',
default=[False],
help='whether to run GZSL',
)
parser.add_argument('-lr', '--learning_rates', type=float, default=1e-4, help='learning rate')
parser.add_argument('-w', '--way', type=int, default=10, help='number of classes per episode')
parser.add_argument('-s', '--shot', type=int, default=5, help='samples per class in support')
parser.add_argument(
'-qp', '--queries_pers', type=int, nargs='+', default=[5], help='queries per class in query'
)
parser.add_argument('-n', '--n_hidden', type=int, default=0, help='number of hidden layers')
parser.add_argument('-dvc', '--device', type=str, default='cuda:0', help='device to run on')
parser.add_argument(
'-spl',
'--split',
type=str,
choices=['train', 'trainval'],
default='trainval',
help='train split',
)
parser.add_argument('-eps', '--episodes', type=int, default=10000, help='episodes to run')
parser.add_argument('-mf', '--model_fn', type=str, required=True, help='model directory')
args = parser.parse_args()
trainer = FSLTrainer(
dataset_dir=args.dataset_dir,
way=args.way,
        queries_per=args.queries_pers[0],  # the flag is declared with nargs='+'
shot=args.shot,
n_hidden=args.n_hidden,
train_split=args.split,
augmentation=True,
        generalized=args.generalized[0],  # nargs='+' yields a list
feature_dir=args.feature_dir,
proposed_splits=True,
feature_norm=True,
device=args.device,
init_diagonal=True,
)
    trainer.train(episodes=args.episodes, lr=args.learning_rates, beta1=0.5, print_interval=200)
model_fn = os.path.join(
        args.model_fn,
'fsl_{}_{}'.format(
dataset_name(args.dataset_dir),
            configuration_filename(args.feature_dir, True, args.split, args.generalized[0]),
),
)
torch.save(trainer.fsl_classifier.to('cpu'), model_fn)
| 16,610 | 34.267516 | 100 | py |
z2fsl | z2fsl-main/z2fsl/utils/losses.py | """Losses."""
import torch
import torch.nn as nn
import torch.autograd as autograd
class EpisodeCrossEntropyLoss(nn.Module):
"""FSL episode classification loss.
Attributes:
criterion (`nn.Module`): module that computes loss.
reduction (`str`): how to reduce vector results.
"""
def __init__(self, reduction='mean'):
"""Init.
Args:
reduction (`str`, optional): how to reduce
vector results, default `'mean'`.
"""
assert reduction in ('mean', 'sum')
super().__init__()
self.criterion = nn.CrossEntropyLoss()
self.reduction = reduction
def forward(self, logits):
"""Computes loss.
Args:
logits (`list`): list of torch.Tensor logits. Index
i corresponds to i-th class.
Returns:
Loss.
"""
loss = []
for i, class_logits in enumerate(logits):
labels = i * torch.ones(
class_logits.size(0), dtype=torch.long, device=class_logits.device
)
loss.append(self.criterion(class_logits, labels))
if self.reduction == 'mean':
return sum(loss) / len(loss)
elif self.reduction == 'sum':
return sum(loss)
def gradient_penalty(critic, real_samples, gen_samples, real_cond=None, gen_cond=None):
"""Computes the gradient penalty of a critic in
the WGAN framework to enforce the Lipschitz constraint.
Args:
critic (`nn.Module`): WGAN critic.
real_samples (`torch.Tensor`): real samples
for the critic.
gen_samples (`torch.Tensor`): generated samples
for the critic.
real_cond (`torch.Tensor`, optional): real conditioning
variable for the critic.
gen_cond (`torch.Tensor`, optional): generated
conditioning variable for the critic. If
not set, real_cond is solely used.
Returns:
The gradient penalty as defined in
`Improved Training of Wasserstein GANs`.
"""
alpha = torch.empty(real_samples.size(0), 1, device=real_samples.device).uniform_()
inter_samples = alpha * real_samples + (1 - alpha) * gen_samples.detach()
inter_samples = nn.Parameter(inter_samples) # necessary for autograd
if real_cond is not None:
if gen_cond is None:
gen_cond = real_cond
inter_cond = alpha * real_cond + (1 - alpha) * gen_cond
outs = critic(inter_samples, inter_cond)
else:
outs = critic(inter_samples)
nabla = autograd.grad(outs.sum(), inter_samples, create_graph=True)[0]
return ((nabla.norm(2, dim=-1) - 1) ** 2).mean()
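# Usage sketch for the penalty above (the names and the coefficient are
# illustrative assumptions in the usual WGAN-GP style, not values fixed here):
#
#   gp = gradient_penalty(critic, real_feats, fake_feats)
#   critic_loss = critic(fake_feats).mean() - critic(real_feats).mean() + 10 * gp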
def kl_divergence(mus, logvars):
"""Computes KL divergence of N(mus, e^{logvar/2}) from N(0, I).
Args:
mus (`torch.Tensor`): size (batch_size, cond_dim) with the means.
        logvars (`torch.Tensor`): size (batch_size, cond_dim) with the
            logarithm of the variances.
Returns:
Loss.
"""
return (
0.5
* (
logvars.exp().sum(dim=-1) + (mus ** 2).sum(dim=-1) - logvars.sum(dim=-1) - mus.size(-1)
).mean()
)
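# The expression above is the closed form of
#   KL(N(mu, diag(sigma^2)) || N(0, I))
#     = 0.5 * sum_i (sigma_i^2 + mu_i^2 - log(sigma_i^2) - 1),
# with logvars = log(sigma^2), averaged over the batch.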
| 3,204 | 28.953271 | 99 | py |
z2fsl | z2fsl-main/z2fsl/utils/datasets.py | """Dataset."""
import os
import torch
from z2fsl.utils.conf import TRAIN_SPLIT, TEST_SPLIT
from z2fsl.utils.general import configuration_filename, slice_fn, dataset_name
class FeatureEpisodeFactory:
"""Factory of episodes/minibatches in a ZSL setting
with extracted features instead of images.
Attributes:
attributes (`dict`): str-torch.Tensor key-value pairs
where keys are "name" of classes and values are
attribute vectors.
dataset_dir (`str`): dataset root directory.
feature_dir (`str`): feature directory wrt dataset_dir.
way (`int`): number of classes to fetch from during
an episode.
queries_per (`int`): number of images per class to fetch
as query images.
id_mapping (`dict`): mapping between class ids and usable
labels.
n_classes (`int`): number of classes.
"""
def __init__(
self,
dataset_dir,
split='train',
feature_dir='features',
classes_fn='classes.txt',
augmentation=False,
inner_split='train',
proposed_splits=True,
l2_attr_norm=False,
feature_norm=False,
generalized=False,
**kwargs
):
"""Init.
Args:
dataset_dir (`str`): root directory of dataset.
split (`str`, optional): train, val or test split (also trainval),
default=`train`.
            feature_dir (`str`, optional): feature directory w.r.t. dataset_dir,
                default=`features`.
classes_fn (`str`, optional): txt with id and directory name for
all classes wrt dataset_dir, default=`classes.txt`.
augmentation (`bool`, optional): whether to use data augmentation
(during training), default=`False`. If using the proposed
                features (where no data augmentation is used), it should
not be set to `True` (or simply not set).
inner_split (`str`, optional): to be set if this dataset represents a
train split to specify if train or test features from the
split should be fetched in GZSL setting, options [`'train'`,
`'test'`], default `'train'`.
proposed_splits (`bool`, optional): whether to use proposed splits (PS)
from Xian et al., 2018, default=`True`.
generalized (`bool`, optional): whether testing GZSL, default `False`.
"""
ds_name = dataset_name(dataset_dir)
if ds_name == 'cub':
attributes_fn = 'attributes/class_attribute_labels_continuous.txt'
elif ds_name == 'awa2':
attributes_fn = 'predicate-matrix-continuous.txt'
elif ds_name == 'sun':
attributes_fn = 'continuous-attributes.txt'
if feature_norm:
stat_split = kwargs.get('stat_split', split)
stats_fn = configuration_filename(feature_dir, proposed_splits, stat_split, generalized)
stats_fn = os.path.join(dataset_dir, stats_fn)
minmax = torch.load(stats_fn)
self.load = lambda fn: (torch.load(fn) - minmax[0]) / (minmax[1] - minmax[0])
else:
self.load = torch.load
available_classes = os.path.join(
'proposed_splits' if proposed_splits else '', split + 'classes.txt'
)
with open(os.path.join(dataset_dir, available_classes), 'r') as avcls:
available_class_names = [line.strip() for line in avcls.readlines()]
with open(os.path.join(dataset_dir, classes_fn), 'r') as cfp:
class_names = [line.strip().split()[1] for line in cfp.readlines()]
with open(os.path.join(dataset_dir, attributes_fn), 'r') as afp:
attributes = [[float(val) for val in line.strip().split()] for line in afp.readlines()]
# attributes and class names from classes.txt file in same order -> zip
self.attributes = {
class_name: torch.tensor(attribute)
for class_name, attribute in zip(
class_names, attributes
)
if class_name in available_class_names
}
if l2_attr_norm:
self.attributes = {
class_name: self.attributes[class_name] / self.attributes[class_name].norm()
for class_name in self.attributes
}
### get all available filenames
filenames = {}
for class_name in self.attributes:
class_dir = os.path.join(dataset_dir, feature_dir, class_name)
filenames[class_name] = os.listdir(class_dir)
if generalized and split.startswith('train'):
# if generalized and this is a train split
# further restrict available files
loc_fn = '{}{}_{}_loc.txt'.format(
split, '_proposed_splits' if proposed_splits else '', inner_split
)
loc_fn = os.path.join(dataset_dir, loc_fn)
if os.path.exists(loc_fn): # if loc_fn exists, there are standard seen/unseen splits
with open(loc_fn, 'r') as lfp:
available_files = [line.strip() for line in lfp.readlines()]
# filter files not in standard splits
for class_name in filenames:
filenames[class_name] = [
fn for fn in filenames[class_name] if fn in available_files
]
else: # use arbitrary splits if standard ones are not present
_slice = slice_fn(TEST_SPLIT if inner_split == 'test' else TRAIN_SPLIT)
filenames = {class_name: _slice(filenames[class_name]) for class_name in filenames}
self.filenames = filenames
self.dataset_dir = dataset_dir
self.feature_dir = feature_dir
self.split = split
self.augmentation = augmentation
self.id_mapping = {real: usable for usable, real in enumerate(self.attributes)}
self.n_classes = len(self.attributes)
def __call__(self, way=None, queries_per=None): # [:None] returns all
"""Creates an episode/minibatch.
Args:
way (`int`, optional): number of classes to fetch from during
the episode.
queries_per (`int`, optional): number of images per class to fetch
as query images.
Returns:
A `list` of `torch.Tensor` features, a `list`
of `torch.Tensor` attributes, of way classes and
a `list` of IDs.
"""
rand_inds = torch.randperm(len(self.attributes)).numpy()[:way]
all_names = list(self.attributes)
class_names = [all_names[i] for i in rand_inds]
eps_attributes = [self.attributes[i] for i in class_names]
eps_features = []
class_ids = [self.id_mapping[class_name] for class_name in class_names]
for class_name in class_names:
class_features = []
feature_filenames = self.filenames[class_name]
rand_inds = torch.randperm(len(feature_filenames)).numpy()[:queries_per]
feature_filenames = [feature_filenames[i] for i in rand_inds]
for ft_fn in feature_filenames:
if self.augmentation and self.split.startswith('train'):
crop = torch.randint(10, (1,)).item()
else:
crop = 0 # center crop
features = self.load(
os.path.join(self.dataset_dir, self.feature_dir, class_name, ft_fn)
)[crop]
class_features.append(features)
eps_features.append(torch.stack(class_features))
return eps_features, eps_attributes, class_ids
def fsl_episode(self, way, shot, queries_per):
"""Fetches and FSL episode.
Args:
way (`int`): number of classes.
shot (`int`): number of samples per class for
support.
queries_per (`int`): number of samples per class
for querying.
Returns:
`list` of `way` `torch.Tensor`s with `shot` vectors each,
`list` of `way` `torch.Tensor`s with `queries_per` vectors
each and `list` of `way` `int`s with usable labels.
"""
features, _, class_ids = self(way=way, queries_per=shot + queries_per)
query = [feats[:queries_per] for feats in features]
support = [feats[queries_per:] for feats in features]
return support, query, class_ids
| 8,594 | 39.352113 | 100 | py |
z2fsl | z2fsl-main/z2fsl/utils/submodules.py | """Various major components."""
import torch
import torch.nn as nn
class MLP(nn.Module):
"""Simple MLP.
Attributes:
layers (`nn.Module`): sequence of layers.
"""
def __init__(
self,
in_features,
out_features,
hidden_layers=None,
dropout=0,
hidden_actf=nn.LeakyReLU(0.2),
output_actf=nn.ReLU(),
noise_std=0,
):
"""Init.
Args:
in_features (`int`): input dimension.
out_features (`int`): final output dimension.
hidden_layers (`list` of `int`s | `int`| `None`, optional):
list of hidden layer sizes of arbitrary length or int for one
hidden layer, default `None` (no hidden layers).
dropout(`float`, optional): dropout probability, default `0`.
hidden_actf (activation function, optional): activation
function of hidden layers, default `nn.LeakyReLU(0.2)`.
output_actf (activation function, optional): activation
function of output layers, default `nn.ReLU()`.
noise_std (`float`, optional): std dev of gaussian noise to add
to MLP result, default `0` (no noise).
"""
if hidden_layers is None:
hidden_layers = []
if isinstance(hidden_layers, int):
hidden_layers = [hidden_layers]
super().__init__()
hidden_layers = [in_features] + hidden_layers + [out_features]
layers = []
for i, (in_f, out_f) in enumerate(zip(hidden_layers[:-1], hidden_layers[1:])):
layers.append(nn.Linear(in_f, out_f))
if i != len(hidden_layers) - 2:
# up to second-to-last layer
layers.append(hidden_actf)
layers.append(nn.Dropout(dropout))
else:
layers.append(output_actf) # ok to use relu, resnet feats are >= 0
self.layers = nn.Sequential(*layers)
self.noise_aug = GaussianNoiseAugmentation(std=noise_std)
def forward(self, x):
"""Forward propagation.
Args:
x (`torch.Tensor`): input of size (batch, in_features).
Returns:
A torch.Tensor of size (batch, out_features).
"""
return self.noise_aug(self.layers(x))
def init_diagonal(self):
"""Sets weights of linear layers to approx I
and biases to 0.
"""
def init_fn(mod):
"""Function to pass to .apply()."""
classname = mod.__class__.__name__
if classname.find('Linear') != -1:
init = torch.randn_like(mod.weight) / 10
init[range(mod.in_features), range(mod.in_features)] = 1
mod.weight = nn.Parameter(init, requires_grad=True)
if mod.bias is not None:
mod.bias = nn.Parameter(torch.zeros_like(mod.bias), requires_grad=True)
self.apply(init_fn)
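# After init_diagonal(), every square Linear weight is approximately the
# identity plus small Gaussian noise (std 0.1) with zero bias, so a freshly
# initialized mapper starts close to the identity map. Sketch (sizes are
# illustrative):
#
#   mlp = MLP(4, 4)
#   mlp.init_diagonal()
#   # mlp(torch.rand(2, 4)) is then close to its input (ReLU of ~x for x >= 0)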
class ConcatMLP(MLP):
"""Simple MLP that accepts two inputs
which are to be concatenated."""
def forward(self, x, y):
"""Concatenates input and noise and forward propagates.
x's and input's dimensions must add up to in_features.
Args:
x (`torch.Tensor`): input.
noise (`torch.Tensor`): randomly sampled noise.
Returns:
A torch.Tensor of size (batch, out_features).
"""
x = torch.cat((x, y), dim=1)
return super().forward(x)
class GaussianNoiseAugmentation(nn.Module):
"""Module that adds gaussian noise to tensor
(for robustness, ...).
Attributes:
mean(torch.Tensor): scalar mean of Gaussian of noise to add.
std(torch.Tensor): scalar std dev of Gaussian of noise to add.
"""
def __init__(self, mean=0.0, std=1.0):
"""Init.
Args:
mean (`float`, optional): mean of Gaussian of noise to add,
default `0`.
std(`float`, optional): std of Gaussian of noise to add,
default `1`. `0` deactivates module.
"""
super().__init__()
self.mean = mean
self.std = std
def forward(self, x):
"""Forward propagation.
Args:
x (`torch.Tensor`): input.
Returns:
Input plus noise if training or just input
if evaluating.
"""
if self.training and self.std != 0:
return x + torch.empty_like(x).normal_(self.mean, self.std)
return x
def __repr__(self):
"""Repr.
Print mean and std along with classname.
"""
if self.std > 0:
return '{}(mean={}, std={})'.format(self._get_name(), self.mean, self.std)
return self._get_name() + '(Deactivated)'
class CVAE(nn.Module):
"""Conditional VAE.
Attributes:
encoder (`nn.Module`): encodes features to concatenated
mean and logarithm of variance.
decoder (`nn.Module`): decodes latent variable and
conditioning variable to feature.
"""
def __init__(self, feature_dim, latent_dim, cond_dim, hidden_layers):
"""Init.
Args:
feature_dim (`int`): dimensionality of features.
latent_dim (`int`): dimensionality of latent variables.
cond_dim (`int`): dimensionality of conditioning variables.
hidden_layers (`list` | `int` | `None`): hidden layers of
encoder and decoder.
"""
super().__init__()
# handle cases to add another layer at encoder
if hidden_layers:
if isinstance(hidden_layers, int):
hidden_layers = [hidden_layers]
else:
hidden_layers = []
self.encoder = ConcatMLP(
feature_dim + cond_dim,
2 * latent_dim,
hidden_layers=list(reversed(hidden_layers)),
output_actf=nn.Identity(),
)
self.decoder = ConcatMLP(
latent_dim + cond_dim,
feature_dim,
hidden_layers=hidden_layers,
output_actf=nn.Sigmoid(),
)
def forward(self, x, cond):
"""Forward propagation.
Args:
x (`torch.Tensor`): input feature.
cond (`torch.Tensor`): conditioning variable.
Returns:
The reconstruction of `x`, the mean of the latent
variable and the logarithm of its variance.
"""
latent_code, mean, logvar = self.encode(x, cond)
x_hat = self.decode(cond, latent_code)
return x_hat, mean, logvar
def decode(self, cond, noise):
"""Decodes latent variable and conditioning variable.
Args:
cond (`torch.Tensor`): conditioning variable.
noise (`torch.Tensor`): latent variable.
Returns:
The reconstructed feature.
"""
x = self.decoder(cond, noise)
return x
def encode(self, feature, cond):
"""Encodes features.
        Args:
            feature (`torch.Tensor`): feature to be encoded.
            cond (`torch.Tensor`): conditioning variable.
Returns:
The latent variable, its mean and the logarithm
of its variance.
"""
latent_stats = self.encoder(feature, cond)
split_dim = latent_stats.size(1) // 2
mean, logvar = latent_stats[:, :split_dim], latent_stats[:, split_dim:]
        # reparameterization trick: z = mu + sigma * eps, eps ~ N(0, I)
        latent_code = mean + torch.empty_like(logvar).normal_() * (logvar / 2).exp()
return latent_code, mean, logvar
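# Minimal usage sketch (all dimensions are illustrative assumptions):
#
#   cvae = CVAE(feature_dim=2048, latent_dim=64, cond_dim=312, hidden_layers=1024)
#   x, attr = torch.rand(8, 2048), torch.rand(8, 312)
#   x_hat, mean, logvar = cvae(x, attr)             # x_hat in (0, 1) via Sigmoid
#   sample = cvae.decode(attr, torch.randn(8, 64))  # generate from the prior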
| 7,552 | 29.954918 | 91 | py |
z2fsl | z2fsl-main/z2fsl/utils/general.py | """General utilities."""
import argparse
import os
import random
import numpy as np
import torch
def str2bool(arg):
"""CL bool arguments to bools"""
if arg.lower() == 'true':
return True
if arg.lower() == 'false':
return False
raise argparse.ArgumentTypeError('Boolean value expected.')
def set_parameter_requires_grad(model, requires_grad=False):
"""Sets requires_grad for all the parameters in a model.
Args:
model (`nn model`): model to alter.
requires_grad (`bool`): whether the model
requires grad.
"""
for param in model.parameters():
param.requires_grad_(requires_grad)
def is_grayscale(image):
"""Return if image is grayscale.
Assert if image only has 1 channel.
Args:
image (`PIL.Image`): image to check.
Returns:
bool indicating whether image is grayscale.
"""
try:
# channel==1 is 2nd channel
image.getchannel(1)
return False
except ValueError:
return True
def configuration_filename(feature_dir, proposed_splits, split, generalized):
"""Calculates configuration specific filenames.
Args:
feature_dir (`str`): directory of features wrt
to dataset directory.
proposed_splits (`bool`): whether using proposed splits.
split (`str`): train split.
generalized (`bool`): whether GZSL setting.
Returns:
`str` containing arguments in appropriate form.
"""
return '{}{}_{}{}.pt'.format(
feature_dir,
('_proposed_splits' if proposed_splits else ''),
split,
'_generalized' if generalized else '',
)
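# For example (illustrative arguments):
#   configuration_filename('features', True, 'trainval', False)
#   returns 'features_proposed_splits_trainval.pt'.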
def slice_fn(tuple_slice):
"""Makes a function that slices a list.
Args:
tuple_slice (`tuple`): tuple of 2 floats denoting
the percentage at which to split the argument
of the function.
Returns:
A function that slices an iterable.
"""
return lambda l: l[slice(int(len(l) * tuple_slice[0]), int(len(l) * tuple_slice[1]))]
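# For example, slice_fn((0.8, 1.0))(list(range(10))) returns [8, 9],
# i.e. the last 20% of the list.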
def dataset_name(dataset_dir):
"""Gets standard dataset name from dataset directory.
Args:
dataset_dir (`str`): absolute dataset directory.
Returns:
`'cub'`, `'awa2'` or `'sun'`.
"""
benchmark = os.path.split(dataset_dir)[1]
if not benchmark:
benchmark = os.path.split(dataset_dir[:-1])[1]
benchmark = benchmark.lower()
if 'cub' in benchmark:
return 'cub'
elif 'awa2' in benchmark or 'attributes2' in benchmark:
return 'awa2'
elif 'sun' in benchmark:
return 'sun'
else:
        raise ValueError('Dataset name was not recognised')
def manual_seed(seed):
"""Sets all possible seeds and variables
for reproducibility in PyTorch.
Args:
seed (`int`): seed to use.
"""
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
| 3,130 | 23.653543 | 89 | py |
FSAD-Net | FSAD-Net-master/main.py | import torch
from models import *
from torch.utils.data import DataLoader
from Recorder import Recorder
from tqdm import tqdm
from pathlib import Path
import torch.nn.init as init
import os
import torchvision
from complex_2d_my_data_loader import MyDataLoader
import numpy
from sampler import BalancedBatchSampler
recorder = Recorder('colon', 'Colonoscopy')
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
LR = 1e-4
one = torch.tensor(1, dtype=torch.float)
mone = one * -1
mone = mone.to(device)
def weights_init(m):
if isinstance(m, torch.nn.Linear):
if m.weight is not None:
init.xavier_uniform_(m.weight)
if m.bias is not None:
init.constant_(m.bias, 0.0)
class Contrastive_Loss(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y_true, y_pred):
confidence_margin = torch.tensor(6, dtype=torch.float).to(device)
zero = torch.tensor(0., dtype=torch.float).to(device)
y_true = y_true.to(device)
        # reference scores drawn from the N(0, 1) prior of the deviation loss
        ref = torch.tensor(numpy.random.normal(loc=0., scale=1.0, size=15000), dtype=torch.float).to(device)
dev = (y_pred - torch.mean(ref)) / torch.std(ref)
inlier_loss = torch.abs(dev)
outlier_loss = torch.abs(torch.max(confidence_margin - dev, zero))
return torch.mean((1 - y_true) * inlier_loss + y_true * outlier_loss), torch.mean((1 - y_true) * inlier_loss), torch.mean(y_true * inlier_loss)
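# Note: this is a deviation-style loss (in the spirit of Pang et al.'s
# deviation networks). With ref ~ N(0, 1), dev(score) is roughly a z-score:
# normal samples (y_true == 0) are pulled toward dev == 0, while abnormal
# samples (y_true == 1) are pushed beyond the confidence margin of 6, at
# which point their term of the loss vanishes.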
data_root = '/home/yu/PycharmProjects/MICCAI/data/colon'
data_transform = torchvision.transforms.Compose([
torchvision.transforms.Resize((32, 32)),
torchvision.transforms.CenterCrop(32),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
])
epoch = 5660
encoder = Encoder().to(device)
root = Path('/home/yu/PycharmProjects/MICCAI2020/check_points/encoder') #deep infomax pretrained encoder
enc_file = root / Path('encoder_5660.wgt')
encoder.load_state_dict(torch.load(enc_file))
encoder.eval()
con_criterion = Contrastive_Loss().to(device)
dev_net = dev_network_d().to(device)
dev_net.apply(weights_init)
dev_net.train()
optimizer_dev = torch.optim.Adam(dev_net.parameters(), lr=LR, betas=(0, 0.9))
d_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_dev, step_size=10, gamma=0.95)
if __name__ == '__main__':
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
batch_size = 64
data_root = '/home/yu/PycharmProjects/MICCAI2020/data/colon/train'
data_transform = torchvision.transforms.Compose([
torchvision.transforms.Resize((32, 32)),
torchvision.transforms.CenterCrop(32),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
])
training_dataset = MyDataLoader(normal_path=os.path.join(data_root, 'train_set'),
transform=data_transform,
abnormal_path=os.path.join(data_root, 'abnormal_50'),
test_path=None,
train=True)
count = 0
labels = []
for index in range(0, len(training_dataset)):
_, label = training_dataset.__getitem__(index)
if label.split('/')[-1].split('_')[0] == '0':
count += 1
# print(label)
labels.append(1)
# abnormal sample
else:
labels.append(0)
# normal sample
print("The number of abnormal in training set: " + str(count))
labels = numpy.array(labels)
target = torch.tensor(labels).to(device)
training_dataset_loader = torch.utils.data.DataLoader(training_dataset, batch_size=batch_size, shuffle=False,
drop_last=True, sampler= BalancedBatchSampler(training_dataset, target),
num_workers=0, pin_memory=True)
epoch_restart = 0
dataiter = iter(training_dataset_loader)
for epoch in range(epoch_restart + 1, 1101):
batch = tqdm(training_dataset_loader, total=len(training_dataset) // batch_size)
new_count = 0
for x, label in batch:
x = x.to(device)
l = []
for index in range(0, x.shape[0]):
if label[index].split('/')[-1].split('_')[0] == '0':
count += 1
l += [1]
else:
l += [0]
l = numpy.array(l)
l = torch.tensor(l, dtype=torch.float).to(device)
target = []
for index in range(0, x.shape[0]):
if label[index].split('/')[-1].split('_')[0] == '0':
count += 1
target += [1]
# abnormal sample
else:
target += [-1]
# normal sample
target = numpy.array(target)
target = torch.tensor(target, dtype=torch.float).to(device)
l_reverse = []
for index in range(0, x.shape[0]):
if label[index].split('/')[-1].split('_')[0] == '0':
count += 1
# print(label)
l_reverse += [0]
# abnormal sample
else:
l_reverse += [1]
# normal sample
l_reverse = numpy.array(l_reverse)
l_reverse = torch.tensor(l_reverse, dtype=torch.float).to(device)
encodings, features_in = encoder(x)
optimizer_dev.zero_grad()
scores = dev_net(encodings)
scores = torch.squeeze(scores)
            inlier_scores = l_reverse * scores  # l_reverse == 1 for normal samples
            outlier_scores = l * scores  # l == 1 for abnormal samples
Contra_Loss, inlier_loss, outlier_loss = con_criterion(l, scores)
Contra_Loss.backward()
optimizer_dev.step()
recorder.record(loss=Contra_Loss, epoch=int(epoch), num_batches=len(training_dataset_loader), n_batch=epoch,
loss_name='dev contrastive loss')
            recorder.record(loss=inlier_scores.mean(), epoch=int(epoch), num_batches=len(training_dataset_loader), n_batch=epoch,
                            loss_name='inlier score')
            recorder.record(loss=outlier_scores.mean(), epoch=int(epoch), num_batches=len(training_dataset_loader), n_batch=epoch,
                            loss_name='outlier score')
if epoch % 10 == 0:
path = 'check_points/'
torch.save(dev_net.state_dict(), path + '/ckpt/fsad_net_{}.pth'.format(str(epoch)))
| 6,705 | 35.053763 | 151 | py |
FSAD-Net | FSAD-Net-master/sampler.py | import torch
is_torchvision_installed = True
try:
import torchvision
except:
is_torchvision_installed = False
import torch.utils.data
import random
class BalancedBatchSampler(torch.utils.data.sampler.Sampler):
def __init__(self, dataset, labels=None):
self.labels = labels
self.dataset = dict()
self.balanced_max = 0
# Save all the indices for all the classes
for idx in range(0, len(dataset)):
label = self._get_label(dataset, idx)
if label not in self.dataset:
self.dataset[label] = list()
self.dataset[label].append(idx)
self.balanced_max = len(self.dataset[label]) \
if len(self.dataset[label]) > self.balanced_max else self.balanced_max
# Oversample the classes with fewer elements than the max
for label in self.dataset:
while len(self.dataset[label]) < self.balanced_max:
self.dataset[label].append(random.choice(self.dataset[label]))
self.keys = list(self.dataset.keys())
self.currentkey = 0
self.indices = [-1]*len(self.keys)
def __iter__(self):
while self.indices[self.currentkey] < self.balanced_max - 1:
self.indices[self.currentkey] += 1
yield self.dataset[self.keys[self.currentkey]][self.indices[self.currentkey]]
self.currentkey = (self.currentkey + 1) % len(self.keys)
self.indices = [-1]*len(self.keys)
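    # The iterator cycles through classes one index at a time, so a DataLoader
    # drawing consecutive indices receives an alternating, balanced class mix.
    # Usage sketch (labels must be a tensor aligned with the dataset;
    # the batch size is illustrative):
    #
    #   sampler = BalancedBatchSampler(dataset, labels)
    #   loader = torch.utils.data.DataLoader(dataset, batch_size=64,
    #                                        shuffle=False, sampler=sampler)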
    def _get_label(self, dataset, idx, labels=None):
if self.labels is not None:
return self.labels[idx].item()
else:
# Trying guessing
dataset_type = type(dataset)
if is_torchvision_installed and dataset_type is torchvision.datasets.MNIST:
return dataset.train_labels[idx].item()
elif is_torchvision_installed and dataset_type is torchvision.datasets.ImageFolder:
return dataset.imgs[idx][1]
else:
raise Exception("You should pass the tensor of labels to the constructor as second argument")
def __len__(self):
return self.balanced_max*len(self.keys) | 2,195 | 40.433962 | 109 | py |
FSAD-Net | FSAD-Net-master/Helper.py | from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy
import os
import torchvision
from sklearn.metrics import roc_curve, auc
def poly_lr_scheduler(my_optimizer, init_lr, epoch,
lr_decay_iter=1,
max_iter=100,
power=0.9):
"""Polynomial decay of learning rate
:param init_lr is base learning rate
:param epoch is a current epoch
:param lr_decay_iter how frequently decay occurs, default is 1
:param max_iter is number of maximum iterations
    :param power is a polynomial power
:param my_optimizer is optimizer
"""
if epoch % lr_decay_iter or epoch > max_iter:
return my_optimizer
lr = init_lr * (1 - epoch / max_iter) ** power
for param_group in my_optimizer.param_groups:
param_group['lr'] = lr
return lr
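# For example (illustrative numbers): with init_lr=1e-3, max_iter=100 and
# power=0.9, epoch 50 sets lr = 1e-3 * (1 - 50/100) ** 0.9 ~= 5.36e-4.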
def get_current_learning_rate(optimizer):
for param_group in optimizer.param_groups:
return param_group['lr']
def mnist_plot_encoded_3d_chart(number_value, encoded_data):
fig = plt.figure(2)
ax = Axes3D(fig)
x_axis, y_axis, z_axis = encoded_data.data[:, 0].cpu().detach().numpy(), \
encoded_data.data[:, 1].cpu().detach().numpy(), \
encoded_data.data[:, 2].cpu().detach().numpy()
for x, y, z, s in zip(x_axis, y_axis, z_axis, number_value):
c = cm.rainbow(int(255 * s / 9))
ax.text(x, y, z, s, backgroundcolor=c)
ax.set_xlim(x_axis.min(), x_axis.max())
ax.set_ylim(y_axis.min(), y_axis.max())
ax.set_zlim(z_axis.min(), z_axis.max())
plt.show()
def mnist_get_data_set(dir_path, get_number=None, not_number=None, train=True):
img_transform = torchvision.transforms.Compose([
torchvision.transforms.Resize((31, 31)),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
data_set = torchvision.datasets.MNIST(dir_path, train=train, transform=img_transform)
idx = 0
if get_number is not None or not_number is not None:
if get_number is not None:
idx = data_set.train_labels == get_number
elif not_number is not None:
idx = data_set.train_labels != not_number
data_set.train_labels = data_set.train_labels[idx]
data_set.train_data = data_set.train_data[idx]
return data_set
def plot_2d_chart(x1, y1, label1='predict_results',
x2=None, y2=None, label2=None, save_path=None, title=None):
plt.plot(x1, y1, color='red', label=label1, marker='o', mec='r', mfc='w')
if x2 is not None and y2 is not None:
plt.plot(x2, y2, color='green', label=label2, marker='*', mec='g', mfc='w')
# plt.plot(x2, y2, color='green', label=label2,
# marker='o',
# mec='green',
# mfc='w')
plt.xlabel('labels')
plt.xticks(rotation=45)
plt.ylabel('loss')
plt.legend()
if title is not None:
plt.title(title)
else:
plt.title('line_chart_for_anomaly_detector')
plt.grid()
if save_path is not None:
plt.savefig(save_path)
plt.close()
else:
plt.show()
def view_images(title_list, image_list, task_title=None, size=5, axis=False):
    row = int(numpy.ceil(len(image_list) / 2))  # numpy.int was removed in NumPy 1.24
    column = int(numpy.ceil(len(image_list) / row))
plt.figure(task_title)
plt.figure(task_title, figsize=(size, size))
for col_index in range(column):
for row_index in range(row):
num = col_index * row + row_index
if num == len(image_list) or num == len(title_list):
break
plt.subplot(row, column, num + 1)
plt.title(title_list[num])
if not axis:
plt.axis('off')
plt.imshow(image_list[num])
plt.get_current_fig_manager().full_screen_toggle()
plt.show()
def plot_abnormal_normal_chart(abnormal, normal,
save=None, show=False):
plt.plot(numpy.arange(0, 128), abnormal, color='red', label='abnormal_only_zeros ',
marker='o',
mec='r', mfc='w')
plt.plot(numpy.arange(128, 256), normal, color='green', label='normal_no_zeros',
marker='o',
mec='green',
mfc='w')
plt.xlabel('labels')
plt.xticks(rotation=45)
plt.ylabel('loss')
plt.legend()
plt.title('line_chart_for_anomaly_detector')
plt.grid()
if show:
plt.show()
if save is not None:
dir_path = str(save).split('/')[0]
if not os.path.exists(dir_path):
os.mkdir(dir_path)
plt.savefig('{}.png'.format(save))
plt.close()
def mnist_get_visualize_data(input_image, output_image):
pic_total_1 = numpy.array(input_image[0])
for i in range(input_image.shape[0] - 1):
pic_total_1 = numpy.concatenate((pic_total_1, input_image[i + 1]), axis=1)
pic_total_2 = numpy.array(output_image[0])
for i in range(output_image.shape[0] - 1):
pic_total_2 = numpy.concatenate((pic_total_2, output_image[i + 1]), axis=1)
pic_total = numpy.concatenate((pic_total_1, pic_total_2), axis=0)
return pic_total
def draw_roc(tp_list, fp_list, title=None, save_path = None):
# auc drawing
auc_curve = auc(fp_list, tp_list)
plt.plot(fp_list, tp_list, color='red', label='AUC area:(%0.5f)' % auc_curve)
plt.plot([0, 1], [0, 1], linestyle='--')
plt.xlim([-0.005, 1.005])
plt.ylim([-0.005, 1.005])
plt.xlabel('FPR')
plt.ylabel('TPR')
if title is not None:
plt.title('Roc_Curve based on {}'.format(title))
plt.legend(loc='lower right')
plt.grid()
if save_path is not None:
plt.savefig(save_path)
plt.close()
else:
plt.show()
| 5,905 | 32.556818 | 89 | py |
FSAD-Net | FSAD-Net-master/Recorder.py | import os
import numpy as np
import torchvision.utils as vutils
from tensorboardX import SummaryWriter
from IPython import display
from matplotlib import pyplot as plt
import torch
'''
TensorBoard Data will be stored in './runs' path
'''
class Recorder:
def __init__(self, model_name, data_name):
self.data_name = data_name
self.comment = '{}_{}'.format(model_name, data_name)
self.data_subdir = data_name
self.image_subdir = os.path.join(self.data_subdir, 'images')
self.check_point_store = os.path.join(self.data_subdir, 'CheckPointStore')
Recorder.make_dir(self.data_subdir)
Recorder.make_dir(self.image_subdir)
Recorder.make_dir(self.check_point_store)
# TensorBoard
self.writer = SummaryWriter('{}_log'.format(model_name.lower()))
def record(self, loss, epoch, n_batch, num_batches, loss_name='loss'):
# var_class = torch.autograd.variable.Variable
if isinstance(loss, torch.autograd.Variable):
loss = loss.data.cpu().numpy()
# step = Recorder.step(epoch, n_batch, num_batches)
self.writer.add_scalar(loss_name, loss, epoch)
def log_images(self, images, num_images, epoch, n_batch, num_batches,
format='NCHW', normalize=True, title=None):
"""
input images are expected in format (NCHW)
"""
if type(images) == np.ndarray:
images = torch.from_numpy(images)
if format == 'NHWC':
images = images.transpose(1, 3)
step = Recorder.step(epoch, n_batch, num_batches)
img_name = '{}/images: *{}*'.format(self.comment, title)
# Make horizontal grid from image tensor
horizontal_grid = vutils.make_grid(images, normalize=normalize, scale_each=True)
# Make vertical grid from image tensor
nrows = int(np.sqrt(num_images))
grid = vutils.make_grid(images, nrow=nrows, normalize=True, scale_each=True)
# Add horizontal images to tensorboard
self.writer.add_image(img_name, horizontal_grid, step)
# Save plots
self.save_torch_images(horizontal_grid, grid, epoch, n_batch)
def save_torch_images(self, horizontal_grid, grid, epoch, n_batch, plot_horizontal=True, axis=False):
# Plot and save horizontal
fig = plt.figure(figsize=(16, 16))
plt.imshow(np.moveaxis(horizontal_grid.numpy(), 0, -1))
if not axis:
plt.axis('off')
if plot_horizontal:
display.display(plt.gcf())
self.save_images(fig, epoch, n_batch, comment='horizontal')
plt.close()
# Save squared
fig = plt.figure()
plt.imshow(np.moveaxis(grid.numpy(), 0, -1))
if not axis:
plt.axis('off')
self.save_images(fig, epoch, n_batch)
plt.close()
def save_images(self, fig, epoch, n_batch, comment=''):
fig.savefig(os.path.join(self.image_subdir,
'{}_epoch_{}_batch_{}.png'.format(comment, epoch, n_batch)))
@staticmethod
def display_status(epoch, num_epochs, n_batch, num_batches, loss):
if isinstance(loss, torch.Tensor):
loss = loss.cpu().detach().numpy()
print('Epoch: [{}/{}], Batch Num: [{}/{}], Loss: {:.4f}'.format(epoch, num_epochs, n_batch, num_batches, loss))
def save_models(self, model, epoch):
out_dir = self.check_point_store
torch.save(model.state_dict(),
'{}/epoch_{}'.format(out_dir, epoch))
def close(self):
self.writer.close()
# Private Functionality
@staticmethod
def step(epoch, n_batch, num_batches):
return epoch * num_batches + n_batch
@staticmethod
def make_dir(directory):
if not os.path.exists(directory):
os.mkdir(directory)
| 3,894 | 32.008475 | 119 | py |
FSAD-Net | FSAD-Net-master/models.py | from pathlib import Path
import torch
import torch.nn as nn
import torch.nn.functional as F
class Encoder(nn.Module):
def __init__(self, rep_dim=64):
super().__init__()
self.rep_dim = rep_dim
self.c0 = nn.Conv2d(3, 64, kernel_size=4, stride=1)
self.c1 = nn.Conv2d(64, 128, kernel_size=4, stride=1)
self.c2 = nn.Conv2d(128, 256, kernel_size=4, stride=1)
self.c3 = nn.Conv2d(256, 512, kernel_size=4, stride=1)
self.l1 = nn.Linear(512*20*20, self.rep_dim)
self.b1 = nn.BatchNorm2d(128)
self.b2 = nn.BatchNorm2d(256)
self.b3 = nn.BatchNorm2d(512)
def forward(self, x):
h = F.relu(self.c0(x))
features = F.relu(self.b1(self.c1(h)))
h = F.relu(self.b2(self.c2(features)))
h = F.relu(self.b3(self.c3(h)))
encoded = self.l1(h.view(x.shape[0], -1))
# print(encoded.shape)
return encoded, features
def get_embedding(self, x):
return self.forward(x)
class GlobalDiscriminator(nn.Module):
def __init__(self):
super().__init__()
self.c0 = nn.Conv2d(128, 64, kernel_size=3)
self.c1 = nn.Conv2d(64, 32, kernel_size=3)
self.l0 = nn.Linear(32 * 22 * 22 + 64, 512)
self.l1 = nn.Linear(512, 512)
self.l2 = nn.Linear(512, 1)
def forward(self, y, M):
h = F.relu(self.c0(M))
h = self.c1(h)
h = h.view(y.shape[0], -1)
h = torch.cat((y, h), dim=1)
h = F.relu(self.l0(h))
h = F.relu(self.l1(h))
return self.l2(h)
class LocalDiscriminator(nn.Module):
def __init__(self):
super().__init__()
self.c0 = nn.Conv2d(192, 512, kernel_size=1)
self.c1 = nn.Conv2d(512, 512, kernel_size=1)
self.c2 = nn.Conv2d(512, 1, kernel_size=1)
def forward(self, x):
h = F.relu(self.c0(x))
h = F.relu(self.c1(h))
return self.c2(h)
class PriorDiscriminator(nn.Module):
def __init__(self):
super().__init__()
self.l0 = nn.Linear(64, 1000)
self.l1 = nn.Linear(1000, 200)
self.l2 = nn.Linear(200, 1)
def forward(self, x):
h = F.relu(self.l0(x))
h = F.relu(self.l1(h))
return torch.sigmoid(self.l2(h))
class Classifier(nn.Module):
def __init__(self):
super().__init__()
self.l1 = nn.Linear(64, 15)
self.bn1 = nn.BatchNorm1d(15)
self.l2 = nn.Linear(15, 10)
self.bn2 = nn.BatchNorm1d(10)
self.l3 = nn.Linear(10, 10)
self.bn3 = nn.BatchNorm1d(10)
def forward(self, x):
        encoded, _ = x  # x is the (encoded, features) pair from the encoder
clazz = F.relu(self.bn1(self.l1(encoded)))
clazz = F.relu(self.bn2(self.l2(clazz)))
clazz = F.softmax(self.bn3(self.l3(clazz)), dim=1)
return clazz
class DeepInfoAsLatent(nn.Module):
def __init__(self, run, epoch):
super().__init__()
model_path = Path('/home/yu/PycharmProjects/DeepInfomaxPytorch-master/data/cifar/checkpoints') / Path(str(run)) / Path('encoder' + str(epoch) + '.wgt')
self.encoder = Encoder()
self.encoder.load_state_dict(torch.load(str(model_path)))
self.classifier = Classifier()
def forward(self, x):
z, features = self.encoder(x)
z = z.detach()
return self.classifier((z, features))
DIM = 64
class AutoEncoder(nn.Module):
def __init__(self):
super(AutoEncoder, self).__init__()
self.c0 = nn.Conv2d(3, 64, kernel_size=4, stride=1)
self.c1 = nn.Conv2d(64, 128, kernel_size=4, stride=1)
self.c2 = nn.Conv2d(128, 256, kernel_size=4, stride=1)
self.c3 = nn.Conv2d(256, 512, kernel_size=4, stride=1)
self.l1 = nn.Linear(512 * 20 * 20, 64)
self.b1 = nn.BatchNorm2d(128)
self.b2 = nn.BatchNorm2d(256)
self.b3 = nn.BatchNorm2d(512)
preprocess = nn.Sequential(
nn.Linear(64, 20*20*512),
nn.BatchNorm1d(20*20*512),
nn.ReLU(inplace=True),
)
block1 = nn.Sequential(
nn.ConvTranspose2d(512, 256, 4, stride=1),
nn.BatchNorm2d(256),
nn.ReLU(True),
)
block2 = nn.Sequential(
nn.ConvTranspose2d(256, 128, 4, stride=1),
nn.BatchNorm2d(128),
nn.ReLU(True),
)
block3 = nn.Sequential(
nn.ConvTranspose2d(128, 64, 4, stride=1),
nn.BatchNorm2d(64),
nn.ReLU(True),
)
deconv_out = nn.ConvTranspose2d(64, 3, 4, stride=1)
self.preprocess = preprocess
self.block1 = block1
        self.block2 = block2
        self.block3 = block3  # was `self.block2 = block3`, leaving block3 undefined
self.deconv_out = deconv_out
self.tanh = nn.Tanh()
def forward(self, x):
h = F.relu(self.c0(x))
features = F.relu(self.b1(self.c1(h)))
h = F.relu(self.b2(self.c2(features)))
h = F.relu(self.b3(self.c3(h)))
encoded = self.l1(h.view(x.shape[0], -1))
output = self.preprocess(encoded)
        # reshape to the (512, 20, 20) map produced by preprocess; the previous
        # view(-1, 4 * DIM, 4, 4) did not match the 20 * 20 * 512 elements
        output = output.view(-1, 512, 20, 20)
output = self.block1(output)
output = self.block2(output)
output = self.block3(output)
output = self.deconv_out(output)
output = self.tanh(output)
return output.view(-1, 3, 32, 32)
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
main = nn.Sequential(
nn.Conv2d(3, DIM, 3, 2, padding=1),
nn.LeakyReLU(),
nn.Conv2d(DIM, 2 * DIM, 3, 2, padding=1),
nn.LeakyReLU(),
nn.Conv2d(2 * DIM, 4 * DIM, 3, 2, padding=1),
nn.LeakyReLU(),
)
self.main = main
self.linear = nn.Linear(4*4*4*DIM, 1)
def forward(self, input):
output = self.main(input)
output1 = output
output = output.view(-1, 4*4*4*DIM)
output = self.linear(output)
return output, output1
class dev_network_d(nn.Module):
def __init__(self):
super(dev_network_d, self).__init__()
self.l0 = nn.Linear(64, 1000)
self.n0 = nn.BatchNorm1d(num_features=1000)
self.l1 = nn.Linear(1000, 200)
self.n1 = nn.BatchNorm1d(num_features=200)
self.l2 = nn.Linear(200, 20)
self.n2 = nn.BatchNorm1d(num_features=20)
self.l3 = nn.Linear(20, 1)
def forward(self, input):
h = F.relu(self.l0(input))
h = self.n0(h)
h = F.relu(self.l1(h))
h = self.n1(h)
        h = F.relu(self.l2(h))
h = self.n2(h)
h = self.l3(h)
return h
class classifer_d(nn.Module):
def __init__(self):
super(classifer_d, self).__init__()
self.l0 = nn.Linear(64, 1000)
self.n0 = nn.BatchNorm1d(num_features=1000)
self.l1 = nn.Linear(1000, 200)
self.n1 = nn.BatchNorm1d(num_features=200)
self.l2 = nn.Linear(200, 20)
self.n2 = nn.BatchNorm1d(num_features=20)
self.l3 = nn.Linear(20, 2)
self.n3 = nn.BatchNorm1d(num_features=2)
def forward(self, input):
h = F.relu(self.l0(input))
h = self.n0(h)
h = F.relu(self.l1(h))
h = self.n1(h)
        h = F.relu(self.l2(h))
h = self.n2(h)
h = self.l3(h)
h = self.n3(h)
# h = F.softmax(h, dim=1)
return h | 7,387 | 28.31746 | 159 | py |
FSAD-Net | FSAD-Net-master/complex_2d_my_data_loader.py | import os
import torch.utils.data
from PIL import Image
class MyDataLoader(torch.utils.data.Dataset):
# constructor of the class
def __init__(self, normal_path, abnormal_path, test_path, transform=None, train=False, validate=False, test=False):
assert (train is True and test is False and validate is False) or \
(train is False and test is True and validate is False) or \
(train is False and test is False and validate is True), \
">> Error: The mode in use is one of [train, validate, test]"
if validate is True:
assert abnormal_path is not None and normal_path is not None, \
'>> ERROR: we need normal and abnormal path in validate mode'
self.test = test
self.train = train
self.validate = validate
self.transform = transform
normal_images = []
abnormal_images = []
if normal_path is not None:
normal_images = [os.path.join(normal_path, img) for img in os.listdir(normal_path)]
normal_images = sorted(normal_images, key=lambda x: int(str(x).
split('/')[-1].
split('.')[0].
split('_')[-1]))
if abnormal_path is not None:
abnormal_images = [os.path.join(abnormal_path, img) for img in os.listdir(abnormal_path)]
abnormal_images = sorted(abnormal_images, key=lambda x: int(str(x).
split('/')[-1].
split('.')[0].
split('_')[-1]))
if self.train is True and abnormal_path is not None:
            print('>> Warning: abnormal images will be included in the training set!')
images = None
if self.train:
# numbers of images in data set
images_number = len(normal_images)
images = normal_images[int(0 * images_number):] + abnormal_images
if self.validate:
images_number = len(normal_images)
images = normal_images[:int(0 * images_number)] + abnormal_images
# random.shuffle(images)
self.images = images
def __getitem__(self, index):
image_path = self.images[index]
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
labels = []
if self.test:
return
else:
label = "1_"+ str(image_path.split('/')[-1].split('_')[1].split('.')[0]) \
if image_path.split('/')[-1].split('_')[0] == 'normal' \
else "0_" + str(image_path.split('/')[-1].split('_')[1].split('.')[0])
            data = Image.open(image_path)
            if self.transform is not None:
                data = self.transform(data)
return data, label
def __len__(self):
return len(self.images)
| 3,082 | 40.662162 | 119 | py |
recsim | recsim-master/recsim/agents/dopamine/dqn_agent.py | # coding=utf-8
# Copyright 2019 The RecSim Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Recsim-specific Dopamine DQN agent and related utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from dopamine.agents.dqn import dqn_agent
from dopamine.replay_memory import circular_replay_buffer
import gin.tf
from gym import spaces
import numpy as np
import tensorflow.compat.v1 as tf
DQNNetworkType = collections.namedtuple('dqn_network', ['q_values'])
class ResponseAdapter(object):
"""Custom flattening of responses to accommodate dopamine replay buffer."""
def __init__(self, input_response_space):
"""Init function for ResponseAdapter.
Args:
input_response_space: this is assumed to be an instance of
        gym.spaces.Tuple; each element of the tuple has to be an instance
of gym.spaces.Dict consisting of feature_name: 0-d gym.spaces.Box
(single float) key-value pairs.
"""
self._input_response_space = input_response_space
self._single_response_space = input_response_space.spaces[0]
self._response_names = list(self._single_response_space.spaces.keys())
self._response_shape = (len(input_response_space.spaces),
len(self._response_names))
self._response_dtype = np.float32
@property
def response_names(self):
return self._response_names
@property
def response_shape(self):
return self._response_shape
@property
def response_dtype(self):
return self._response_dtype
def encode(self, responses):
response_tensor = np.zeros(self._response_shape, dtype=self._response_dtype)
for i, response in enumerate(responses):
# Note: the order of dictionary keys in the self._input_response_space is
# not necessarily the same as the order of the keys in the observation
      # dictionary itself. To guarantee that the positions of elements are consistent
# with the order in self._response_names, we iterate over keys explicitly.
for j, key in enumerate(self.response_names):
response_tensor[i, j] = response[key]
return response_tensor
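# Hedged usage sketch of ResponseAdapter.encode; the toy response space,
# feature names and values below are illustrative placeholders, not RecSim
# defaults.
def _response_adapter_demo():
  single_response = spaces.Dict({
      'click': spaces.Box(0.0, 1.0, shape=(), dtype=np.float32),
      'watch_time': spaces.Box(0.0, 100.0, shape=(), dtype=np.float32),
  })
  adapter = ResponseAdapter(spaces.Tuple((single_response, single_response)))
  encoded = adapter.encode(({'click': 1.0, 'watch_time': 2.5},
                            {'click': 0.0, 'watch_time': 0.0}))
  return encoded  # shape (2, 2); columns follow adapter.response_names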
@gin.configurable
class ObservationAdapter(object):
"""An adapter to convert between user/doc observation and images."""
def __init__(self, input_observation_space, stack_size=1):
self._input_observation_space = input_observation_space
user_space = input_observation_space.spaces['user']
doc_space = input_observation_space.spaces['doc']
self._num_candidates = len(doc_space.spaces)
doc_space_shape = spaces.flatdim(list(doc_space.spaces.values())[0])
# Use the longer of user_space and doc_space as the shape of each row.
obs_shape = (np.max([spaces.flatdim(user_space), doc_space_shape]),)
self._observation_shape = (self._num_candidates + 1,) + obs_shape
self._observation_dtype = user_space.dtype
self._stack_size = stack_size
# Pads an array with zeros to make its shape identical to _observation_shape.
def _pad_with_zeros(self, array):
width = self._observation_shape[1] - len(array)
return np.pad(array, (0, width), mode='constant')
@property
def output_observation_space(self):
"""The output observation space of the adapter."""
user_space = self._input_observation_space.spaces['user']
doc_space = self._input_observation_space.spaces['doc']
user_dim = spaces.flatdim(user_space)
low = np.concatenate(
[self._pad_with_zeros(np.ones(user_dim) * -np.inf).reshape(1, -1)] + [
self._pad_with_zeros(np.ones(spaces.flatdim(d)) *
-np.inf).reshape(1, -1)
for d in doc_space.spaces.values()
])
high = np.concatenate(
[self._pad_with_zeros(np.ones(user_dim) * np.inf).reshape(1, -1)] + [
self._pad_with_zeros(np.ones(spaces.flatdim(d)) *
np.inf).reshape(1, -1)
for d in doc_space.spaces.values()
])
return spaces.Box(low=low, high=high, dtype=np.float32)
def encode(self, observation):
"""Encode user observation and document observations to an image."""
# It converts the observation from the simulator to a numpy array to be
    # consumed by the DQN agent, which assumes the input is an "image".
    # The first row is the user's observation. The remaining rows are documents'
# observation, one row for each document.
image = np.zeros(
self._observation_shape + (self._stack_size,),
dtype=self._observation_dtype)
image[0, :, 0] = self._pad_with_zeros(
spaces.flatten(self._input_observation_space.spaces['user'],
observation['user']))
doc_space = zip(self._input_observation_space.spaces['doc'].spaces.values(),
observation['doc'].values())
image[1:, :, 0] = np.array([
self._pad_with_zeros(spaces.flatten(doc_space, d))
for doc_space, d in doc_space
])
return image
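# Hedged usage sketch of ObservationAdapter.encode; the toy spaces and values
# below are illustrative placeholders, not RecSim defaults.
def _observation_adapter_demo():
  user = spaces.Box(-1.0, 1.0, shape=(2,), dtype=np.float32)
  doc = spaces.Dict({
      'd0': spaces.Box(0.0, 1.0, shape=(3,), dtype=np.float32),
      'd1': spaces.Box(0.0, 1.0, shape=(3,), dtype=np.float32),
  })
  adapter = ObservationAdapter(spaces.Dict({'user': user, 'doc': doc}))
  observation = {
      'user': np.zeros(2, dtype=np.float32),
      'doc': collections.OrderedDict([
          ('d0', np.ones(3, dtype=np.float32)),
          ('d1', np.ones(3, dtype=np.float32)),
      ]),
  }
  # Row 0 is the zero-padded user observation; rows 1..2 are the documents.
  return adapter.encode(observation)  # shape (3, 3, 1)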
# The following function creates the DQN network for RecSim.
def recsim_dqn_network(user, doc, scope):
inputs = tf.concat([user, doc], axis=1)
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
hidden = tf.keras.layers.Dense(256, activation=tf.nn.relu)(inputs)
hidden = tf.keras.layers.Dense(32, activation=tf.nn.relu)(hidden)
q_value = tf.keras.layers.Dense(1, name='output')(hidden)
return q_value
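# Hedged note: the MLP above maps the concatenation of user and document
# features to a single scalar, so per-candidate Q-values are obtained by
# scoring each document against the user independently; the wiring that
# assembles the scalars into the q_values tuple lives outside this snippet.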
class DQNAgentRecSim(dqn_agent.DQNAgent):
"""RecSim-specific Dopamine DQN agent that converts the observation space."""
def __init__(self, sess, observation_space, num_actions, stack_size,
optimizer_name, eval_mode, **kwargs):
if stack_size != 1:
raise ValueError(
'Invalid stack_size: %s. Only stack_size=1 is supported for now.' %
stack_size)
    self._env_observation_space = observation_space
    # One action per candidate document; _validate_states below relies on it.
    self._num_candidates = int(num_actions)
    # In our case, the observation is a data structure that stores observations
    # of the user and candidate documents. We use an observation adapter to
    # convert it to an "image", which is required by the dopamine DQNAgent.
self._obs_adapter = ObservationAdapter(self._env_observation_space)
if optimizer_name == 'adam':
optimizer = tf.train.AdamOptimizer()
elif optimizer_name == 'sgd':
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1e-3)
else:
optimizer = tf.train.RMSPropOptimizer(
learning_rate=0.00025,
decay=0.95,
momentum=0.0,
epsilon=0.00001,
centered=True)
dqn_agent.DQNAgent.__init__(
self,
sess,
num_actions,
observation_shape=self._obs_adapter.output_observation_space.shape,
observation_dtype=self._obs_adapter.output_observation_space.dtype,
stack_size=stack_size,
network=recsim_dqn_network,
optimizer=optimizer,
eval_mode=eval_mode,
**kwargs)
def _validate_states(self, states):
shape = states.get_shape()
if len(shape) != 4 or shape[1] != self._num_candidates + 1:
raise ValueError('Invalid states shape: %s. '
'Expecting [batch_size, %s, num_features, stack_size].' %
(shape, self._num_candidates + 1))
def wrapped_replay_buffer(**kwargs):
return circular_replay_buffer.WrappedReplayBuffer(**kwargs)
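# Hedged usage sketch: observation_shape, stack_size, replay_capacity and
# batch_size are standard WrappedReplayBuffer arguments; the concrete values
# below are illustrative, not RecSim defaults.
def _replay_buffer_demo(obs_adapter):
  return wrapped_replay_buffer(
      observation_shape=obs_adapter.output_observation_space.shape,
      stack_size=1,
      replay_capacity=100000,
      batch_size=32)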
| 7,883 | 38.42 | 80 | py |
MCGR | MCGR-main/eval_ensemble.py | import argparse
import numpy as np
import os
import sys
import torch
import torch.nn.functional as F
import data
import models
import utils
parser = argparse.ArgumentParser(description='Ensemble evaluation')
parser.add_argument('--dataset', type=str, default='CIFAR10', metavar='DATASET',
help='dataset name (default: CIFAR10)')
parser.add_argument('--use_test', action='store_true',
help='switches between validation and test set (default: validation)')
parser.add_argument('--transform', type=str, default='VGG', metavar='TRANSFORM',
help='transform name (default: VGG)')
parser.add_argument('--data_path', type=str, default=None, metavar='PATH',
help='path to datasets location (default: None)')
parser.add_argument('--batch_size', type=int, default=128, metavar='N',
help='input batch size (default: 128)')
parser.add_argument('--num-workers', type=int, default=4, metavar='N',
help='number of workers (default: 4)')
parser.add_argument('--model', type=str, default=None, metavar='MODEL',
help='model name (default: None)')
parser.add_argument('--ckpt', type=str, action='append', metavar='CKPT', required=True,
help='checkpoint to eval, pass all the models through this parameter')
args = parser.parse_args()
torch.backends.cudnn.benchmark = True
loaders, num_classes = data.loaders(
args.dataset,
args.data_path,
args.batch_size,
args.num_workers,
args.transform,
args.use_test
)
architecture = getattr(models, args.model)
model = architecture.base(num_classes=num_classes, **architecture.kwargs)
criterion = F.cross_entropy
model.cuda()
ensemble_size = 0
predictions_sum = np.zeros((len(loaders['test'].dataset), num_classes))
for path in args.ckpt:
print(path)
checkpoint = torch.load(path)
model.load_state_dict(checkpoint['model_state'])
predictions, targets = utils.predictions(loaders['test'], model)
acc = 100.0 * np.mean(np.argmax(predictions, axis=1) == targets)
predictions_sum += predictions
ens_acc = 100.0 * np.mean(np.argmax(predictions_sum, axis=1) == targets)
print('Model accuracy: %8.4f. Ensemble accuracy: %8.4f' % (acc, ens_acc))
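# Hedged note: utils.predictions returns per-class softmax probabilities, so
# the running ensemble prediction is the argmax of the probabilities summed
# over all checkpoints processed so far.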
| 2,287 | 33.149254 | 90 | py |
MCGR | MCGR-main/fge.py | import argparse
import numpy as np
import os
import sys
import tabulate
import time
import torch
import torch.nn.functional as F
import data
import models
import utils
parser = argparse.ArgumentParser(description='FGE training')
parser.add_argument('--dir', type=str, default='/tmp/fge/', metavar='DIR',
help='training directory (default: /tmp/fge)')
parser.add_argument('--dataset', type=str, default='CIFAR10', metavar='DATASET',
help='dataset name (default: CIFAR10)')
parser.add_argument('--use_test', action='store_true',
help='switches between validation and test set (default: validation)')
parser.add_argument('--transform', type=str, default='VGG', metavar='TRANSFORM',
help='transform name (default: VGG)')
parser.add_argument('--data_path', type=str, default=None, metavar='PATH',
help='path to datasets location (default: None)')
parser.add_argument('--batch_size', type=int, default=128, metavar='N',
help='input batch size (default: 128)')
parser.add_argument('--num-workers', type=int, default=4, metavar='N',
help='number of workers (default: 4)')
parser.add_argument('--model', type=str, default=None, metavar='MODEL',
help='model name (default: None)')
parser.add_argument('--ckpt', type=str, default=None, metavar='CKPT',
help='checkpoint to eval (default: None)')
parser.add_argument('--epochs', type=int, default=20, metavar='N',
help='number of epochs to train (default: 20)')
parser.add_argument('--cycle', type=int, default=4, metavar='N',
help='number of epochs to train (default: 4)')
parser.add_argument('--lr_1', type=float, default=0.05, metavar='LR1',
help='initial learning rate (default: 0.05)')
parser.add_argument('--lr_2', type=float, default=0.0001, metavar='LR2',
help='initial learning rate (default: 0.0001)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--wd', type=float, default=1e-4, metavar='WD',
help='weight decay (default: 1e-4)')
parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
args = parser.parse_args()
assert args.cycle % 2 == 0, 'Cycle length should be even'
os.makedirs(args.dir, exist_ok=True)
with open(os.path.join(args.dir, 'fge.sh'), 'w') as f:
f.write(' '.join(sys.argv))
f.write('\n')
torch.backends.cudnn.benchmark = True
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
loaders, num_classes = data.loaders(
args.dataset,
args.data_path,
args.batch_size,
args.num_workers,
args.transform,
args.use_test
)
architecture = getattr(models, args.model)
model = architecture.base(num_classes=num_classes, **architecture.kwargs)
criterion = F.cross_entropy
checkpoint = torch.load(args.ckpt)
start_epoch = checkpoint['epoch'] + 1
model.load_state_dict(checkpoint['model_state'])
model.cuda()
optimizer = torch.optim.SGD(
model.parameters(),
lr=args.lr_1,
momentum=args.momentum,
weight_decay=args.wd
)
optimizer.load_state_dict(checkpoint['optimizer_state'])
ensemble_size = 0
predictions_sum = np.zeros((len(loaders['test'].dataset), num_classes))
columns = ['ep', 'lr', 'tr_loss', 'tr_acc', 'te_nll', 'te_acc', 'ens_acc', 'time']
for epoch in range(args.epochs):
time_ep = time.time()
lr_schedule = utils.cyclic_learning_rate(epoch, args.cycle, args.lr_1, args.lr_2)
train_res = utils.train(loaders['train'], model, optimizer, criterion, lr_schedule=lr_schedule)
test_res = utils.test(loaders['test'], model, criterion)
time_ep = time.time() - time_ep
predictions, targets = utils.predictions(loaders['test'], model)
ens_acc = None
if (epoch % args.cycle + 1) == args.cycle // 2:
ensemble_size += 1
predictions_sum += predictions
ens_acc = 100.0 * np.mean(np.argmax(predictions_sum, axis=1) == targets)
if (epoch + 1) % (args.cycle // 2) == 0:
utils.save_checkpoint(
args.dir,
start_epoch + epoch,
name='fge',
model_state=model.state_dict(),
optimizer_state=optimizer.state_dict()
)
values = [epoch, lr_schedule(1.0), train_res['loss'], train_res['accuracy'], test_res['nll'],
test_res['accuracy'], ens_acc, time_ep]
table = tabulate.tabulate([values], columns, tablefmt='simple', floatfmt='9.4f')
if epoch % 40 == 0:
table = table.split('\n')
table = '\n'.join([table[1]] + table)
else:
table = table.split('\n')[2]
print(table)
| 4,796 | 36.476563 | 99 | py |
MCGR | MCGR-main/eval_curve.py | import argparse
import numpy as np
import os
import tabulate
import torch
import torch.nn.functional as F
import data
import models
import curves
import utils
parser = argparse.ArgumentParser(description='DNN curve evaluation')
parser.add_argument('--dir', type=str, default='/tmp/eval', metavar='DIR',
help='training directory (default: /tmp/eval)')
parser.add_argument('--num_points', type=int, default=61, metavar='N',
help='number of points on the curve (default: 61)')
parser.add_argument('--dataset', type=str, default='CIFAR10', metavar='DATASET',
help='dataset name (default: CIFAR10)')
parser.add_argument('--use_test', action='store_true',
help='switches between validation and test set (default: validation)')
parser.add_argument('--transform', type=str, default='VGG', metavar='TRANSFORM',
help='transform name (default: VGG)')
parser.add_argument('--data_path', type=str, default=None, metavar='PATH',
help='path to datasets location (default: None)')
parser.add_argument('--batch_size', type=int, default=128, metavar='N',
help='input batch size (default: 128)')
parser.add_argument('--num_workers', type=int, default=4, metavar='N',
help='number of workers (default: 4)')
parser.add_argument('--model', type=str, default=None, metavar='MODEL',
help='model name (default: None)')
parser.add_argument('--curve', type=str, default=None, metavar='CURVE',
help='curve type to use (default: None)')
parser.add_argument('--num_bends', type=int, default=3, metavar='N',
help='number of curve bends (default: 3)')
parser.add_argument('--ckpt', type=str, default=None, metavar='CKPT',
help='checkpoint to eval (default: None)')
parser.add_argument('--wd', type=float, default=1e-4, metavar='WD',
help='weight decay (default: 1e-4)')
args = parser.parse_args()
os.makedirs(args.dir, exist_ok=True)
torch.backends.cudnn.benchmark = True
loaders, num_classes = data.loaders(
args.dataset,
args.data_path,
args.batch_size,
args.num_workers,
args.transform,
args.use_test,
shuffle_train=False
)
architecture = getattr(models, args.model)
curve = getattr(curves, args.curve)
model = curves.CurveNet(
num_classes,
curve,
architecture.curve,
args.num_bends,
architecture_kwargs=architecture.kwargs,
)
model.cuda()
checkpoint = torch.load(args.ckpt)
model.load_state_dict(checkpoint['model_state'])
criterion = F.cross_entropy
regularizer = curves.l2_regularizer(args.wd)
T = args.num_points
ts = np.linspace(0.0, 1.0, T)
tr_loss = np.zeros(T)
tr_nll = np.zeros(T)
tr_acc = np.zeros(T)
te_loss = np.zeros(T)
te_nll = np.zeros(T)
te_acc = np.zeros(T)
tr_err = np.zeros(T)
te_err = np.zeros(T)
dl = np.zeros(T)
previous_weights = None
columns = ['t', 'Train loss', 'Train nll', 'Train error (%)', 'Test nll', 'Test error (%)']
t = torch.FloatTensor([0.0]).cuda()
for i, t_value in enumerate(ts):
t.data.fill_(t_value)
weights = model.weights(t)
if previous_weights is not None:
dl[i] = np.sqrt(np.sum(np.square(weights - previous_weights)))
previous_weights = weights.copy()
utils.update_bn(loaders['train'], model, t=t)
tr_res = utils.test(loaders['train'], model, criterion, regularizer, t=t)
te_res = utils.test(loaders['test'], model, criterion, regularizer, t=t)
tr_loss[i] = tr_res['loss']
tr_nll[i] = tr_res['nll']
tr_acc[i] = tr_res['accuracy']
tr_err[i] = 100.0 - tr_acc[i]
te_loss[i] = te_res['loss']
te_nll[i] = te_res['nll']
te_acc[i] = te_res['accuracy']
te_err[i] = 100.0 - te_acc[i]
    values = [t_value, tr_loss[i], tr_nll[i], tr_err[i], te_nll[i], te_err[i]]
table = tabulate.tabulate([values], columns, tablefmt='simple', floatfmt='10.4f')
if i % 40 == 0:
table = table.split('\n')
table = '\n'.join([table[1]] + table)
else:
table = table.split('\n')[2]
print(table)
def stats(values, dl):
    min_v = np.min(values)
    max_v = np.max(values)
    avg = np.mean(values)
    integral = np.sum(0.5 * (values[:-1] + values[1:]) * dl[1:]) / np.sum(dl[1:])
    return min_v, max_v, avg, integral
tr_loss_min, tr_loss_max, tr_loss_avg, tr_loss_int = stats(tr_loss, dl)
tr_nll_min, tr_nll_max, tr_nll_avg, tr_nll_int = stats(tr_nll, dl)
tr_err_min, tr_err_max, tr_err_avg, tr_err_int = stats(tr_err, dl)
te_loss_min, te_loss_max, te_loss_avg, te_loss_int = stats(te_loss, dl)
te_nll_min, te_nll_max, te_nll_avg, te_nll_int = stats(te_nll, dl)
te_err_min, te_err_max, te_err_avg, te_err_int = stats(te_err, dl)
print('Length: %.2f' % np.sum(dl))
print(tabulate.tabulate([
['train loss', tr_loss[0], tr_loss[-1], tr_loss_min, tr_loss_max, tr_loss_avg, tr_loss_int],
['train error (%)', tr_err[0], tr_err[-1], tr_err_min, tr_err_max, tr_err_avg, tr_err_int],
['test nll', te_nll[0], te_nll[-1], te_nll_min, te_nll_max, te_nll_avg, te_nll_int],
['test error (%)', te_err[0], te_err[-1], te_err_min, te_err_max, te_err_avg, te_err_int],
], [
'', 'start', 'end', 'min', 'max', 'avg', 'int'
], tablefmt='simple', floatfmt='10.4f'))
np.savez(
os.path.join(args.dir, 'curve.npz'),
ts=ts,
dl=dl,
tr_loss=tr_loss,
tr_loss_min=tr_loss_min,
tr_loss_max=tr_loss_max,
tr_loss_avg=tr_loss_avg,
tr_loss_int=tr_loss_int,
tr_nll=tr_nll,
tr_nll_min=tr_nll_min,
tr_nll_max=tr_nll_max,
tr_nll_avg=tr_nll_avg,
tr_nll_int=tr_nll_int,
tr_acc=tr_acc,
tr_err=tr_err,
tr_err_min=tr_err_min,
tr_err_max=tr_err_max,
tr_err_avg=tr_err_avg,
tr_err_int=tr_err_int,
te_loss=te_loss,
te_loss_min=te_loss_min,
te_loss_max=te_loss_max,
te_loss_avg=te_loss_avg,
te_loss_int=te_loss_int,
te_nll=te_nll,
te_nll_min=te_nll_min,
te_nll_max=te_nll_max,
te_nll_avg=te_nll_avg,
te_nll_int=te_nll_int,
te_acc=te_acc,
te_err=te_err,
te_err_min=te_err_min,
te_err_max=te_err_max,
te_err_avg=te_err_avg,
te_err_int=te_err_int,
)
| 6,194 | 31.952128 | 100 | py |
MCGR | MCGR-main/test.py | import argparse
import torch
import curves
import d
import models
import attack.pgd as pgd
import attack.pgd2 as pgd2
from tqdm import tqdm
from attack.autopgd_train import apgd_train,pgd_1
from attack.att import msd_v1,msd_v0,l1_dir_topk,pgd_l1_topk
parser = argparse.ArgumentParser(description='DNN curve training')
parser.add_argument('--dir', type=str, default='/tmp/curve/', metavar='DIR',
help='training directory (default: /tmp/curve/)')
parser.add_argument('--transform', type=str, default='ResNet', metavar='TRANSFORM',
help='transform name (default: VGG)')
parser.add_argument('--batch_size', type=int, default=128, metavar='N',
help='input batch size (default: 128)')
parser.add_argument('--num-workers', type=int, default=4, metavar='N',
help='number of workers (default: 4)')
parser.add_argument('--model', type=str, default='PreResNet110', metavar='MODEL',
help='model name (default: None)')
parser.add_argument('--curve', type=str, default=None, metavar='CURVE',
help='curve type to use (default: None)')
parser.add_argument('--num_bends', type=int, default=3, metavar='N',
help='number of curve bends (default: 3)')
parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
parser.add_argument('--init_start', type=str, default=None, metavar='CKPT',
                    help='checkpoint to init start point (default: None)')
parser.add_argument('--fix_start', dest='fix_start', action='store_true',
                    help='fix start point (default: off)')
parser.add_argument('--init_end', type=str, default=None, metavar='CKPT',
                    help='checkpoint to init end point (default: None)')
parser.add_argument('--fix_end', dest='fix_end', action='store_true',
                    help='fix end point (default: off)')
parser.set_defaults(init_linear=True)
parser.add_argument('--init_linear_off', dest='init_linear', action='store_false',
                    help='turns off linear initialization of intermediate points (default: on)')
parser.add_argument('--resume', type=str, default=None, metavar='CKPT',
                    help='checkpoint to resume training from (default: None)')
args = parser.parse_args()
torch.backends.cudnn.benchmark = True
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
D=d.Data()
data_loader_test = torch.utils.data.DataLoader(dataset=D.data_test,
batch_size=128,
shuffle=True,
num_workers=16)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
architecture = getattr(models, args.model)
if args.curve is None:
model = architecture.base(num_classes=10, **architecture.kwargs)
else:
curve = getattr(curves, args.curve)
model = curves.CurveNet(
10,
curve,
architecture.curve,
args.num_bends,
args.fix_start,
args.fix_end,
architecture_kwargs=architecture.kwargs,
)
    base_model = None
    if args.resume is None:
        for path, k in [(args.init_start, 0), (args.init_end, args.num_bends - 1)]:
            if path is not None:
                if base_model is None:
                    base_model = architecture.base(num_classes=10, **architecture.kwargs)
                checkpoint = torch.load(path)
                print('Loading %s as point #%d' % (path, k))
                base_model.load_state_dict(checkpoint['model_state'])
                model.import_base_parameters(base_model, k)
        if args.init_linear:
            print('Linear initialization.')
            model.init_linear()
#model.load_state_dict(torch.load('./save/pgd/checkpoint-150.pt')['model_state'])
model.load_state_dict(torch.load('./save/msd3-test/checkpoint-50.pt')['model_state'])
model.cuda()
model.eval()
test_correct = 0
for data in tqdm(data_loader_test):
X_test, y_test = data
X_test, y_test = X_test.to(device), y_test.to(device)
outputs = model(X_test)
_, pred = torch.max(outputs.data, 1)
test_correct += torch.sum(pred == y_test.data)
print("Test Accuracy is:{:.4f}%".format(100 * test_correct / len(D.data_test)))
correct=0
for data in tqdm(data_loader_test):
X_test, y_test = data
X_test, y_test = X_test.to(device), y_test.to(device)
X_test= pgd_1(model, X_test, y_test,eps=12, n_iter=10)
outputs = model(X_test)
_, pred = torch.max(outputs.data, 1)
correct += torch.sum(pred == y_test.data)
print("Test Accuracy after AT1-slide is:{:.4f}%".format(100 * correct / len(D.data_test)))
correct=0
for data in tqdm(data_loader_test):
X_test, y_test = data
X_test, y_test = X_test.to(device), y_test.to(device)
X_test, acc_tr, _, _ = apgd_train(model, X_test, y_test, norm='L1',eps=12, n_iter=10)
outputs = model(X_test)
_, pred = torch.max(outputs.data, 1)
correct += torch.sum(pred == y_test.data)
print("Test Accuracy after AT1-auto is:{:.4f}%".format(100 * correct / len(D.data_test)))
correct=0
for data in tqdm(data_loader_test):
X_test, y_test = data
X_test, y_test = X_test.to(device), y_test.to(device)
X_test += pgd_l1_topk(model,X_test,y_test, epsilon=12, alpha=0.05, num_iter = 50, device = "cuda:0", restarts = 0, version = 0)
outputs = model(X_test)
_, pred = torch.max(outputs.data, 1)
correct += torch.sum(pred == y_test.data)
print("Test Accuracy after AT1 is:{:.4f}%".format(100 * correct / len(D.data_test)))
at=pgd.PGD()
at2=pgd2.PGD()
correct=0
for data in tqdm(data_loader_test):
X_test, y_test = data
X_test, y_test = X_test.to(device), y_test.to(device)
X_test=at.generate(model,X_test,y_test,None,device)
outputs = model(X_test)
_, pred = torch.max(outputs.data, 1)
correct += torch.sum(pred == y_test.data)
print("Test Accuracy after AT is:{:.4f}%".format(100 * correct / len(D.data_test)))
correct=0
for data in tqdm(data_loader_test):
X_test, y_test = data
X_test, y_test = X_test.to(device), y_test.to(device)
X_test=at2.generate(model,X_test,y_test,None,device)
outputs = model(X_test)
_, pred = torch.max(outputs.data, 1)
correct += torch.sum(pred == y_test.data)
print("Test Accuracy after AT2 is:{:.4f}%".format(100 * correct / len(D.data_test)))
| 5,523 | 35.826667 | 131 | py |
MCGR | MCGR-main/curve_test.py | import torch
import argparse
import numpy as np
import os
import tabulate
import pgdtest2
import models
import curves
import utils
import data
import csv
if __name__ == '__main__':
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
cost = torch.nn.CrossEntropyLoss()
parser = argparse.ArgumentParser(description='DNN curve evaluation')
parser.add_argument('--dir', type=str, default='/tmp/eval', metavar='DIR',
help='training directory (default: /tmp/eval)')
parser.add_argument('--num_points', type=int, default=61, metavar='N',
help='number of points on the curve (default: 61)')
parser.add_argument('--dataset', type=str, default='CIFAR10', metavar='DATASET',
help='dataset name (default: CIFAR10)')
parser.add_argument('--use_test', action='store_true',
help='switches between validation and test set (default: validation)')
parser.add_argument('--transform', type=str, default='ResNet', metavar='TRANSFORM',
help='transform name (default: VGG)')
parser.add_argument('--data_path', type=str, default='data', metavar='PATH',
help='path to datasets location (default: None)')
parser.add_argument('--batch_size', type=int, default=128, metavar='N',
help='input batch size (default: 128)')
parser.add_argument('--num_workers', type=int, default=4, metavar='N',
help='number of workers (default: 12)')
parser.add_argument('--model', type=str, default='PreResNet110', metavar='MODEL',
help='model name (default: None)')
parser.add_argument('--curve', type=str, default='Bezier', metavar='CURVE',
help='curve type to use (default: None)')
parser.add_argument('--num_bends', type=int, default=3, metavar='N',
help='number of curve bends (default: 3)')
parser.add_argument('--ckpt', type=str, default='./realsave/pgd-pgd2-150_msd/checkpoint-200.pt', metavar='CKPT',
help='checkpoint to eval (default: None)')
parser.add_argument('--wd', type=float, default=1e-4, metavar='WD',
help='weight decay (default: 1e-4)')
args = parser.parse_args()
os.makedirs(args.dir, exist_ok=True)
torch.backends.cudnn.benchmark = True
loaders, num_classes = data.loaders(
args.dataset,
args.data_path,
args.batch_size,
args.num_workers,
args.transform,
args.use_test,
shuffle_train=False
)
architecture = getattr(models, args.model)
curve = getattr(curves, args.curve)
model = curves.CurveNet(
num_classes,
curve,
architecture.curve,
args.num_bends,
architecture_kwargs=architecture.kwargs,
)
pgdtest=pgdtest2.PGDTest()
model.cuda()
checkpoint = torch.load(args.ckpt)
model.load_state_dict(checkpoint['model_state'])
T = args.num_points
columns = ['t','Test clean acc','Test pgd_inf acc','Test pgd_2 acc','Train clean loss','Train pgd_inf loss','Train pgd_2 loss']
ts = np.linspace(0.0, 1.0, T)
teca=np.zeros(T)
teia = np.zeros(T)
te2a = np.zeros(T)
tecl = np.zeros(T)
teil = np.zeros(T)
te2l = np.zeros(T)
trca = np.zeros(T)
tria = np.zeros(T)
tr2a = np.zeros(T)
trcl = np.zeros(T)
tril = np.zeros(T)
tr2l = np.zeros(T)
t = torch.FloatTensor([0.0]).cuda()
#f = open(args.dir+'/log.csv', 'w')
for i, t_value in enumerate(ts):
        # Evaluate a single fixed point on the curve (t=0.82); the break below
        # exits after the first iteration, so t_value is intentionally unused.
        t.data.fill_(0.82)
utils.update_bn(loaders['train'], model, t=t)
teca[i],teia[i],te2a[i],tecl[i],teil[i],te2l[i],trca[i],tria[i],tr2a[i],trcl[i],tril[i],tr2l[i]=pgdtest.test(model,0,t=t)
        values = [t.item(), teca[i], teia[i], te2a[i], trcl[i], tril[i], tr2l[i]]
table = tabulate.tabulate([values], columns, tablefmt='simple', floatfmt='10.4f')
print(table)
break
| 4,043 | 34.787611 | 131 | py |
MCGR | MCGR-main/d.py | import torch
from torchvision import datasets, transforms
from torch.utils.data import random_split
class Data:
def __init__(self):
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
])
transform = transforms.Compose([transforms.ToTensor()])
self.data_train = datasets.CIFAR10(root='./data/', train=True,
download=True, transform=transform_train)
self.data_test = datasets.CIFAR10(root='./data/', train=False,
download=True, transform=transform)
if __name__ == '__main__':
# D1=Data('mnist')
D2=Data()
| 755 | 36.8 | 82 | py |
MCGR | MCGR-main/other_utils.py | import os
import torch
class Logger():
def __init__(self, log_path):
self.log_path = log_path
def log(self, str_to_log):
print(str_to_log)
if not self.log_path is None:
with open(self.log_path, 'a') as f:
f.write(str_to_log + '\n')
f.flush()
def check_imgs(adv, x, norm):
delta = (adv - x).view(adv.shape[0], -1)
if norm == 'Linf':
res = delta.abs().max(dim=1)[0]
elif norm == 'L2':
res = (delta ** 2).sum(dim=1).sqrt()
elif norm == 'L1':
res = delta.abs().sum(dim=1)
str_det = 'max {} pert: {:.5f}, nan in imgs: {}, max in imgs: {:.5f}, min in imgs: {:.5f}'.format(
norm, res.max(), (adv != adv).sum(), adv.max(), adv.min())
print(str_det)
return str_det
def L1_norm(x, keepdim=False):
z = x.abs().view(x.shape[0], -1).sum(-1)
if keepdim:
z = z.view(-1, *[1]*(len(x.shape) - 1))
return z
def L2_norm(x, keepdim=False):
z = (x ** 2).view(x.shape[0], -1).sum(-1).sqrt()
if keepdim:
z = z.view(-1, *[1]*(len(x.shape) - 1))
return z
def L0_norm(x):
return (x != 0.).view(x.shape[0], -1).sum(-1)
def makedir(path):
if not os.path.exists(path):
os.makedirs(path)
| 1,284 | 25.22449 | 102 | py |
MCGR | MCGR-main/train_.py | import argparse
import os
import sys
import tabulate
import time
import torch
import torch.nn.functional as F
import curves
import data
import models
import utils
parser = argparse.ArgumentParser(description='DNN curve training')
parser.add_argument('--dir', type=str, default='/tmp/curve/', metavar='DIR',
help='training directory (default: /tmp/curve/)')
parser.add_argument('--dataset', type=str, default='CIFAR10', metavar='DATASET',
help='dataset name (default: CIFAR10)')
parser.add_argument('--use_test', action='store_true',
help='switches between validation and test set (default: validation)')
parser.add_argument('--transform', type=str, default='VGG', metavar='TRANSFORM',
help='transform name (default: VGG)')
parser.add_argument('--data_path', type=str, default=None, metavar='PATH',
help='path to datasets location (default: None)')
parser.add_argument('--batch_size', type=int, default=128, metavar='N',
help='input batch size (default: 128)')
parser.add_argument('--num-workers', type=int, default=4, metavar='N',
help='number of workers (default: 4)')
parser.add_argument('--model', type=str, default=None, metavar='MODEL', required=True,
help='model name (default: None)')
parser.add_argument('--curve', type=str, default=None, metavar='CURVE',
help='curve type to use (default: None)')
parser.add_argument('--num_bends', type=int, default=3, metavar='N',
help='number of curve bends (default: 3)')
parser.add_argument('--init_start', type=str, default=None, metavar='CKPT',
help='checkpoint to init start point (default: None)')
parser.add_argument('--fix_start', dest='fix_start', action='store_true',
help='fix start point (default: off)')
parser.add_argument('--init_end', type=str, default=None, metavar='CKPT',
help='checkpoint to init end point (default: None)')
parser.add_argument('--fix_end', dest='fix_end', action='store_true',
help='fix end point (default: off)')
parser.set_defaults(init_linear=True)
parser.add_argument('--init_linear_off', dest='init_linear', action='store_false',
help='turns off linear initialization of intermediate points (default: on)')
parser.add_argument('--resume', type=str, default=None, metavar='CKPT',
help='checkpoint to resume training from (default: None)')
parser.add_argument('--epochs', type=int, default=200, metavar='N',
help='number of epochs to train (default: 200)')
parser.add_argument('--save_freq', type=int, default=50, metavar='N',
help='save frequency (default: 50)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='initial learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--wd', type=float, default=1e-4, metavar='WD',
help='weight decay (default: 1e-4)')
parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
parser.add_argument('--pgd', type=str, default='0', metavar='PGD', help='type of pgd attack')
parser.add_argument('--origin_model', type=str, default=None, metavar='ORIGIN_MODEL', help='origin model to load')
args = parser.parse_args()
os.makedirs(args.dir, exist_ok=True)
with open(os.path.join(args.dir, 'command.sh'), 'w') as f:
f.write(' '.join(sys.argv))
f.write('\n')
torch.backends.cudnn.benchmark = True
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
loaders, num_classes = data.loaders(
args.dataset,
args.data_path,
args.batch_size,
args.num_workers,
args.transform,
args.use_test
)
architecture = getattr(models, args.model)
if args.curve is None:
model = architecture.base(num_classes=num_classes, **architecture.kwargs)
else:
curve = getattr(curves, args.curve)
model = curves.CurveNet(
num_classes,
curve,
architecture.curve,
args.num_bends,
args.fix_start,
args.fix_end,
architecture_kwargs=architecture.kwargs,
)
    base_model = None
    if args.resume is None:
        for path, k in [(args.init_start, 0), (args.init_end, args.num_bends - 1)]:
            if path is not None:
                if base_model is None:
                    base_model = architecture.base(num_classes=num_classes, **architecture.kwargs)
                checkpoint = torch.load(path)
                print('Loading %s as point #%d' % (path, k))
                base_model.load_state_dict(checkpoint['model_state'])
                model.import_base_parameters(base_model, k)
        if args.init_linear:
            print('Linear initialization.')
            model.init_linear()
if args.origin_model is not None:
    model.load_state_dict(torch.load(args.origin_model)['model_state'])
model.cuda()
def learning_rate_schedule(base_lr, epoch, total_epochs):
alpha = epoch / total_epochs
if alpha <= 0.5:
factor = 1.0
elif alpha <= 0.9:
factor = 1.0 - (alpha - 0.5) / 0.4 * 0.99
else:
factor = 0.01
return factor * base_lr
criterion = F.cross_entropy
regularizer = None if args.curve is None else curves.l2_regularizer(args.wd)
optimizer = torch.optim.SGD(
filter(lambda param: param.requires_grad, model.parameters()),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.wd if args.curve is None else 0.0
)
start_epoch = 1
if args.resume is not None:
print('Resume training from %s' % args.resume)
checkpoint = torch.load(args.resume)
start_epoch = checkpoint['epoch'] + 1
model.load_state_dict(checkpoint['model_state'])
optimizer.load_state_dict(checkpoint['optimizer_state'])
columns = ['ep', 'lr', 'tr_loss', 'tr_acc', 'te_nll', 'te_acc', 'time']
utils.save_checkpoint(
args.dir,
start_epoch - 1,
model_state=model.state_dict(),
optimizer_state=optimizer.state_dict()
)
has_bn = utils.check_bn(model)
test_res = {'loss': None, 'accuracy': None, 'nll': None}
for epoch in range(start_epoch, args.epochs + 1):
time_ep = time.time()
lr = learning_rate_schedule(args.lr, epoch, args.epochs)
utils.adjust_learning_rate(optimizer, lr)
train_res = utils.train(loaders['train'], model, optimizer, criterion, regularizer,pgdtype=args.pgd)
if args.curve is None or not has_bn:
test_res = utils.test(loaders['test'], model, criterion, regularizer)
if epoch % args.save_freq == 0:
utils.save_checkpoint(
args.dir,
epoch,
model_state=model.state_dict(),
optimizer_state=optimizer.state_dict()
)
time_ep = time.time() - time_ep
values = [epoch, lr, train_res['loss'], train_res['accuracy'], test_res['nll'],
test_res['accuracy'], time_ep]
table = tabulate.tabulate([values], columns, tablefmt='simple', floatfmt='9.4f')
if epoch % 40 == 1 or epoch == start_epoch:
table = table.split('\n')
table = '\n'.join([table[1]] + table)
else:
table = table.split('\n')[2]
print(table)
if args.epochs % args.save_freq != 0:
utils.save_checkpoint(
args.dir,
args.epochs,
model_state=model.state_dict(),
optimizer_state=optimizer.state_dict()
)
| 7,504 | 37.096447 | 106 | py |
MCGR | MCGR-main/utils.py | import numpy as np
import os
import torch
import curves
import attack.pgd as pgd
import attack.pgd2 as pgd2
from attack.att import *
from attack.autopgd_train import apgd_train, pgd_1
import torch.nn.functional as F  # used by predictions() below
def l2_regularizer(weight_decay):
def regularizer(model):
l2 = 0.0
for p in model.parameters():
l2 += torch.sqrt(torch.sum(p ** 2))
return 0.5 * weight_decay * l2
return regularizer
def cyclic_learning_rate(epoch, cycle, alpha_1, alpha_2):
def schedule(iter):
t = ((epoch % cycle) + iter) / cycle
if t < 0.5:
return alpha_1 * (1.0 - 2.0 * t) + alpha_2 * 2.0 * t
else:
return alpha_1 * (2.0 * t - 1.0) + alpha_2 * (2.0 - 2.0 * t)
return schedule
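# Hedged sketch (illustrative values, not project defaults): within each cycle
# the rate falls linearly from alpha_1 to alpha_2 over the first half and
# climbs back over the second half.
def _cyclic_lr_demo():
    schedule = cyclic_learning_rate(epoch=0, cycle=4, alpha_1=0.05, alpha_2=0.0001)
    # With t = iter / 4 these evaluate to 0.05, ~0.0375 and ~0.0251.
    return [schedule(it) for it in (0.0, 0.5, 1.0)]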
def adjust_learning_rate(optimizer, lr):
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
def save_checkpoint(dir, epoch, name='checkpoint', **kwargs):
state = {
'epoch': epoch,
}
state.update(kwargs)
filepath = os.path.join(dir, '%s-%d.pt' % (name, epoch))
torch.save(state, filepath)
def train(train_loader, model, optimizer, criterion, regularizer=None, lr_schedule=None,pgdtype='0',curveflag=False):
loss_sum = 0.0
correct = 0.0
#print(pgd)
num_iters = len(train_loader)
model.train()
for iter, (input, target) in enumerate(train_loader):
if lr_schedule is not None:
lr = lr_schedule(iter / num_iters)
adjust_learning_rate(optimizer, lr)
input = input.cuda()
target = target.cuda()
t = input.data.new(1).uniform_()
if pgdtype=='inf':
at = pgd.PGD()
if curveflag:
input = at.generate(model, input, target, None, 0,t=t)
else:
input=at.generate(model, input, target, None, 0)
model.train()
if pgdtype=='2':
at = pgd2.PGD()
if curveflag:
input = at.generate(model, input, target, None, 0,t=t)
else:
input = at.generate(model, input, target, None, 0)
model.train()
if pgdtype=='1':
model.eval()
if curveflag:
input += pgd_l1_topk(model, input, target, epsilon=12, alpha=0.05, num_iter=10, device="cuda:0",
restarts=0, version=0,t=t)
else:
#input+=pgd_l1_topk(model,input,target, epsilon=12, alpha=0.05, num_iter = 10, device = "cuda:0", restarts = 0, version = 0)
input, acc_tr, _, _ = apgd_train(model, input, target, norm='L1',eps=12, n_iter=10)
model.train()
if pgdtype=='msd':
if curveflag:
input += msd_v0(model, input, target, epsilon_l_inf=8 / 255, epsilon_l_2=1, epsilon_l_1=12,
alpha_l_inf=2 / 255, alpha_l_2=0.2, alpha_l_1=0.05, num_iter=10, device="cuda:0", t=t)
else:
input += msd_v0(model,input,target,epsilon_l_inf = 8/255, epsilon_l_2= 1, epsilon_l_1 = 12,
alpha_l_inf = 2/255, alpha_l_2 = 0.2, alpha_l_1 = 0.05, num_iter = 10, device = "cuda:0")
model.train()
if curveflag:
output = model(input, t=t)
else:
output = model(input)
loss = criterion(output, target)
if regularizer is not None:
loss += regularizer(model)
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_sum += loss.item() * input.size(0)
pred = output.data.argmax(1, keepdim=True)
correct += pred.eq(target.data.view_as(pred)).sum().item()
return {
'loss': loss_sum / len(train_loader.dataset),
'accuracy': correct * 100.0 / len(train_loader.dataset),
}
def test(test_loader, model, criterion, regularizer=None, **kwargs):
loss_sum = 0.0
nll_sum = 0.0
correct = 0.0
model.eval()
for input, target in test_loader:
input = input.cuda()
target = target.cuda()
output = model(input, **kwargs)
nll = criterion(output, target)
loss = nll.clone()
if regularizer is not None:
loss += regularizer(model)
nll_sum += nll.item() * input.size(0)
loss_sum += loss.item() * input.size(0)
pred = output.data.argmax(1, keepdim=True)
correct += pred.eq(target.data.view_as(pred)).sum().item()
return {
'nll': nll_sum / len(test_loader.dataset),
'loss': loss_sum / len(test_loader.dataset),
'accuracy': correct * 100.0 / len(test_loader.dataset),
}
def predictions(test_loader, model, **kwargs):
model.eval()
preds = []
targets = []
for input, target in test_loader:
input = input.cuda()
output = model(input, **kwargs)
probs = F.softmax(output, dim=1)
preds.append(probs.cpu().data.numpy())
targets.append(target.numpy())
return np.vstack(preds), np.concatenate(targets)
def isbatchnorm(module):
return issubclass(module.__class__, torch.nn.modules.batchnorm._BatchNorm) or \
issubclass(module.__class__, curves._BatchNorm)
def _check_bn(module, flag):
if isbatchnorm(module):
flag[0] = True
def check_bn(model):
flag = [False]
model.apply(lambda module: _check_bn(module, flag))
return flag[0]
def reset_bn(module):
if isbatchnorm(module):
module.reset_running_stats()
def _get_momenta(module, momenta):
if isbatchnorm(module):
momenta[module] = module.momentum
def _set_momenta(module, momenta):
if isbatchnorm(module):
module.momentum = momenta[module]
def update_bn(loader, model, **kwargs):
if not check_bn(model):
return
model.train()
momenta = {}
model.apply(reset_bn)
model.apply(lambda module: _get_momenta(module, momenta))
num_samples = 0
for input, _ in loader:
input = input.cuda()
batch_size = input.data.size(0)
momentum = batch_size / (num_samples + batch_size)
for module in momenta.keys():
module.momentum = momentum
model(input, **kwargs)
num_samples += batch_size
model.apply(lambda module: _set_momenta(module, momenta))
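# Hedged usage sketch: `loaders`, `model` (a CurveNet) and `criterion` are
# placeholders built elsewhere. BN running statistics depend on the curve
# point t, so they must be recomputed on training data before evaluation.
def _update_bn_demo(loaders, model, criterion):
    t = torch.FloatTensor([0.5]).cuda()
    update_bn(loaders['train'], model, t=t)
    return test(loaders['test'], model, criterion, t=t)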
| 6,292 | 29.697561 | 140 | py |
MCGR | MCGR-main/merge.py | import torch
import argparse
import numpy as np
import os
import tabulate
import models
import curves
import utils
import data
import csv
if __name__ == '__main__':
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
cost = torch.nn.CrossEntropyLoss()
parser = argparse.ArgumentParser(description='DNN curve evaluation')
parser.add_argument('--dir', type=str, default='/tmp/eval', metavar='DIR',
help='save directory')
parser.add_argument('--num_points', type=int, default=61, metavar='N',
help='number of points on the curve (default: 61)')
parser.add_argument('--dataset', type=str, default='CIFAR10', metavar='DATASET',
help='dataset name (default: CIFAR10)')
parser.add_argument('--use_test', action='store_true',
help='switches between validation and test set (default: validation)')
parser.add_argument('--transform', type=str, default='ResNet', metavar='TRANSFORM',
help='transform name (default: VGG)')
parser.add_argument('--data_path', type=str, default='data', metavar='PATH',
help='path to datasets location (default: None)')
parser.add_argument('--batch_size', type=int, default=128, metavar='N',
help='input batch size (default: 128)')
parser.add_argument('--num_workers', type=int, default=4, metavar='N',
help='number of workers (default: 12)')
parser.add_argument('--model', type=str, default='PreResNet110', metavar='MODEL',
help='model name (default: None)')
parser.add_argument('--curve', type=str, default='Bezier', metavar='CURVE',
help='curve type to use (default: None)')
parser.add_argument('--num_bends', type=int, default=3, metavar='N',
help='number of curve bends (default: 3)')
parser.add_argument('--ckpt', type=str, default='./finalsave/0.3-0.8-50/checkpoint-50.pt', metavar='CKPT',
help='checkpoint to merge (default: None)')
parser.add_argument('--wd', type=float, default=1e-4, metavar='WD',
help='weight decay (default: 1e-4)')
parser.add_argument('--t', type=float, default=0.5, metavar='T',
help='t (default: 0.5)')
args = parser.parse_args()
os.makedirs(args.dir, exist_ok=True)
torch.backends.cudnn.benchmark = True
loaders, num_classes = data.loaders(
args.dataset,
args.data_path,
args.batch_size,
args.num_workers,
args.transform,
args.use_test,
shuffle_train=False
)
architecture = getattr(models, args.model)
curve = getattr(curves, args.curve)
model = curves.CurveNet(
num_classes,
curve,
architecture.curve,
args.num_bends,
architecture_kwargs=architecture.kwargs,
)
model.cuda()
checkpoint = torch.load(args.ckpt)
model.load_state_dict(checkpoint['model_state'])
t = torch.FloatTensor([0.0]).cuda()
t.data.fill_(args.t)
utils.update_bn(loaders['train'], model, t=t)
    bezier = curves.Bezier(3).cuda()
    al = bezier(t)  # Bezier coefficients of the three bends at point t
    model1 = models.preresnet.PreResNet110.base(10)
    m = model.state_dict()
    m1 = model1.state_dict()
    merged = {}
    for p in m:
        is_plain_param = True
        for i in range(3):
            if p[-2:] == '_' + str(i):
                # curve parameter 'net.<name>_<i>': accumulate it with weight al[i]
                is_plain_param = False
                name = p[4:-2]
                if merged.get(name) is None:
                    merged[name] = al[i] * m[p]
                else:
                    merged[name] += al[i] * m[p]
        if is_plain_param:
            # buffers and unbent entries are copied as-is (minus the 'net.' prefix)
            name = p[4:]
            merged[name] = m[p]
    for p in m1:
        if merged.get(p) is not None:
            m1[p] = merged[p]
        else:
            m1[p] = merged[p[:-8] + 'weight']
model1.load_state_dict(m1)
utils.save_checkpoint(
args.dir,
0,
model_state=model1.state_dict()
)
print('done!')
| 4,198 | 32.592 | 110 | py |
MCGR | MCGR-main/data.py | import os
import torch
import torchvision
import torchvision.transforms as transforms
class Transforms:
class CIFAR10:
class VGG:
train = transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32, padding=4),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
class ResNet:
train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
##transforms.Normalize(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.2010]),
])
test = transforms.Compose([
transforms.ToTensor(),
#transforms.Normalize(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.2010]),
])
CIFAR100 = CIFAR10
def loaders(dataset, path, batch_size, num_workers, transform_name, use_test=False,
shuffle_train=True):
ds = getattr(torchvision.datasets, dataset)
path = os.path.join(path, dataset.lower())
transform = getattr(getattr(Transforms, dataset), transform_name)
train_set = ds(path, train=True, download=True, transform=transform.train)
if use_test:
print('You are going to run models on the test set. Are you sure?')
test_set = ds(path, train=False, download=True, transform=transform.test)
else:
print("Using train (45000) + validation (5000)")
train_set.train_data = train_set.data[:-5000]
train_set.train_labels = train_set.targets[:-5000]
test_set = ds(path, train=True, download=True, transform=transform.test)
test_set.train = False
test_set.test_data = test_set.data[-5000:]
test_set.test_labels = test_set.targets[-5000:]
delattr(test_set, 'data')
delattr(test_set, 'targets')
return {
'train': torch.utils.data.DataLoader(
train_set,
batch_size=batch_size,
shuffle=shuffle_train,
num_workers=num_workers,
pin_memory=True
),
'test': torch.utils.data.DataLoader(
test_set,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers,
pin_memory=True
),
}, max(train_set.targets) + 1
| 2,753 | 33.425 | 100 | py |
MCGR | MCGR-main/pgdtest.py | import torch
from torchvision import datasets, transforms
from torch.utils.data import random_split
import attack.pgd as pgd
import attack.pgd2 as pgd2
from attack.att import *
from tqdm import tqdm
class PGDTest():
    def __init__(self, dataset):
        ds = datasets.CIFAR100 if dataset == 'CIFAR100' else datasets.CIFAR10
        transform = transforms.Compose([transforms.ToTensor()])
        self.data_train = ds(root='./data/cifar10', train=True,
                             download=True, transform=transform)
        self.data_loader_train = torch.utils.data.DataLoader(dataset=self.data_train,
                                                             batch_size=128,
                                                             shuffle=True,
                                                             num_workers=8)
        self.data_test = ds(root='./data/cifar10', train=False,
                            download=True, transform=transform)
        self.data_loader_test = torch.utils.data.DataLoader(dataset=self.data_test,
                                                            batch_size=128,
                                                            shuffle=True,
                                                            num_workers=8)
def test_once(self,model,train,device,pgdtype,**kwargs):
correct = 0
cost = torch.nn.CrossEntropyLoss()
loss=0.0
data_loader_test=self.data_loader_test
num=len(self.data_test)
if train:
data_loader_test = self.data_loader_train
num = len(self.data_train)
at = pgd.PGD()
at2 = pgd2.PGD()
for data in tqdm(data_loader_test):
X_test, y_test = data
X_test, y_test = X_test.to(device), y_test.to(device)
if pgdtype == 'inf':
X_test = at.generate(model, X_test, y_test, None, device,**kwargs)
if pgdtype=='2':
X_test = at2.generate(model, X_test, y_test, None, device,**kwargs)
if pgdtype=='1':
X_test+= pgd_l1_topk(model, X_test, y_test, epsilon=12, alpha=0.05, num_iter=50, device="cuda:0", restarts=0,
version=0,**kwargs)
outputs = model(X_test, **kwargs)
_, pred = torch.max(outputs.data, 1)
correct += torch.sum(pred == y_test.data)
l = cost(outputs, y_test)
loss += l.item()
        return float(100 * correct / num), loss / len(data_loader_test)
def test(self,model,device,**kwargs):
model.to(device)
model.eval()
teca, tecl = self.test_once(model, False, device, '0', **kwargs)
teia, teil = self.test_once(model, False, device, 'inf', **kwargs)
te2a, te2l = self.test_once(model, False, device, '2', **kwargs)
te1a, te1l = self.test_once(model, False, device, '1', **kwargs)
trca, trcl = self.test_once(model, True, device, '0', **kwargs)
tria, tril = self.test_once(model, True, device, 'inf', **kwargs)
tr1a, tr1l = self.test_once(model, True, device, '1', **kwargs)
tr2a, tr2l = self.test_once(model, True, device, '2', **kwargs)
return teca,teia,te2a,tecl,teil,te2l,trca,tria,tr2a,trcl,tril,tr2l,te1a,te1l,tr1a,tr1l
'''
if __name__ == '__main__':
model=models.preresnet.PreResNet110.base(10)
model.load_state_dict(torch.load('./save/pgd/checkpoint-150.pt')['model_state'])
model.to(0)
pgdtest=PGDTest()
pgdtest.test(model,0)
'''
| 4,567 | 50.909091 | 125 | py |
MCGR | MCGR-main/connect.py | import argparse
import numpy as np
import os
import sys
import tabulate
import torch
import torch.nn.functional as F
import data
import models
import utils
parser = argparse.ArgumentParser(description='Connect models with polychain')
parser.add_argument('--dir', type=str, default='/tmp/chain/', metavar='DIR',
help='training directory (default: /tmp/chain)')
parser.add_argument('--num_points', type=int, default=6, metavar='N',
help='number of points between models (default: 6)')
parser.add_argument('--dataset', type=str, default='CIFAR10', metavar='DATASET',
help='dataset name (default: CIFAR10)')
parser.add_argument('--use_test', action='store_true',
help='switches between validation and test set (default: validation)')
parser.add_argument('--transform', type=str, default='VGG', metavar='TRANSFORM',
help='transform name (default: VGG)')
parser.add_argument('--data_path', type=str, default=None, metavar='PATH',
help='path to datasets location (default: None)')
parser.add_argument('--batch_size', type=int, default=128, metavar='N',
help='input batch size (default: 128)')
parser.add_argument('--num_workers', type=int, default=4, metavar='N',
help='number of workers (default: 4)')
parser.add_argument('--model', type=str, default=None, metavar='MODEL',
help='model name (default: None)')
parser.add_argument('--ckpt', type=str, action='append', metavar='CKPT', required=True,
help='checkpoint to eval, pass all the models through this parameter')
parser.add_argument('--wd', type=float, default=1e-4, metavar='WD',
help='weight decay (default: 1e-4)')
args = parser.parse_args()
os.makedirs(args.dir, exist_ok=True)
with open(os.path.join(args.dir, 'chain.sh'), 'w') as f:
f.write(' '.join(sys.argv))
f.write('\n')
torch.backends.cudnn.benchmark = True
loaders, num_classes = data.loaders(
args.dataset,
args.data_path,
args.batch_size,
args.num_workers,
args.transform,
args.use_test,
shuffle_train=False
)
architecture = getattr(models, args.model)
base_model = architecture.base(num_classes, **architecture.kwargs)
base_model.cuda()
criterion = F.cross_entropy
regularizer = utils.l2_regularizer(args.wd)
def get_weights(model):
return np.concatenate([p.data.cpu().numpy().ravel() for p in model.parameters()])
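# Hedged note: the loop below linearly interpolates the flattened weight
# vectors of consecutive checkpoints, w = (1 - alpha) * w_1 + alpha * w_2,
# writes w back into base_model parameter-by-parameter, refreshes the BN
# statistics, and evaluates train/test metrics at each point.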
T = (args.num_points - 1) * (len(args.ckpt) - 1) + 1
ts = np.linspace(0.0, len(args.ckpt) - 1, T)
tr_loss = np.zeros(T)
tr_nll = np.zeros(T)
tr_acc = np.zeros(T)
te_loss = np.zeros(T)
te_nll = np.zeros(T)
te_acc = np.zeros(T)
tr_err = np.zeros(T)
te_err = np.zeros(T)
columns = ['t', 'Train loss', 'Train nll', 'Train error (%)', 'Test nll', 'Test error (%)']
alphas = np.linspace(0.0, 1.0, args.num_points)
for path in args.ckpt:
print(path)
step = 0
for i in range(len(args.ckpt) - 1):
base_model.load_state_dict(torch.load(args.ckpt[i])['model_state'])
w_1 = get_weights(base_model)
base_model.load_state_dict(torch.load(args.ckpt[i + 1])['model_state'])
w_2 = get_weights(base_model)
for alpha in alphas[1 if i > 0 else 0:]:
w = (1.0 - alpha) * w_1 + alpha * w_2
offset = 0
for parameter in base_model.parameters():
size = np.prod(parameter.size())
value = w[offset:offset+size].reshape(parameter.size())
parameter.data.copy_(torch.from_numpy(value))
offset += size
utils.update_bn(loaders['train'], base_model)
tr_res = utils.test(loaders['train'], base_model, criterion, regularizer)
te_res = utils.test(loaders['test'], base_model, criterion, regularizer)
tr_loss[step] = tr_res['loss']
tr_nll[step] = tr_res['nll']
tr_acc[step] = tr_res['accuracy']
tr_err[step] = 100.0 - tr_acc[step]
te_loss[step] = te_res['loss']
te_nll[step] = te_res['nll']
te_acc[step] = te_res['accuracy']
te_err[step] = 100.0 - te_acc[step]
values = [ts[step], tr_loss[step], tr_nll[step], tr_err[step], te_nll[step], te_err[step]]
table = tabulate.tabulate([values], columns, tablefmt='simple', floatfmt='10.4f')
if step % 40 == 0:
table = table.split('\n')
table = '\n'.join([table[1]] + table)
else:
table = table.split('\n')[2]
print(table)
step += 1
np.savez(
os.path.join(args.dir, 'chain.npz'),
ts=ts,
tr_loss=tr_loss,
tr_nll=tr_nll,
tr_acc=tr_acc,
tr_err=tr_err,
te_loss=te_loss,
te_nll=te_nll,
te_acc=te_acc,
te_err=te_err,
)
| 4,742 | 31.486301 | 98 | py |
MCGR | MCGR-main/curves.py | import numpy as np
import math
import torch
import torch.nn.functional as F
from torch.nn import Module, Parameter
from torch.nn.modules.utils import _pair
from scipy.special import binom
class Bezier(Module):
def __init__(self, num_bends):
super(Bezier, self).__init__()
self.register_buffer(
'binom',
torch.Tensor(binom(num_bends - 1, np.arange(num_bends), dtype=np.float32))
)
self.register_buffer('range', torch.arange(0, float(num_bends)))
self.register_buffer('rev_range', torch.arange(float(num_bends - 1), -1, -1))
def forward(self, t):
return self.binom * \
torch.pow(t, self.range) * \
torch.pow((1.0 - t), self.rev_range)
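# Hedged sanity check (not part of the original API): Bezier(3) returns the
# quadratic Bernstein coefficients [(1-t)^2, 2t(1-t), t^2], which sum to 1.
def _bezier_coefficients_demo():
    # At t=0.5 this evaluates to [0.25, 0.5, 0.25].
    return Bezier(3)(torch.tensor([0.5]))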
class PolyChain(Module):
def __init__(self, num_bends):
super(PolyChain, self).__init__()
self.num_bends = num_bends
self.register_buffer('range', torch.arange(0, float(num_bends)))
def forward(self, t):
t_n = t * (self.num_bends - 1)
return torch.max(self.range.new([0.0]), 1.0 - torch.abs(t_n - self.range))
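# Hedged sanity check (not part of the original API): PolyChain(3) weights the
# bends piecewise-linearly along the chain.
def _polychain_coefficients_demo():
    # At t=0.25, halfway between bend 0 and bend 1, this gives [0.5, 0.5, 0.0].
    return PolyChain(3)(torch.tensor([0.25]))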
class CurveModule(Module):
def __init__(self, fix_points, parameter_names=()):
super(CurveModule, self).__init__()
self.fix_points = fix_points
self.num_bends = len(self.fix_points)
self.parameter_names = parameter_names
self.l2 = 0.0
def compute_weights_t(self, coeffs_t):
w_t = [None] * len(self.parameter_names)
self.l2 = 0.0
for i, parameter_name in enumerate(self.parameter_names):
for j, coeff in enumerate(coeffs_t):
parameter = getattr(self, '%s_%d' % (parameter_name, j))
if parameter is not None:
if w_t[i] is None:
w_t[i] = parameter * coeff
else:
w_t[i] += parameter * coeff
if w_t[i] is not None:
self.l2 += torch.sum(w_t[i] ** 2)
return w_t
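# Hedged note: compute_weights_t blends the per-bend copies of each parameter,
# w(t) = sum_j coeffs_t[j] * theta_j, with coeffs_t supplied by Bezier or
# PolyChain above; self.l2 accumulates ||w(t)||^2 for the curve regularizer.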
class Linear(CurveModule):
def __init__(self, in_features, out_features, fix_points, bias=True):
super(Linear, self).__init__(fix_points, ('weight', 'bias'))
self.in_features = in_features
self.out_features = out_features
self.l2 = 0.0
for i, fixed in enumerate(self.fix_points):
self.register_parameter(
'weight_%d' % i,
Parameter(torch.Tensor(out_features, in_features), requires_grad=not fixed)
)
for i, fixed in enumerate(self.fix_points):
if bias:
self.register_parameter(
'bias_%d' % i,
Parameter(torch.Tensor(out_features), requires_grad=not fixed)
)
else:
self.register_parameter('bias_%d' % i, None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1. / math.sqrt(self.in_features)
for i in range(self.num_bends):
getattr(self, 'weight_%d' % i).data.uniform_(-stdv, stdv)
bias = getattr(self, 'bias_%d' % i)
if bias is not None:
bias.data.uniform_(-stdv, stdv)
def forward(self, input, coeffs_t):
weight_t, bias_t = self.compute_weights_t(coeffs_t)
return F.linear(input, weight_t, bias_t)
class Conv2d(CurveModule):
def __init__(self, in_channels, out_channels, kernel_size, fix_points, stride=1,
padding=0, dilation=1, groups=1, bias=True):
super(Conv2d, self).__init__(fix_points, ('weight', 'bias'))
if in_channels % groups != 0:
raise ValueError('in_channels must be divisible by groups')
if out_channels % groups != 0:
raise ValueError('out_channels must be divisible by groups')
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
for i, fixed in enumerate(self.fix_points):
self.register_parameter(
'weight_%d' % i,
Parameter(
torch.Tensor(out_channels, in_channels // groups, *kernel_size),
requires_grad=not fixed
)
)
for i, fixed in enumerate(self.fix_points):
if bias:
self.register_parameter(
'bias_%d' % i,
Parameter(torch.Tensor(out_channels), requires_grad=not fixed)
)
else:
self.register_parameter('bias_%d' % i, None)
self.reset_parameters()
def reset_parameters(self):
n = self.in_channels
for k in self.kernel_size:
n *= k
stdv = 1. / math.sqrt(n)
for i in range(self.num_bends):
getattr(self, 'weight_%d' % i).data.uniform_(-stdv, stdv)
bias = getattr(self, 'bias_%d' % i)
if bias is not None:
bias.data.uniform_(-stdv, stdv)
def forward(self, input, coeffs_t):
weight_t, bias_t = self.compute_weights_t(coeffs_t)
return F.conv2d(input, weight_t, bias_t, self.stride,
self.padding, self.dilation, self.groups)
class _BatchNorm(CurveModule):
_version = 2
def __init__(self, num_features, fix_points, eps=1e-5, momentum=0.1, affine=True,
track_running_stats=True):
super(_BatchNorm, self).__init__(fix_points, ('weight', 'bias'))
self.num_features = num_features
self.eps = eps
self.momentum = momentum
self.affine = affine
self.track_running_stats = track_running_stats
self.l2 = 0.0
for i, fixed in enumerate(self.fix_points):
if self.affine:
self.register_parameter(
'weight_%d' % i,
Parameter(torch.Tensor(num_features), requires_grad=not fixed)
)
else:
self.register_parameter('weight_%d' % i, None)
for i, fixed in enumerate(self.fix_points):
if self.affine:
self.register_parameter(
'bias_%d' % i,
Parameter(torch.Tensor(num_features), requires_grad=not fixed)
)
else:
self.register_parameter('bias_%d' % i, None)
if self.track_running_stats:
self.register_buffer('running_mean', torch.zeros(num_features))
self.register_buffer('running_var', torch.ones(num_features))
self.register_buffer('num_batches_tracked', torch.tensor(0, dtype=torch.long))
else:
self.register_parameter('running_mean', None)
self.register_parameter('running_var', None)
self.register_parameter('num_batches_tracked', None)
self.reset_parameters()
def reset_running_stats(self):
if self.track_running_stats:
self.running_mean.zero_()
self.running_var.fill_(1)
self.num_batches_tracked.zero_()
def reset_parameters(self):
self.reset_running_stats()
if self.affine:
for i in range(self.num_bends):
getattr(self, 'weight_%d' % i).data.uniform_()
getattr(self, 'bias_%d' % i).data.zero_()
def _check_input_dim(self, input):
raise NotImplementedError
def forward(self, input, coeffs_t):
self._check_input_dim(input)
exponential_average_factor = 0.0
if self.training and self.track_running_stats:
self.num_batches_tracked += 1
if self.momentum is None: # use cumulative moving average
exponential_average_factor = 1.0 / self.num_batches_tracked.item()
else: # use exponential moving average
exponential_average_factor = self.momentum
weight_t, bias_t = self.compute_weights_t(coeffs_t)
return F.batch_norm(
input, self.running_mean, self.running_var, weight_t, bias_t,
self.training or not self.track_running_stats,
exponential_average_factor, self.eps)
def extra_repr(self):
return '{num_features}, eps={eps}, momentum={momentum}, affine={affine}, ' \
'track_running_stats={track_running_stats}'.format(**self.__dict__)
def _load_from_state_dict(self, state_dict, prefix, metadata, strict,
missing_keys, unexpected_keys, error_msgs):
version = metadata.get('version', None)
if (version is None or version < 2) and self.track_running_stats:
# at version 2: added num_batches_tracked buffer
# this should have a default value of 0
num_batches_tracked_key = prefix + 'num_batches_tracked'
if num_batches_tracked_key not in state_dict:
state_dict[num_batches_tracked_key] = torch.tensor(0, dtype=torch.long)
super(_BatchNorm, self)._load_from_state_dict(
state_dict, prefix, metadata, strict,
missing_keys, unexpected_keys, error_msgs)
class BatchNorm2d(_BatchNorm):
def _check_input_dim(self, input):
if input.dim() != 4:
raise ValueError('expected 4D input (got {}D input)'
.format(input.dim()))
class CurveNet(Module):
    def __init__(self, num_classes, curve, architecture, num_bends, fix_start=True, fix_end=True,
                 architecture_kwargs=None):
        super(CurveNet, self).__init__()
        if architecture_kwargs is None:
            architecture_kwargs = {}  # avoid a shared mutable default argument
self.num_classes = num_classes
self.num_bends = num_bends
self.fix_points = [fix_start] + [False] * (self.num_bends - 2) + [fix_end]
self.curve = curve
self.architecture = architecture
self.l2 = 0.0
self.coeff_layer = self.curve(self.num_bends)
self.net = self.architecture(num_classes, fix_points=self.fix_points, **architecture_kwargs)
self.curve_modules = []
for module in self.net.modules():
if issubclass(module.__class__, CurveModule):
self.curve_modules.append(module)
def import_base_parameters(self, base_model, index):
parameters = list(self.net.parameters())[index::self.num_bends]
base_parameters = base_model.parameters()
for parameter, base_parameter in zip(parameters, base_parameters):
parameter.data.copy_(base_parameter.data)
    def import_base_buffers(self, base_model):
        # Module._all_buffers() was removed from PyTorch; buffers() is the
        # public equivalent and preserves registration order.
        for buffer, base_buffer in zip(self.net.buffers(), base_model.buffers()):
            buffer.data.copy_(base_buffer.data)
def export_base_parameters(self, base_model, index):
parameters = list(self.net.parameters())[index::self.num_bends]
base_parameters = base_model.parameters()
for parameter, base_parameter in zip(parameters, base_parameters):
base_parameter.data.copy_(parameter.data)
def init_linear(self):
parameters = list(self.net.parameters())
for i in range(0, len(parameters), self.num_bends):
weights = parameters[i:i+self.num_bends]
for j in range(1, self.num_bends - 1):
alpha = j * 1.0 / (self.num_bends - 1)
weights[j].data.copy_(alpha * weights[-1].data + (1.0 - alpha) * weights[0].data)
    def weights(self, t):
        coeffs_t = self.coeff_layer(t)
        weights = []
        for module in self.curve_modules:
            weights.extend([w for w in module.compute_weights_t(coeffs_t) if w is not None])
        return np.concatenate([w.detach().cpu().numpy().ravel() for w in weights])
def _compute_l2(self):
self.l2 = sum(module.l2 for module in self.curve_modules)
def forward(self, input, t=None):
if t is None:
t = input.data.new(1).uniform_()
coeffs_t = self.coeff_layer(t)
output = self.net(input, coeffs_t)
self._compute_l2()
return output
def l2_regularizer(weight_decay):
return lambda model: 0.5 * weight_decay * model.l2
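# Usage sketch (hypothetical names; not part of the original file): the
# regularizer reads model.l2, which CurveNet.forward() refreshes on every
# pass, so it must be evaluated after the forward call.
# model = CurveNet(10, curve, architecture.curve, num_bends=3,
#                  architecture_kwargs=architecture.kwargs)
# regularizer = l2_regularizer(1e-4)
# loss = F.cross_entropy(model(input), target) + regularizer(model)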
| 12,379 | 37.566978 | 100 | py |
MCGR | MCGR-main/eval_curve_pgd.py | import torch
import argparse
import numpy as np
import os
import tabulate
import pgdtest
import models
import curves
import utils
import data
import csv
if __name__ == '__main__':
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
cost = torch.nn.CrossEntropyLoss()
parser = argparse.ArgumentParser(description='DNN curve evaluation')
parser.add_argument('--dir', type=str, default='/tmp/eval', metavar='DIR',
help='training directory (default: /tmp/eval)')
parser.add_argument('--num_points', type=int, default=61, metavar='N',
help='number of points on the curve (default: 61)')
parser.add_argument('--dataset', type=str, default='CIFAR10', metavar='DATASET',
help='dataset name (default: CIFAR10)')
parser.add_argument('--use_test', action='store_true',
help='switches between validation and test set (default: validation)')
parser.add_argument('--transform', type=str, default='VGG', metavar='TRANSFORM',
help='transform name (default: VGG)')
parser.add_argument('--data_path', type=str, default=None, metavar='PATH',
help='path to datasets location (default: None)')
parser.add_argument('--batch_size', type=int, default=128, metavar='N',
help='input batch size (default: 128)')
    parser.add_argument('--num_workers', type=int, default=4, metavar='N',
                        help='number of workers (default: 4)')
parser.add_argument('--model', type=str, default=None, metavar='MODEL',
help='model name (default: None)')
parser.add_argument('--curve', type=str, default=None, metavar='CURVE',
help='curve type to use (default: None)')
parser.add_argument('--num_bends', type=int, default=3, metavar='N',
help='number of curve bends (default: 3)')
parser.add_argument('--ckpt', type=str, default=None, metavar='CKPT',
help='checkpoint to eval (default: None)')
parser.add_argument('--wd', type=float, default=1e-4, metavar='WD',
help='weight decay (default: 1e-4)')
args = parser.parse_args()
os.makedirs(args.dir, exist_ok=True)
torch.backends.cudnn.benchmark = True
    # avoid rebinding the imported `pgdtest` module to the tester instance
    tester = pgdtest.PGDTest(args.dataset)
loaders, num_classes = data.loaders(
args.dataset,
args.data_path,
args.batch_size,
args.num_workers,
args.transform,
args.use_test,
shuffle_train=False
)
architecture = getattr(models, args.model)
curve = getattr(curves, args.curve)
model = curves.CurveNet(
num_classes,
curve,
architecture.curve,
args.num_bends,
architecture_kwargs=architecture.kwargs,
)
model.cuda()
checkpoint = torch.load(args.ckpt)
model.load_state_dict(checkpoint['model_state'])
T = args.num_points
    columns = ['t', 'Test clean acc', 'Test pgd_inf acc', 'Test pgd_2 acc', 'Test pgd_1 acc',
               'Train clean loss', 'Train pgd_inf loss', 'Train pgd_2 loss', 'Train pgd_1 loss']
ts = np.linspace(0.0, 1.0, T)
teca=np.zeros(T)
teia = np.zeros(T)
te2a = np.zeros(T)
te1a= np.zeros(T)
tecl = np.zeros(T)
teil = np.zeros(T)
te2l = np.zeros(T)
te1l= np.zeros(T)
trca = np.zeros(T)
tria = np.zeros(T)
tr2a = np.zeros(T)
tr1a= np.zeros(T)
trcl = np.zeros(T)
tril = np.zeros(T)
tr2l = np.zeros(T)
tr1l= np.zeros(T)
t = torch.FloatTensor([0.0]).cuda()
    log_path = os.path.join(args.dir, 'log.csv')
    f = open(log_path, 'w')
    print(log_path)
for i, t_value in enumerate(ts):
t.data.fill_(t_value)
utils.update_bn(loaders['train'], model, t=t)
        teca[i],teia[i],te2a[i],tecl[i],teil[i],te2l[i],trca[i],tria[i],tr2a[i],trcl[i],tril[i],tr2l[i],te1a[i],te1l[i],tr1a[i],tr1l[i] = tester.test(model, 0, t=t)
        # report the plain float t_value, not the CUDA tensor t, so tabulate's
        # floatfmt applies cleanly
        values = [t_value, teca[i], teia[i], te2a[i], te1a[i], trcl[i], tril[i], tr2l[i], tr1l[i]]
table = tabulate.tabulate([values], columns, tablefmt='simple', floatfmt='10.4f')
if i % 40 == 0:
table = table.split('\n')
table = '\n'.join([table[1]] + table)
else:
table = table.split('\n')[2]
print(table)
with f:
write = csv.writer(f)
write.writerow(['t','test clean acc','test pgd_inf acc','test pgd_2 acc','test pgd_1 acc','test clean loss','test pgd_inf loss','test pgd_2 loss','test pgd_1 loss',\
'train clean acc','train pgd_inf acc','train pgd_2 acc','train pgd_1 acc','train clean loss','train pgd_inf loss','train pgd_2 loss','train pgd_1 loss'])
for i in range(T):
values=[ts[i],teca[i],teia[i],te2a[i],te1a[i],tecl[i],teil[i],te2l[i],te1l[i],trca[i],tria[i],tr2a[i],tr1a[i],trcl[i],tril[i],tr2l[i],tr1l[i]]
write.writerow(values)
| 4,961 | 38.070866 | 181 | py |
MCGR | MCGR-main/train.py | import argparse
import os
import sys
import tabulate
import time
import torch
import torch.nn.functional as F
import curves
import data
import models
import utils
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='DNN curve training')
parser.add_argument('--dir', type=str, default='/tmp/curve/', metavar='DIR',
help='training directory (default: /tmp/curve/)')
parser.add_argument('--dataset', type=str, default='CIFAR10', metavar='DATASET',
help='dataset name (default: CIFAR10)')
parser.add_argument('--use_test', action='store_true',
help='switches between validation and test set (default: validation)')
parser.add_argument('--transform', type=str, default='VGG', metavar='TRANSFORM',
help='transform name (default: VGG)')
parser.add_argument('--data_path', type=str, default=None, metavar='PATH',
help='path to datasets location (default: None)')
parser.add_argument('--batch_size', type=int, default=128, metavar='N',
help='input batch size (default: 128)')
parser.add_argument('--num-workers', type=int, default=4, metavar='N',
help='number of workers (default: 4)')
parser.add_argument('--model', type=str, default=None, metavar='MODEL', required=True,
help='model name (default: None)')
parser.add_argument('--curve', type=str, default=None, metavar='CURVE',
help='curve type to use (default: None)')
parser.add_argument('--num_bends', type=int, default=3, metavar='N',
help='number of curve bends (default: 3)')
parser.add_argument('--init_start', type=str, default=None, metavar='CKPT',
help='checkpoint to init start point (default: None)')
parser.add_argument('--fix_start', dest='fix_start', action='store_true',
help='fix start point (default: off)')
parser.add_argument('--init_end', type=str, default=None, metavar='CKPT',
help='checkpoint to init end point (default: None)')
parser.add_argument('--fix_end', dest='fix_end', action='store_true',
help='fix end point (default: off)')
parser.set_defaults(init_linear=True)
parser.add_argument('--init_linear_off', dest='init_linear', action='store_false',
help='turns off linear initialization of intermediate points (default: on)')
parser.add_argument('--resume', type=str, default=None, metavar='CKPT',
help='checkpoint to resume training from (default: None)')
parser.add_argument('--epochs', type=int, default=200, metavar='N',
help='number of epochs to train (default: 200)')
parser.add_argument('--save_freq', type=int, default=50, metavar='N',
help='save frequency (default: 50)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='initial learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--wd', type=float, default=1e-4, metavar='WD',
help='weight decay (default: 1e-4)')
parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
    parser.add_argument('--pgd', type=str, default=0, metavar='PGD',
                        help='type of pgd attack (default: 0)')
args = parser.parse_args()
os.makedirs(args.dir, exist_ok=True)
with open(os.path.join(args.dir, 'command.sh'), 'w') as f:
f.write(' '.join(sys.argv))
f.write('\n')
torch.backends.cudnn.benchmark = True
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
loaders, num_classes = data.loaders(
args.dataset,
args.data_path,
args.batch_size,
args.num_workers,
args.transform,
args.use_test
)
architecture = getattr(models, args.model)
curveflag=False
if args.curve is None:
model = architecture.base(num_classes=num_classes, **architecture.kwargs)
else:
curveflag=True
curve = getattr(curves, args.curve)
model = curves.CurveNet(
num_classes,
curve,
architecture.curve,
args.num_bends,
args.fix_start,
args.fix_end,
architecture_kwargs=architecture.kwargs,
)
base_model = None
if args.resume is None:
for path, k in [(args.init_start, 0), (args.init_end, args.num_bends - 1)]:
if path is not None:
if base_model is None:
base_model = architecture.base(num_classes=num_classes, **architecture.kwargs)
checkpoint = torch.load(path)
print('Loading %s as point #%d' % (path, k))
base_model.load_state_dict(checkpoint['model_state'])
model.import_base_parameters(base_model, k)
if args.init_linear:
print('Linear initialization.')
model.init_linear()
model.cuda()
def learning_rate_schedule(base_lr, epoch, total_epochs):
alpha = epoch / total_epochs
if alpha <= 0.5:
factor = 1.0
elif alpha <= 0.9:
factor = 1.0 - (alpha - 0.5) / 0.4 * 0.99
else:
factor = 0.01
return factor * base_lr
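    # Worked values for the schedule above (assuming base_lr=0.1, 200 epochs):
    # epochs 1-100 keep lr = 0.1; by epoch 140 (alpha = 0.7) lr has decayed
    # linearly to ~0.0505; from epoch 180 on (alpha > 0.9) lr = 0.001.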
criterion = F.cross_entropy
regularizer = None if args.curve is None else curves.l2_regularizer(args.wd)
optimizer = torch.optim.SGD(
filter(lambda param: param.requires_grad, model.parameters()),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.wd if args.curve is None else 0.0
)
start_epoch = 1
if args.resume is not None:
print('Resume training from %s' % args.resume)
checkpoint = torch.load(args.resume)
start_epoch = checkpoint['epoch'] + 1
model.load_state_dict(checkpoint['model_state'])
optimizer.load_state_dict(checkpoint['optimizer_state'])
columns = ['ep', 'lr', 'tr_loss', 'tr_acc', 'te_nll', 'te_acc', 'time']
utils.save_checkpoint(
args.dir,
start_epoch - 1,
model_state=model.state_dict(),
optimizer_state=optimizer.state_dict()
)
has_bn = utils.check_bn(model)
test_res = {'loss': None, 'accuracy': None, 'nll': None}
for epoch in range(start_epoch, args.epochs + 1):
time_ep = time.time()
lr = learning_rate_schedule(args.lr, epoch, args.epochs)
utils.adjust_learning_rate(optimizer, lr)
train_res = utils.train(loaders['train'], model, optimizer, criterion, regularizer,pgdtype=args.pgd,curveflag=curveflag)
if args.curve is None or not has_bn:
test_res = utils.test(loaders['test'], model, criterion, regularizer)
if epoch % args.save_freq == 0:
utils.save_checkpoint(
args.dir,
epoch,
model_state=model.state_dict(),
optimizer_state=optimizer.state_dict()
)
time_ep = time.time() - time_ep
values = [epoch, lr, train_res['loss'], train_res['accuracy'], test_res['nll'],
test_res['accuracy'], time_ep]
table = tabulate.tabulate([values], columns, tablefmt='simple', floatfmt='9.4f')
if epoch % 40 == 1 or epoch == start_epoch:
table = table.split('\n')
table = '\n'.join([table[1]] + table)
else:
table = table.split('\n')[2]
print(table)
if args.epochs % args.save_freq != 0:
utils.save_checkpoint(
args.dir,
args.epochs,
model_state=model.state_dict(),
optimizer_state=optimizer.state_dict()
)
| 8,033 | 39.989796 | 128 | py |
MCGR | MCGR-main/attack/pgd.py | import torch
import torch.nn as nn
class PGD:
def __init__(self, eps=8 / 255., step_size=2 / 255., max_iter=10, random_init=True,
targeted=False, loss_fn=nn.CrossEntropyLoss(), batch_size=64):
self.eps = eps
self.step_size = step_size
self.max_iter = max_iter
self.random_init = random_init
self.targeted = targeted
self.loss_fn = loss_fn
self.batch_size = batch_size
def attack(self, model, x, y, x_adv=None, targets=None,**kwargs):
if x_adv is None:
if self.random_init:
x_adv = 2 * self.eps * (torch.rand_like(x) - 0.5) + x
x_adv = x_adv.clamp(0.0, 1.0)
else:
x_adv = torch.clone(x).detach()
x_adv.requires_grad_(True)
pred_adv = model(x_adv,**kwargs)
if isinstance(pred_adv, (list, tuple)):
pred_adv = pred_adv[-1]
if self.targeted:
assert targets is not None, "Target labels not found!"
loss = self.loss_fn(pred_adv, targets)
else:
            loss = self.loss_fn(pred_adv, y)
        # `loss` already tracks gradients through x_adv; the original
        # loss.requires_grad_(True) call was redundant (and can error on a
        # non-leaf tensor), so it is dropped.
        grad = torch.autograd.grad(loss, x_adv)[0]
pert = self.step_size * grad.sign()
        x_adv = (x_adv + pert).clamp(0.0, 1.0).detach()
        pert = (x_adv - x).clamp(-self.eps, self.eps)
        # re-clamp: projecting back into the eps-ball can push x + pert
        # slightly outside the valid [0, 1] image box
        return (x + pert).clamp(0.0, 1.0)
def generate(self, model, x, y=None, targets=None, device=torch.device("cpu"), **kwargs):
model.to(device)
model.eval()
x_adv = []
for i in range(0, x.size(0), self.batch_size):
x_batch = x[i: i + self.batch_size].to(device)
if y is None:
y_batch = model(x_batch,**kwargs)
                if isinstance(y_batch, (list, tuple)):
y_batch = y_batch[-1]
y_batch = y_batch.max(dim=-1)[1].to(device)
else:
y_batch = y[i: i + self.batch_size].to(device)
for j in range(self.max_iter):
if j == 0:
x_adv_batch = self.attack(model, x_batch, y_batch, targets=targets,**kwargs)
else:
x_adv_batch = self.attack(model, x_batch, y_batch, x_adv_batch, targets=targets,**kwargs)
x_adv.append(x_adv_batch)
return torch.cat(x_adv, dim=0).to(device)
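# Usage sketch (hypothetical model/data; not part of the original file):
# attack = PGD(eps=8/255., step_size=2/255., max_iter=10, batch_size=128)
# x_adv = attack.generate(model, images, labels, device=torch.device("cuda"))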
| 2,353 | 38.233333 | 109 | py |
MCGR | MCGR-main/attack/att.py | import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from torchvision.datasets import CIFAR10
from torch.utils.data import DataLoader, TensorDataset
import torchvision.transforms as transforms
import torch.optim as optim
import torch.nn as nn
import torch
#import ipdb
import random
def norms_l0(Z):
return ((Z.view(Z.shape[0], -1)!=0).sum(dim=1)[:,None,None,None]).float()
def norms_l1(Z):
return Z.view(Z.shape[0], -1).abs().sum(dim=1)[:,None,None,None]
def norms(Z):
return Z.view(Z.shape[0], -1).norm(dim=1)[:,None,None,None]
def norms_l2(Z):
return norms(Z)
def norms_linf(Z):
return Z.view(Z.shape[0], -1).abs().max(dim=1)[0]
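# The trailing [:, None, None, None] in the helpers above keeps per-example
# norms shaped (B, 1, 1, 1) so they broadcast against (B, C, H, W) image
# batches, e.g. `delta / norms(delta)` normalizes each example independently;
# norms_linf instead returns a flat (B,) tensor.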
'''
#DEFAULTS
pgd_linf: epsilon=0.03, alpha=0.003, num_iter = 40
pgd_l0 : epsilon = 12, alpha = 1
pgd_l1_topk : epsilon = 12, alpha = 0.05, num_iter = 40, k = rand(5,20) --> (alpha = alpha/k *20)
pgd_l2 : epsilon =0.5, alpha=0.05, num_iter = 50
'''
def pgd_l2(model, X, y, epsilon=0.5, alpha=0.05, num_iter = 10, device = "cuda:0", restarts = 0, version = 0):
max_delta = torch.zeros_like(X)
delta = torch.zeros_like(X, requires_grad = True)
for t in range(num_iter):
output = model(X+delta)
incorrect = output.max(1)[1] != y
correct = (~incorrect).unsqueeze(1).unsqueeze(1).unsqueeze(1).half()
correct = 1.0 if version == 0 else correct
#Finding the correct examples so as to attack only them only for version 1 (Test time)
loss = nn.CrossEntropyLoss()(model(X + delta), y)
loss.backward()
delta.data += correct*alpha*delta.grad.detach() / norms(delta.grad.detach())
delta.data *= epsilon / norms(delta.detach()).clamp(min=epsilon)
delta.data = torch.min(torch.max(delta.detach(), -X), 1-X) # clip X+delta to [0,1]
delta.grad.zero_()
max_delta = delta.detach()
for k in range (restarts):
delta = torch.rand_like(X, requires_grad=True)
        # match the init used in pgd_linf/pgd_l1_topk: map U(0,1) to [-eps, eps]
        # (the original in-place `*=` re-used the random draw and skewed the init)
        delta.data = (2.0*delta.data - 1.0)*epsilon
delta.data /= norms(delta.detach()).clamp(min=epsilon)
for t in range(num_iter):
output = model(X+delta)
incorrect = output.max(1)[1] != y
correct = (~incorrect).unsqueeze(1).unsqueeze(1).unsqueeze(1).half()
correct = 1.0 if version == 0 else correct
#Finding the correct examples so as to attack only them only for version 1
loss = nn.CrossEntropyLoss()(model(X + delta), y)
loss.backward()
delta.data += correct*alpha*delta.grad.detach() / norms(delta.grad.detach())
delta.data *= epsilon / norms(delta.detach()).clamp(min=epsilon)
delta.data = torch.min(torch.max(delta.detach(), -X), 1-X) # clip X+delta to [0,1]
delta.grad.zero_()
output = model(X+delta)
incorrect = output.max(1)[1] != y
#Edit Max Delta only for successful attacks
max_delta[incorrect] = delta.detach()[incorrect]
return max_delta
def pgd_l1_topk(model, X,y, epsilon = 12, alpha = 0.05, num_iter = 50, k = 20, device = "cuda:1", restarts = 1, version = 0,**kwargs):
#Gap : Dont attack pixels closer than the gap value to 0 or 1
gap = 0.05
max_delta = torch.zeros_like(X)
delta = torch.zeros_like(X, requires_grad = True)
for t in range (num_iter):
output = model(X+delta)
incorrect = output.max(1)[1] != y
correct = (~incorrect).unsqueeze(1).unsqueeze(1).unsqueeze(1).half()
correct = 1.0 if version == 0 else correct
#Finding the correct examples so as to attack only them only for version 1
loss = nn.CrossEntropyLoss()(model(X+delta,**kwargs), y)
loss.backward()
k = random.randint(5,20)
alpha = 0.05/k*20
delta.data += alpha*correct*l1_dir_topk(delta.grad.detach(), delta.data, X, gap,k)
if (norms_l1(delta) > epsilon).any():
delta.data = proj_l1ball(delta.data, epsilon, device)
delta.data = torch.min(torch.max(delta.detach(), -X), 1-X) # clip X+delta to [0,1]
delta.grad.zero_()
max_delta = delta.detach()
#Restarts
    for r in range(restarts):  # renamed from `k`, which is re-drawn inside the loop
delta = torch.rand_like(X,requires_grad = True)
delta.data = (2*delta.data - 1.0)*epsilon
delta.data /= norms_l1(delta.detach()).clamp(min=epsilon)
for t in range (num_iter):
output = model(X+delta)
incorrect = output.max(1)[1] != y
correct = (~incorrect).unsqueeze(1).unsqueeze(1).unsqueeze(1).half()
correct = 1.0 if version == 0 else correct
#Finding the correct examples so as to attack only them only for version 1
loss = nn.CrossEntropyLoss()(model(X+delta,**kwargs), y)
loss.backward()
k = random.randint(5,20)
alpha = 0.05/k*20
delta.data += alpha*correct*l1_dir_topk(delta.grad.detach(), delta.data, X, gap,k)
if (norms_l1(delta) > epsilon).any():
delta.data = proj_l1ball(delta.data, epsilon, device)
delta.data = torch.min(torch.max(delta.detach(), -X), 1-X) # clip X+delta to [0,1]
delta.grad.zero_()
output = model(X+delta,**kwargs)
incorrect = output.max(1)[1] != y
#Edit Max Delta only for successful attacks
max_delta[incorrect] = delta.detach()[incorrect]
return max_delta
def pgd_linf(model, X, y, epsilon=0.03, alpha=0.003, num_iter = 10, device = "cuda:0", restarts = 0, version = 0,**kwargs):
""" Construct FGSM adversarial examples on the examples X"""
max_delta = torch.zeros_like(X)
delta = torch.zeros_like(X, requires_grad=True)
for t in range(num_iter):
output = model(X+delta,**kwargs)
incorrect = output.max(1)[1] != y
correct = (~incorrect).unsqueeze(1).unsqueeze(1).unsqueeze(1).half()
correct = 1.0 if version == 0 else correct
#Finding the correct examples so as to attack only them only for version 1
loss = nn.CrossEntropyLoss()(model(X + delta,**kwargs), y)
loss.backward()
delta.data = (delta.data + alpha*correct*delta.grad.detach().sign()).clamp(-epsilon,epsilon)
delta.data = torch.min(torch.max(delta.detach(), -X), 1-X) # clip X+delta to [0,1]
delta.grad.zero_()
max_delta = delta.detach()
for i in range (restarts):
delta = torch.rand_like(X, requires_grad=True)
delta.data = (delta.data*2.0 - 1.0)*epsilon
for t in range(num_iter):
output = model(X+delta,**kwargs)
incorrect = output.max(1)[1] != y
correct = (~incorrect).unsqueeze(1).unsqueeze(1).unsqueeze(1).half()
correct = 1.0 if version == 0 else correct
#Finding the correct examples so as to attack only themonly for version 1
loss = nn.CrossEntropyLoss()(model(X + delta,**kwargs), y)
loss.backward()
delta.data = (delta.data + alpha*correct*delta.grad.detach().sign()).clamp(-epsilon,epsilon)
            delta.data = torch.min(torch.max(delta.detach(), -X), 1-X) # clip X+delta to [0,1]
delta.grad.zero_()
output = model(X+delta,**kwargs)
incorrect = output.max(1)[1] != y
#Edit Max Delta only for successful attacks
max_delta[incorrect] = delta.detach()[incorrect]
return max_delta
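# Usage sketch (hypothetical `model`, `X`, `y`; not part of the original file):
# delta = pgd_linf(model, X, y, epsilon=0.03, alpha=0.003, num_iter=10)
# robust_acc = (model(X + delta).max(1)[1] == y).float().mean()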
def pgd_l0(model, X,y, epsilon = 12, alpha = 1, num_iter = 0, device = "cuda:1"):
delta = torch.zeros_like(X, requires_grad = True)
batch_size = X.shape[0]
# print("Updated")
for t in range (epsilon):
loss = nn.CrossEntropyLoss()(model(X + delta), y)
loss.backward()
temp = delta.grad.view(batch_size, 1, -1)
neg = (delta.data != 0)
X_curr = X + delta
neg1 = (delta.grad < 0)*(X_curr < 0.1)
neg2 = (delta.grad > 0)*(X_curr > 0.9)
neg += neg1 + neg2
u = neg.view(batch_size,1,-1)
temp[u] = 0
my_delta = torch.zeros_like(X).view(batch_size, 1, -1)
maxv = temp.max(dim = 2)
minv = temp.min(dim = 2)
val_max = maxv[0].view(batch_size)
val_min = minv[0].view(batch_size)
pos_max = maxv[1].view(batch_size)
pos_min = minv[1].view(batch_size)
select_max = (val_max.abs()>=val_min.abs()).half()
select_min = (val_max.abs()<val_min.abs()).half()
my_delta[torch.arange(batch_size), torch.zeros(batch_size, dtype = torch.long), pos_max] = (1-X.view(batch_size, 1, -1)[torch.arange(batch_size), torch.zeros(batch_size, dtype = torch.long), pos_max])*select_max
my_delta[torch.arange(batch_size), torch.zeros(batch_size, dtype = torch.long), pos_min] = -X.view(batch_size, 1, -1)[torch.arange(batch_size), torch.zeros(batch_size, dtype = torch.long), pos_min]*select_min
delta.data += my_delta.view(batch_size, 3, 32, 32)
delta.grad.zero_()
delta.data = torch.min(torch.max(delta.detach(), -X), 1-X) # clip X+delta to [0,1]
return delta.detach()
from attack.autopgd_train import pgd_1
def msd_v1(model, X,y, epsilon_l_inf = 0.03, epsilon_l_2= 1., epsilon_l_1 = 12.,
alpha_l_inf = 0.003, alpha_l_2 = 0.05, alpha_l_1 = 0.4, num_iter = 10, device = "cuda:0",**kwargs):
delta = torch.zeros_like(X,requires_grad = True)
max_delta = torch.zeros_like(X)
max_max_delta = torch.zeros_like(X)
max_loss = torch.zeros(y.shape[0]).to(y.device).half()
max_max_loss = torch.zeros(y.shape[0]).to(y.device).half()
for t in range(num_iter):
loss = nn.CrossEntropyLoss()(model(X + delta,**kwargs), y)
loss.backward()
# For L1
delta_l_1 = pgd_1(model, X + delta, y, eps=epsilon_l_1, n_iter=1, alpha=alpha_l_1, **kwargs) - X
with torch.no_grad():
#For L_2
'''
delta_l_2 = delta.data + alpha_l_2*delta.grad / norms(delta.grad)
delta_l_2 *= epsilon_l_2 / norms(delta_l_2).clamp(min=epsilon_l_2)
delta_l_2 = torch.min(torch.max(delta_l_2, -X), 1-X) # clip X+delta to [0,1]
#For L_inf
delta_l_inf= (delta.data + alpha_l_inf*delta.grad.sign()).clamp(-epsilon_l_inf,epsilon_l_inf)
delta_l_inf = torch.min(torch.max(delta_l_inf, -X), 1-X) # clip X+delta to [0,1]
'''
            #Compare
            #delta_tup = (delta_l_1, delta_l_2, delta_l_inf)
            # with the L2/Linf steps commented out above, only the L1 step is
            # compared; the duplicated entry is redundant but harmless
            delta_tup = (delta_l_1, delta_l_1)
max_loss = torch.zeros(y.shape[0]).to(y.device).half()
for delta_temp in delta_tup:
loss_temp = nn.CrossEntropyLoss(reduction = 'none')(model(X + delta_temp,**kwargs), y)
max_delta[loss_temp >= max_loss] = delta_temp[loss_temp >= max_loss]
max_loss = torch.max(max_loss, loss_temp)
delta.data = max_delta.data
max_max_delta[max_loss> max_max_loss] = max_delta[max_loss> max_max_loss]
max_max_loss[max_loss> max_max_loss] = max_loss[max_loss> max_max_loss].half()
delta.grad.zero_()
return max_max_delta
def msd_v0(model, X,y, epsilon_l_inf = 0.03, epsilon_l_2= 0.5, epsilon_l_1 = 12,
alpha_l_inf = 0.003, alpha_l_2 = 0.05, alpha_l_1 = 0.05, num_iter = 50, device = "cuda:0",**kwargs):
delta = torch.zeros_like(X,requires_grad = True)
max_delta = torch.zeros_like(X)
max_max_delta = torch.zeros_like(X)
max_loss = torch.zeros(y.shape[0]).to(y.device).half()
max_max_loss = torch.zeros(y.shape[0]).to(y.device).half()
alpha_l_1_default = alpha_l_1
for t in range(num_iter):
loss = nn.CrossEntropyLoss()(model(X + delta,**kwargs), y)
loss.backward()
with torch.no_grad():
#For L_2
delta_l_2 = delta.data + alpha_l_2*delta.grad / norms(delta.grad)
delta_l_2 *= epsilon_l_2 / norms(delta_l_2).clamp(min=epsilon_l_2)
delta_l_2 = torch.min(torch.max(delta_l_2, -X), 1-X) # clip X+delta to [0,1]
#For L_inf
delta_l_inf= (delta.data + alpha_l_inf*delta.grad.sign()).clamp(-epsilon_l_inf,epsilon_l_inf)
delta_l_inf = torch.min(torch.max(delta_l_inf, -X), 1-X) # clip X+delta to [0,1]
#For L1
k = random.randint(5,20)
alpha_l_1 = (alpha_l_1_default/k)*20
delta_l_1 = delta.data + alpha_l_1*l1_dir_topk(delta.grad, delta.data, X, alpha_l_1, k = k)
delta_l_1 = proj_l1ball(delta_l_1, epsilon_l_1, device)
delta_l_1 = torch.min(torch.max(delta_l_1, -X), 1-X) # clip X+delta to [0,1]
#Compare
delta_tup = (delta_l_1, delta_l_2, delta_l_inf)
#delta_tup = (delta_l_inf,delta_l_1)
max_loss = torch.zeros(y.shape[0]).to(y.device).half()
for delta_temp in delta_tup:
loss_temp = nn.CrossEntropyLoss(reduction = 'none')(model(X + delta_temp,**kwargs), y)
max_delta[loss_temp >= max_loss] = delta_temp[loss_temp >= max_loss]
max_loss = torch.max(max_loss, loss_temp)
delta.data = max_delta.data
max_max_delta[max_loss> max_max_loss] = max_delta[max_loss> max_max_loss]
max_max_loss[max_loss> max_max_loss] = max_loss[max_loss> max_max_loss].half()
delta.grad.zero_()
return max_max_delta
def pgd_worst_dir(model, X,y, epsilon_l_inf = 0.03, epsilon_l_2= 0.5, epsilon_l_1 = 12,
alpha_l_inf = 0.003, alpha_l_2 = 0.05, alpha_l_1 = 0.05, num_iter = 100, device = "cuda:0"):
#Always call version = 0
delta_1 = pgd_l1_topk(model, X, y, epsilon = epsilon_l_1, alpha = alpha_l_1, device = device)
delta_2 = pgd_l2(model, X, y, epsilon = epsilon_l_2, alpha = alpha_l_2, device = device)
delta_inf = pgd_linf(model, X, y, epsilon = epsilon_l_inf, alpha = alpha_l_inf, device = device)
batch_size = X.shape[0]
loss_1 = nn.CrossEntropyLoss(reduction = 'none')(model(X + delta_1), y)
loss_2 = nn.CrossEntropyLoss(reduction = 'none')(model(X + delta_2), y)
loss_inf = nn.CrossEntropyLoss(reduction = 'none')(model(X + delta_inf), y)
delta_1 = delta_1.view(batch_size,1,-1)
delta_2 = delta_2.view(batch_size,1,-1)
delta_inf = delta_inf.view(batch_size,1,-1)
tensor_list = [loss_1, loss_2, loss_inf]
delta_list = [delta_1, delta_2, delta_inf]
loss_arr = torch.stack(tuple(tensor_list))
delta_arr = torch.stack(tuple(delta_list))
max_loss = loss_arr.max(dim = 0)
delta = delta_arr[max_loss[1], torch.arange(batch_size), 0]
delta = delta.view(batch_size,3, X.shape[2], X.shape[3])
return delta
def kthlargest(tensor, k, dim=-1):
val, idx = tensor.topk(k, dim = dim)
return val[:,:,-1], idx[:,:,-1]
def l1_dir_topk(grad, delta, X, gap, k = 20) :
#Check which all directions can still be increased such that
#they haven't been clipped already and have scope of increasing
# ipdb.set_trace()
X_curr = X + delta
batch_size = X.shape[0]
channels = X.shape[1]
pix = X.shape[2]
# print (batch_size)
neg1 = (grad < 0)*(X_curr <= gap)
# neg1 = (grad < 0)*(X_curr == 0)
neg2 = (grad > 0)*(X_curr >= 1-gap)
# neg2 = (grad > 0)*(X_curr == 1)
neg3 = X_curr <= 0
neg4 = X_curr >= 1
neg = neg1 + neg2 + neg3 + neg4
u = neg.view(batch_size,1,-1)
grad_check = grad.view(batch_size,1,-1)
grad_check[u] = 0
kval = kthlargest(grad_check.abs().float(), k, dim = 2)[0].unsqueeze(1)
k_hot = (grad_check.abs() >= kval.half()).half() * grad_check.sign()
return k_hot.view(batch_size, channels, pix, pix)
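# l1_dir_topk returns a k-hot step direction: coordinates already saturated
# (within `gap` of 0 or 1 against the gradient sign) are zeroed, only the k
# largest-magnitude gradient entries per example survive, and the step uses
# their signs -- steepest descent w.r.t. the L1 norm restricted to the top-k.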
def proj_l1ball(x, epsilon=10, device = "cuda:1"):
assert epsilon > 0
# ipdb.set_trace()
# compute the vector of absolute values
u = x.abs()
if (u.sum(dim = (1,2,3)) <= epsilon).all():
# print (u.sum(dim = (1,2,3)))
# check if x is already a solution
# y = x* epsilon/norms_l1(x)
return x
# v is not already a solution: optimum lies on the boundary (norm == s)
# project *u* on the simplex
y = proj_simplex(u, s=epsilon, device = device)
# compute the solution to the original problem on v
y = y.view(-1,3,32,32)
y *= x.sign()
return y
def proj_simplex(v, s=1, device = "cuda:1"):
assert s > 0, "Radius s must be strictly positive (%d <= 0)" % s
batch_size = v.shape[0]
# check if we are already on the simplex
'''
#Not checking this as we are calling this from the previous function only
if v.sum(dim = (1,2,3)) == s and np.alltrue(v >= 0):
# best projection: itself!
return v
'''
# get the array of cumulative sums of a sorted (decreasing) copy of v
u = v.view(batch_size,1,-1)
n = u.shape[2]
u, indices = torch.sort(u, descending = True)
cssv = u.cumsum(dim = 2)
# get the number of > 0 components of the optimal solution
vec = u * torch.arange(1, n+1).half().to(device)
comp = (vec > (cssv - s)).half()
u = comp.cumsum(dim = 2)
w = (comp-1).cumsum(dim = 2)
u = u + w
rho = torch.argmax(u, dim = 2)
rho = rho.view(batch_size)
c = torch.HalfTensor([cssv[i,0,rho[i]] for i in range( cssv.shape[0]) ]).to(device)
c = c-s
# compute the Lagrange multiplier associated to the simplex constraint
theta = torch.div(c,(rho.half() + 1))
theta = theta.view(batch_size,1,1,1)
# compute the projection by thresholding v using theta
w = (v - theta).clamp(min=0)
return w
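# Sanity sketch (hypothetical tensors; assumes a CUDA device as in the default
# argument): after w = proj_simplex(v, s=1, device="cuda:1"), every entry of w
# is >= 0 and w.sum(dim=(1,2,3)) is ~1 per example whenever the input lay
# outside the simplex.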
def epoch(loader, lr_schedule, model, epoch_i, criterion = nn.CrossEntropyLoss(), opt=None, device = "cuda:0", stop = False):
"""Standard training/evaluation epoch over the dataset"""
train_loss = 0
train_acc = 0
train_n = 0
for i,batch in enumerate(loader):
X,y = batch['input'], batch['target']
output = model(X)
loss = criterion(output, y)
        if opt is not None:
lr = lr_schedule(epoch_i + (i+1)/len(loader))
opt.param_groups[0].update(lr=lr)
opt.zero_grad()
loss.backward()
opt.step()
train_loss += loss.item()*y.size(0)
train_acc += (output.max(1)[1] == y).sum().item()
train_n += y.size(0)
if stop:
break
return train_loss / train_n, train_acc / train_n
def epoch_adversarial_saver(batch_size,loader, model, attack, epsilon, num_iter, device = "cuda:0", restarts = 10):
criterion = nn.CrossEntropyLoss()
train_loss = 0
train_acc = 0
train_n = 0
# print("Attack: ", attack, " epsilon: ", epsilon )
for i,batch in enumerate(loader):
X,y = batch['input'], batch['target']
delta = attack(model, X, y, epsilon = epsilon, num_iter = num_iter, device = device, restarts = restarts)
output = model(X+delta)
loss = criterion(output, y)
train_loss += loss.item()*y.size(0)
train_acc += (output.max(1)[1] == y).sum().item()
correct = (output.max(1)[1] == y).float()
eps = (correct*1000 + epsilon - 0.000001).float()
train_n += y.size(0)
break
return eps, train_acc / train_n
def epoch_adversarial(loader, lr_schedule, model, epoch_i, attack, criterion = nn.CrossEntropyLoss(),
opt=None, device = "cuda:0", stop = False, stats = False, **kwargs):
"""Adversarial training/evaluation epoch over the dataset"""
train_loss = 0
train_acc = 0
train_n = 0
# ipdb.set_trace()
for i,batch in enumerate(loader):
X,y = batch['input'], batch['target']
if stats:
delta = attack(model, X, y, device = device, batchid = i, **kwargs)
else:
delta = attack(model, X, y, device = device, **kwargs)
output = model(X+delta)
# imshow(X[11])
# print (X[11])
# imshow((X+delta)[11])
# print (norms_l1(delta))
# output = model(X)
loss = criterion(output, y)
train_loss += loss.item()*y.size(0)
train_acc += (output.max(1)[1] == y).sum().item()
train_n += y.size(0)
        if opt is not None:
lr = lr_schedule(epoch_i + (i+1)/len(loader))
opt.param_groups[0].update(lr=lr)
opt.zero_grad()
loss.backward()
opt.step()
else:
if (stop):
break
# break
return train_loss / train_n, train_acc / train_n
def triple_adv(loader, lr_schedule, model, epoch_i, attack, criterion = nn.CrossEntropyLoss(),
opt=None, device= "cuda:0", epsilon_l_1 = 12, epsilon_l_2 = 0.5, epsilon_l_inf = 0.03, num_iter = 50):
train_loss = 0
train_acc = 0
train_n = 0
for i,batch in enumerate(loader):
X,y = batch['input'], batch['target']
lr = lr_schedule(epoch_i + (i+1)/len(loader))
opt.param_groups[0].update(lr=lr)
##Always calls the default version 0 for the individual attacks
#L1
delta = pgd_l1_topk(model, X, y, device = device, epsilon = epsilon_l_1)
output = model(X+delta)
loss = criterion(output,y)
train_loss += loss.item()*y.size(0)
train_acc += (output.max(1)[1] == y).sum().item()
train_n += y.size(0)
if opt:
opt.zero_grad()
loss.backward()
opt.step()
#L2
delta = pgd_l2(model, X, y, device = device, epsilon = epsilon_l_2)
output = model(X+delta)
loss = nn.CrossEntropyLoss()(output,y)
train_loss += loss.item()*y.size(0)
train_acc += (output.max(1)[1] == y).sum().item()
train_n += y.size(0)
if opt:
opt.zero_grad()
loss.backward()
opt.step()
#Linf
delta = pgd_linf(model, X, y, device = device, epsilon = epsilon_l_inf)
output = model(X+delta)
loss = nn.CrossEntropyLoss()(output,y)
train_loss += loss.item()*y.size(0)
train_acc += (output.max(1)[1] == y).sum().item()
train_n += y.size(0)
if opt:
opt.zero_grad()
loss.backward()
opt.step()
else:
break
# break
return train_loss / train_n, train_acc / train_n
| 22,195 | 40.027726 | 219 | py |
MCGR | MCGR-main/attack/slide.py | """
Implementation of attack methods. Running this file as a program will
apply the attack to the model specified by the config file and store
the examples in an .npy file.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import numpy as np
# compute_grad() and the sess.run() calls below are TensorFlow 1.x code, but
# the import was missing from this file.
import tensorflow as tf
from itertools import product
from collections import Counter
import json
def uniform_weights(n_attacks, n_samples):
x = np.random.uniform(size=(n_attacks, n_samples))
y = np.maximum(-np.log(x), 1e-8)
return y / np.sum(y, axis=0, keepdims=True)
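# -log(U) for U ~ Uniform(0, 1) is an Exp(1) draw, so normalizing a column of
# exponentials yields Dirichlet(1, ..., 1) weights: each column of the
# (n_attacks, n_samples) result is nonnegative and sums to 1.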
def init_delta(x, attack, weight):
if not attack["random_start"]:
return np.zeros_like(x)
assert len(weight) == len(x)
eps = (attack["epsilon"] * weight).reshape(len(x), 1, 1, 1)
if attack["type"] == "linf":
return np.random.uniform(-eps, eps, x.shape)
elif attack["type"] == "l2":
r = np.random.randn(*x.shape)
norm = np.linalg.norm(r.reshape(r.shape[0], -1), axis=-1).reshape(-1, 1, 1, 1)
return (r / norm) * eps
elif attack["type"] == "l1":
r = np.random.laplace(size=x.shape)
norm = np.linalg.norm(r.reshape(r.shape[0], -1), axis=-1, ord=1).reshape(-1, 1, 1, 1)
return (r / norm) * eps
else:
raise ValueError("Unknown norm {}".format(attack["type"]))
def delta_update(old_delta, g, x_adv, attack, x_min, x_max, weight, seed=None, t=None):
assert len(weight) == len(x_adv)
eps_w = attack["epsilon"] * weight
eps = eps_w.reshape(len(x_adv), 1, 1, 1)
if attack["type"] == "linf":
a = attack.get('a', (2.5 * eps) / attack["k"])
new_delta = old_delta + a * np.sign(g)
new_delta = np.clip(new_delta, -eps, eps)
new_delta = np.clip(new_delta, x_min - (x_adv - old_delta), x_max - (x_adv - old_delta))
return new_delta
elif attack["type"] == "l2":
a = attack.get('a', (2.5 * eps) / attack["k"])
bad_pos = ((x_adv == x_max) & (g > 0)) | ((x_adv == x_min) & (g < 0))
g[bad_pos] = 0
g = g.reshape(len(g), -1)
g /= np.maximum(np.linalg.norm(g, axis=-1, keepdims=True), 1e-8)
g = g.reshape(old_delta.shape)
new_delta = old_delta + a * g
new_delta_norm = np.linalg.norm(new_delta.reshape(len(new_delta), -1), axis=-1).reshape(-1, 1, 1, 1)
new_delta = new_delta / np.maximum(new_delta_norm, 1e-8) * np.minimum(new_delta_norm, eps)
new_delta = np.clip(new_delta, x_min - (x_adv - old_delta), x_max - (x_adv - old_delta))
return new_delta
elif attack["type"] == "l1":
_, h, w, ch = g.shape
a = attack.get('a', 1.0) * x_max
perc = attack.get('perc', 99)
if perc == 'max':
bad_pos = ((x_adv > (x_max - a)) & (g > 0)) | ((x_adv < a) & (g < 0)) | (x_adv < x_min) | (x_adv > x_max)
g[bad_pos] = 0
else:
bad_pos = ((x_adv == x_max) & (g > 0)) | ((x_adv == x_min) & (g < 0))
g[bad_pos] = 0
abs_grad = np.abs(g)
sign = np.sign(g)
if perc == 'max':
grad_flat = abs_grad.reshape(len(abs_grad), -1)
max_abs_grad = np.argmax(grad_flat, axis=-1)
optimal_perturbation = np.zeros_like(grad_flat)
optimal_perturbation[np.arange(len(grad_flat)), max_abs_grad] = 1.0
optimal_perturbation = sign * optimal_perturbation.reshape(abs_grad.shape)
else:
if isinstance(perc, list):
perc_low, perc_high = perc
perc = np.random.RandomState(seed).uniform(low=perc_low, high=perc_high)
max_abs_grad = np.percentile(abs_grad, perc, axis=(1, 2, 3), keepdims=True)
tied_for_max = (abs_grad >= max_abs_grad).astype(np.float32)
num_ties = np.sum(tied_for_max, (1, 2, 3), keepdims=True)
optimal_perturbation = sign * tied_for_max / num_ties
new_delta = old_delta + a * optimal_perturbation
l1 = np.sum(np.abs(new_delta), axis=(1, 2, 3))
to_project = l1 > eps_w
if np.any(to_project):
n = np.sum(to_project)
d = new_delta[to_project].reshape(n, -1) # n * N (N=h*w*ch)
abs_d = np.abs(d) # n * N
mu = -np.sort(-abs_d, axis=-1) # n * N
cumsums = mu.cumsum(axis=-1) # n * N
eps_d = eps_w[to_project]
js = 1.0 / np.arange(1, h * w * ch + 1)
temp = mu - js * (cumsums - np.expand_dims(eps_d, -1))
rho = np.argmin(temp > 0, axis=-1)
theta = 1.0 / (1 + rho) * (cumsums[range(n), rho] - eps_d)
sgn = np.sign(d)
d = sgn * np.maximum(abs_d - np.expand_dims(theta, -1), 0)
new_delta[to_project] = d.reshape(-1, h, w, ch)
new_delta = np.clip(new_delta, x_min - (x_adv - old_delta), x_max - (x_adv - old_delta))
return new_delta
def compute_grad(model):
label_mask = tf.one_hot(model.y_input,
model.pre_softmax.get_shape().as_list()[-1],
on_value=1.0,
off_value=0.0,
dtype=tf.float32)
correct_logit = tf.reduce_sum(label_mask * model.pre_softmax, axis=1)
wrong_logit = tf.reduce_max((1 - label_mask) * model.pre_softmax - 1e4 * label_mask, axis=1)
loss = -(correct_logit - wrong_logit)
return tf.gradients(loss, model.x_input)[0]
def name(attack):
return json.dumps(attack)
class PGDAttack:
def __init__(self, model, attack_config, x_min, x_max, grad, reps=1):
"""Attack parameter initialization. The attack performs k steps of
size a, while always staying within epsilon from the initial
point."""
print("new attack: ", attack_config)
if isinstance(attack_config, dict):
attack_config = [attack_config]
self.model = model
self.x_min = x_min
self.x_max = x_max
self.attack_config = attack_config
self.names = [name(a) for a in attack_config]
self.name = " - ".join(self.names)
self.grad = grad
self.reps = int(attack_config[0].get("reps", 1))
assert self.reps >= 1
def perturb(self, x_nat, y, sess, x_nat_no_aug=None):
if len(self.attack_config) == 0:
return x_nat, None
if x_nat_no_aug is None:
x_nat_no_aug = x_nat
n = len(x_nat)
worst_x = np.copy(x_nat)
worst_t = np.zeros([n, 3])
max_xent = np.zeros(n)
all_correct = np.ones(n).astype(bool)
for i in range(self.reps):
if "weight" in self.attack_config[0]:
weights = np.asarray([a["weight"] for a in self.attack_config])
weights = np.repeat(weights[:, np.newaxis], len(x_nat), axis=-1)
else:
weights = uniform_weights(len(self.attack_config), len(x_nat))
if self.attack_config[0]["type"] == "RT":
assert np.all([a["type"] != "RT" for a in self.attack_config[1:]])
norm_attacks = self.attack_config[1:]
norm_weights = weights[1:]
x_adv, trans = self.grid_perturb(x_nat_no_aug, y, sess, self.attack_config[0],
weights[0], norm_attacks, norm_weights)
else:
# rotation and translation attack should always come first
assert np.all([a["type"] != "RT" for a in self.attack_config])
norm_attacks = self.attack_config
x_adv = self.norm_perturb(x_nat, y, sess, norm_attacks, weights)
trans = worst_t
cur_xent, cur_correct = sess.run([self.model.y_xent, self.model.correct_prediction],
feed_dict={self.model.x_input: x_adv,
self.model.y_input: y,
self.model.is_training: False,
self.model.transform: trans})
cur_xent = np.asarray(cur_xent)
cur_correct = np.asarray(cur_correct)
idx = (cur_xent > max_xent) & (cur_correct == all_correct)
idx = idx | (cur_correct < all_correct)
max_xent = np.maximum(cur_xent, max_xent)
all_correct = cur_correct & all_correct
idx = np.expand_dims(idx, axis=-1) # shape (bsize, 1)
worst_t = np.where(idx, trans, worst_t) # shape (bsize, 3)
idx = np.expand_dims(idx, axis=-1)
idx = np.expand_dims(idx, axis=-1) # shape (bsize, 1, 1, 1)
            worst_x = np.where(idx, x_adv, worst_x)  # shape (bsize, h, w, ch)
return worst_x, worst_t
def grid_perturb(self, x_nat, y, sess, attack_config, weight, norm_attacks, norm_weights):
random_tries = attack_config["random_tries"]
n = len(x_nat)
assert len(weight) == len(x_nat)
# (3, 1) * n => (3, n)
spatial_limits = np.asarray(attack_config["spatial_limits"])[:, np.newaxis] * weight
if random_tries > 0:
grids = np.zeros((n, random_tries))
else:
# exhaustive grid
# n * (num_x * num_y * num_rot)
grids = [list(product(*list(np.linspace(-l, l, num=g)
for l, g in zip(spatial_limits[:, i], attack_config["grid_granularity"]))))
for i in range(len(x_nat))]
grids = np.asarray(grids)
worst_x = np.copy(x_nat)
worst_t = np.zeros([n, 3])
max_xent = np.zeros(n)
all_correct = np.ones(n).astype(bool)
for idx in range(len(grids[0])):
if random_tries > 0:
t = [[np.random.uniform(-l, l) for l in spatial_limits[:, i]] for i in range(len(x_nat))]
else:
t = grids[:, idx]
x = self.norm_perturb(x_nat, y, sess, norm_attacks, norm_weights, trans=t)
curr_dict = {self.model.x_input: x,
self.model.y_input: y,
self.model.is_training: False,
self.model.transform: t}
cur_xent, cur_correct = sess.run([self.model.y_xent,
self.model.correct_prediction],
feed_dict=curr_dict) # shape (bsize,)
cur_xent = np.asarray(cur_xent)
cur_correct = np.asarray(cur_correct)
# Select indices to update: we choose the misclassified transformation
# of maximum xent (or just highest xent if everything else if correct).
idx = (cur_xent > max_xent) & (cur_correct == all_correct)
idx = idx | (cur_correct < all_correct)
max_xent = np.maximum(cur_xent, max_xent)
all_correct = cur_correct & all_correct
idx = np.expand_dims(idx, axis=-1) # shape (bsize, 1)
worst_t = np.where(idx, t, worst_t) # shape (bsize, 3)
idx = np.expand_dims(idx, axis=-1)
idx = np.expand_dims(idx, axis=-1) # shape (bsize, 1, 1, 1)
            worst_x = np.where(idx, x, worst_x)  # shape (bsize, h, w, ch)
return worst_x, worst_t
def norm_perturb(self, x_nat, y, sess, norm_attacks, norm_weights, trans=None):
if len(norm_attacks) == 0:
return x_nat
x_min = self.x_min
x_max = self.x_max
if trans is None:
trans = np.zeros([len(x_nat), 3])
iters = [a["k"] for a in norm_attacks]
assert (np.all(np.asarray(iters) == iters[0]))
deltas = np.asarray([init_delta(x_nat, attack, weight)
for attack, weight in zip(norm_attacks, norm_weights)])
x_adv = np.clip(x_nat + np.sum(deltas, axis=0), 0, 1)
# a seed that remains constant across attack iterations
seed = np.random.randint(low=0, high=2**32-1)
for i in range(np.sum(iters)):
grad = sess.run(self.grad, feed_dict={self.model.x_input: x_adv,
self.model.y_input: y,
self.model.is_training: False,
self.model.transform: trans})
deltas[i % len(norm_attacks)] = delta_update(deltas[i % len(norm_attacks)],
grad,
x_adv,
norm_attacks[i % len(norm_attacks)],
x_min, x_max,
norm_weights[i % len(norm_attacks)],
seed=seed, t=i+1)
x_adv = np.clip(x_nat + np.sum(deltas, axis=0), x_min, x_max)
return np.clip(x_nat + np.sum(deltas, axis=0), x_min, x_max)
| 13,090 | 40.55873 | 117 | py |
MCGR | MCGR-main/attack/autopgd_train.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import random
#from autopgd_base import L1_projection
from other_utils import L1_norm, L2_norm, L0_norm
def L1_projection(x2, y2, eps1):
'''
x2: center of the L1 ball (bs x input_dim)
y2: current perturbation (x2 + y2 is the point to be projected)
eps1: radius of the L1 ball
output: delta s.th. ||y2 + delta||_1 = eps1
and 0 <= x2 + y2 + delta <= 1
'''
x = x2.clone().float().view(x2.shape[0], -1)
y = y2.clone().float().view(y2.shape[0], -1)
sigma = y.clone().sign()
u = torch.min(1 - x - y, x + y)
#u = torch.min(u, epsinf - torch.clone(y).abs())
u = torch.min(torch.zeros_like(y), u)
l = -torch.clone(y).abs()
d = u.clone()
bs, indbs = torch.sort(-torch.cat((u, l), 1), dim=1)
bs2 = torch.cat((bs[:, 1:], torch.zeros(bs.shape[0], 1).to(bs.device)), 1)
inu = 2*(indbs < u.shape[1]).float() - 1
size1 = inu.cumsum(dim=1)
s1 = -u.sum(dim=1)
c = eps1 - y.clone().abs().sum(dim=1)
c5 = s1 + c < 0
c2 = c5.nonzero().squeeze(1)
s = s1.unsqueeze(-1) + torch.cumsum((bs2 - bs) * size1, dim=1)
#print(s[0])
#print(c5.shape, c2)
    if c2.nelement() != 0:
lb = torch.zeros_like(c2).float()
ub = torch.ones_like(lb) *(bs.shape[1] - 1)
#print(c2.shape, lb.shape)
nitermax = torch.ceil(torch.log2(torch.tensor(bs.shape[1]).float()))
counter2 = torch.zeros_like(lb).long()
counter = 0
while counter < nitermax:
counter4 = torch.floor((lb + ub) / 2.)
counter2 = counter4.type(torch.LongTensor)
c8 = s[c2, counter2] + c[c2] < 0
ind3 = c8.nonzero().squeeze(1)
ind32 = (~c8).nonzero().squeeze(1)
#print(ind3.shape)
            if ind3.nelement() != 0:
lb[ind3] = counter4[ind3]
            if ind32.nelement() != 0:
ub[ind32] = counter4[ind32]
#print(lb, ub)
counter += 1
lb2 = lb.long()
alpha = (-s[c2, lb2] -c[c2]) / size1[c2, lb2 + 1] + bs2[c2, lb2]
d[c2] = -torch.min(torch.max(-u[c2], alpha.unsqueeze(-1)), -l[c2])
return (sigma * d).view(x2.shape)
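# Sanity sketch (hypothetical tensors; not part of the original file):
# x2 = torch.rand(8, 3, 32, 32); y2 = 0.5 * torch.randn_like(x2)
# delta = L1_projection(x2, y2, eps1=12.0)
# per the docstring, ||y2 + delta||_1 == eps1 per example (when projection is
# needed) and x2 + y2 + delta stays inside [0, 1].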
def dlr_loss(x, y, reduction='none'):
x_sorted, ind_sorted = x.sort(dim=1)
ind = (ind_sorted[:, -1] == y).float()
return -(x[torch.arange(x.shape[0]), y] - x_sorted[:, -2] * ind - \
x_sorted[:, -1] * (1. - ind)) / (x_sorted[:, -1] - x_sorted[:, -3] + 1e-12)
def dlr_loss_targeted(x, y, y_target):
x_sorted, ind_sorted = x.sort(dim=1)
u = torch.arange(x.shape[0])
return -(x[u, y] - x[u, y_target]) / (x_sorted[:, -1] - .5 * (
x_sorted[:, -3] + x_sorted[:, -4]) + 1e-12)
criterion_dict = {'ce': lambda x, y: F.cross_entropy(x, y, reduction='none'),
'dlr': dlr_loss, 'dlr-targeted': dlr_loss_targeted}
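# Example: criterion_indiv = criterion_dict['dlr'] gives per-example DLR
# losses; 'dlr-targeted' additionally takes the target labels (y_target) as a
# third argument, so it cannot be swapped in without changing the call sites.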
def check_oscillation(x, j, k, y5, k3=0.75):
t = torch.zeros(x.shape[1]).to(x.device)
for counter5 in range(k):
t += (x[j - counter5] > x[j - counter5 - 1]).float()
return (t <= k * k3 * torch.ones_like(t)).float()
def apgd_train(model, x, y, norm, eps, n_iter=10, use_rs=False, loss='ce',
verbose=False, is_train=True,**kwargs):
assert not model.training
device = x.device
ndims = len(x.shape) - 1
if not use_rs:
x_adv = x.clone()
else:
        # NotImplemented is a constant, not an exception type
        raise NotImplementedError('random start (use_rs) is not implemented')
    if norm == 'Linf':
        t = torch.rand_like(x)  # leftover from a random-start variant; unused below
x_adv = x_adv.clamp(0., 1.)
x_best = x_adv.clone()
x_best_adv = x_adv.clone()
loss_steps = torch.zeros([n_iter, x.shape[0]], device=device)
loss_best_steps = torch.zeros([n_iter + 1, x.shape[0]], device=device)
acc_steps = torch.zeros_like(loss_best_steps)
# set loss
criterion_indiv = criterion_dict[loss]
# set params
n_fts = math.prod(x.shape[1:])
if norm in ['Linf', 'L2']:
n_iter_2 = max(int(0.22 * n_iter), 1)
n_iter_min = max(int(0.06 * n_iter), 1)
size_decr = max(int(0.03 * n_iter), 1)
k = n_iter_2 + 0
thr_decr = .75
alpha = 2.
elif norm in ['L1']:
k = max(int(.04 * n_iter), 1)
init_topk = .05 if is_train else .2
topk = init_topk * torch.ones([x.shape[0]], device=device)
sp_old = n_fts * torch.ones_like(topk)
adasp_redstep = 1.5
adasp_minstep = 10.
alpha = 1.
step_size = alpha * eps * torch.ones([x.shape[0], *[1] * ndims],
device=device)
counter3 = 0
x_adv.requires_grad_()
#grad = torch.zeros_like(x)
#for _ in range(self.eot_iter)
#with torch.enable_grad()
logits = model(x_adv,**kwargs)
loss_indiv = criterion_indiv(logits, y)
loss = loss_indiv.sum()
#grad += torch.autograd.grad(loss, [x_adv])[0].detach()
grad = torch.autograd.grad(loss, [x_adv])[0].detach()
#grad /= float(self.eot_iter)
grad_best = grad.clone()
x_adv.detach_()
loss_indiv.detach_()
loss.detach_()
acc = logits.detach().max(1)[1] == y
acc_steps[0] = acc + 0
loss_best = loss_indiv.detach().clone()
loss_best_last_check = loss_best.clone()
reduced_last_check = torch.ones_like(loss_best)
n_reduced = 0
u = torch.arange(x.shape[0], device=device)
x_adv_old = x_adv.clone().detach()
for i in range(n_iter):
### gradient step
if True: #with torch.no_grad()
x_adv = x_adv.detach()
grad2 = x_adv - x_adv_old
x_adv_old = x_adv.clone()
loss_curr = loss.detach().mean()
a = 0.75 if i > 0 else 1.0
if norm == 'Linf':
x_adv_1 = x_adv + step_size * torch.sign(grad)
x_adv_1 = torch.clamp(torch.min(torch.max(x_adv_1,
x - eps), x + eps), 0.0, 1.0)
x_adv_1 = torch.clamp(torch.min(torch.max(
x_adv + (x_adv_1 - x_adv) * a + grad2 * (1 - a),
x - eps), x + eps), 0.0, 1.0)
elif norm == 'L2':
x_adv_1 = x_adv + step_size * grad / (L2_norm(grad,
keepdim=True) + 1e-12)
x_adv_1 = torch.clamp(x + (x_adv_1 - x) / (L2_norm(x_adv_1 - x,
keepdim=True) + 1e-12) * torch.min(eps * torch.ones_like(x),
L2_norm(x_adv_1 - x, keepdim=True)), 0.0, 1.0)
x_adv_1 = x_adv + (x_adv_1 - x_adv) * a + grad2 * (1 - a)
x_adv_1 = torch.clamp(x + (x_adv_1 - x) / (L2_norm(x_adv_1 - x,
keepdim=True) + 1e-12) * torch.min(eps * torch.ones_like(x),
L2_norm(x_adv_1 - x, keepdim=True)), 0.0, 1.0)
elif norm == 'L1':
grad_topk = grad.abs().view(x.shape[0], -1).sort(-1)[0]
topk_curr = torch.clamp((1. - topk) * n_fts, min=0, max=n_fts - 1).long()
grad_topk = grad_topk[u, topk_curr].view(-1, *[1]*(len(x.shape) - 1))
sparsegrad = grad * (grad.abs() >= grad_topk).float()
x_adv_1 = x_adv + step_size * sparsegrad.sign() / (
sparsegrad.sign().abs().view(x.shape[0], -1).sum(dim=-1).view(
-1, 1, 1, 1) + 1e-10)
delta_u = x_adv_1 - x
delta_p = L1_projection(x, delta_u, eps)
x_adv_1 = x + delta_u + delta_p
            elif norm == 'L0':
                L1normgrad = grad / (grad.abs().view(grad.shape[0], -1).sum(
                    dim=-1, keepdim=True) + 1e-12).view(grad.shape[0], *[1]*(
                    len(grad.shape) - 1))
                x_adv_1 = x_adv + step_size * L1normgrad * n_fts
                # NOTE: L0_projection is neither defined nor imported in this
                # file (only L0_norm is); this branch needs it, e.g. from the
                # AutoAttack codebase, before it can run.
                x_adv_1 = L0_projection(x_adv_1, x, eps)
# TODO: add momentum
x_adv = x_adv_1 + 0.
### get gradient
x_adv.requires_grad_()
#grad = torch.zeros_like(x)
#for _ in range(self.eot_iter)
#with torch.enable_grad()
logits = model(x_adv,**kwargs)
loss_indiv = criterion_indiv(logits, y)
loss = loss_indiv.sum()
#grad += torch.autograd.grad(loss, [x_adv])[0].detach()
if i < n_iter - 1:
# save one backward pass
grad = torch.autograd.grad(loss, [x_adv])[0].detach()
#grad /= float(self.eot_iter)
x_adv.detach_()
loss_indiv.detach_()
loss.detach_()
pred = logits.detach().max(1)[1] == y
acc = torch.min(acc, pred)
acc_steps[i + 1] = acc + 0
ind_pred = (pred == 0).nonzero().squeeze()
x_best_adv[ind_pred] = x_adv[ind_pred] + 0.
if verbose:
str_stats = ' - step size: {:.5f} - topk: {:.2f}'.format(
step_size.mean(), topk.mean() * n_fts) if norm in ['L1'] else ' - step size: {:.5f}'.format(
step_size.mean())
print('iteration: {} - best loss: {:.6f} curr loss {:.6f} - robust accuracy: {:.2%}{}'.format(
i, loss_best.sum(), loss_curr, acc.float().mean(), str_stats))
#print('pert {}'.format((x - x_best_adv).abs().view(x.shape[0], -1).sum(-1).max()))
### check step size
if True: #with torch.no_grad()
y1 = loss_indiv.detach().clone()
loss_steps[i] = y1 + 0
ind = (y1 > loss_best).nonzero().squeeze()
x_best[ind] = x_adv[ind].clone()
grad_best[ind] = grad[ind].clone()
loss_best[ind] = y1[ind] + 0
loss_best_steps[i + 1] = loss_best + 0
counter3 += 1
if counter3 == k:
if norm in ['Linf', 'L2']:
fl_oscillation = check_oscillation(loss_steps, i, k,
loss_best, k3=thr_decr)
fl_reduce_no_impr = (1. - reduced_last_check) * (
loss_best_last_check >= loss_best).float()
fl_oscillation = torch.max(fl_oscillation,
fl_reduce_no_impr)
reduced_last_check = fl_oscillation.clone()
loss_best_last_check = loss_best.clone()
if fl_oscillation.sum() > 0:
ind_fl_osc = (fl_oscillation > 0).nonzero().squeeze()
step_size[ind_fl_osc] /= 2.0
n_reduced = fl_oscillation.sum()
x_adv[ind_fl_osc] = x_best[ind_fl_osc].clone()
grad[ind_fl_osc] = grad_best[ind_fl_osc].clone()
counter3 = 0
k = max(k - size_decr, n_iter_min)
elif norm == 'L1':
# adjust sparsity
sp_curr = L0_norm(x_best - x)
fl_redtopk = (sp_curr / sp_old) < .95
topk = sp_curr / n_fts / 1.5
step_size[fl_redtopk] = alpha * eps
step_size[~fl_redtopk] /= adasp_redstep
step_size.clamp_(alpha * eps / adasp_minstep, alpha * eps)
sp_old = sp_curr.clone()
x_adv[fl_redtopk] = x_best[fl_redtopk].clone()
grad[fl_redtopk] = grad_best[fl_redtopk].clone()
counter3 = 0
return x_best, acc, loss_best, x_best_adv
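# Usage sketch (hypothetical model/batch; not part of the original file):
# model.eval()  # apgd_train asserts the model is not in training mode
# x_best, acc, loss_best, x_adv = apgd_train(model, x, y, norm='Linf',
#                                            eps=8/255., n_iter=10)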
def pgd_1(model, x, y, eps=12.0, n_iter=10,alpha=0.4,**kwargs):
device = x.device
ndims = len(x.shape) - 1
x_adv = x.clone()
n_fts = math.prod(x.shape[1:])
x_adv = x_adv.clamp(0., 1.)
loss_best_steps = torch.zeros([n_iter + 1, x.shape[0]], device=device)
acc_steps = torch.zeros_like(loss_best_steps)
# set loss
criterion_indiv = criterion_dict['ce']
k = max(int(.04 * n_iter), 1)
init_topk = .05
topk = init_topk * torch.ones([x.shape[0]], device=device)
step_size = alpha * eps * torch.ones([x.shape[0], *[1] * ndims],
device=device)
x_adv.requires_grad_()
# grad = torch.zeros_like(x)
# for _ in range(self.eot_iter)
# with torch.enable_grad()
logits = model(x_adv,**kwargs)
loss_indiv = criterion_indiv(logits, y)
loss = loss_indiv.sum()
# grad += torch.autograd.grad(loss, [x_adv])[0].detach()
grad = torch.autograd.grad(loss, [x_adv])[0].detach()
x_adv.detach_()
loss_indiv.detach_()
loss.detach_()
acc = logits.detach().max(1)[1] == y
acc_steps[0] = acc + 0
u = torch.arange(x.shape[0], device=device)
for i in range(n_iter):
x_adv = x_adv.detach()
        x_adv_old = x_adv.clone()  # carried over from apgd_train; unused in this variant
grad_topk = grad.abs().view(x.shape[0], -1).sort(-1)[0]
topk_curr = torch.clamp((1. - topk) * n_fts, min=0, max=n_fts - 1).long()
grad_topk = grad_topk[u, topk_curr].view(-1, *[1] * (len(x.shape) - 1))
sparsegrad = grad * (grad.abs() >= grad_topk).float()
x_adv_1 = x_adv + step_size * sparsegrad.sign() / (
sparsegrad.sign().abs().view(x.shape[0], -1).sum(dim=-1).view(
-1, 1, 1, 1) + 1e-10)
delta_u = x_adv_1 - x
delta_p = L1_projection(x, delta_u, eps)
x_adv_1 = x + delta_u + delta_p
x_adv = x_adv_1 + 0.
### get gradient
x_adv.requires_grad_()
# grad = torch.zeros_like(x)
# for _ in range(self.eot_iter)
# with torch.enable_grad()
logits = model(x_adv,**kwargs)
loss_indiv = criterion_indiv(logits, y)
loss = loss_indiv.sum()
# grad += torch.autograd.grad(loss, [x_adv])[0].detach()
if i < n_iter - 1:
# save one backward pass
grad = torch.autograd.grad(loss, [x_adv])[0].detach()
# grad /= float(self.eot_iter)
x_adv.detach_()
loss_indiv.detach_()
loss.detach_()
return x_adv
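# Usage sketch for pgd_1 (illustrative placeholders, not part of the module:
# `model` is any classifier returning logits, `x` a batch in [0, 1], `y` labels):
#   x_adv = pgd_1(model, x, y, eps=12.0, n_iter=10)
#   robust_acc = (model(x_adv).max(1)[1] == y).float().mean()
# Note the eps budget is an L1 budget, enforced through L1_projection above.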
| 13,887 | 35.643799 | 108 | py |
MCGR | MCGR-main/attack/pgd2.py | import torch
import torch.nn as nn
class PGD:
def __init__(self, eps=1.0, step_size=0.2, max_iter=10, random_init=True,
targeted=False, loss_fn=nn.CrossEntropyLoss(), batch_size=64,eps_for_division=1e-10):
self.eps = eps
self.step_size = step_size
self.max_iter = max_iter
self.random_init = random_init
self.targeted = targeted
self.loss_fn = loss_fn
self.batch_size = batch_size
self.eps_for_division=eps_for_division
def attack(self, model, x, y, x_adv=None, targets=None,**kwargs):
batchsize=x.shape[0]
if x_adv is None:
if self.random_init:
delta = torch.empty_like(x).normal_()
d_flat = delta.view(x.size(0), -1)
n = d_flat.norm(p=2, dim=1).view(x.size(0), 1, 1, 1)
r = torch.zeros_like(n).uniform_(0, 1)
delta *= r / n * self.eps
x_adv = torch.clamp(x + delta, min=0, max=1).detach()
else:
x_adv = torch.clone(x).detach()
x_adv.requires_grad_(True)
pred_adv = model(x_adv,**kwargs)
if isinstance(pred_adv, (list, tuple)):
pred_adv = pred_adv[-1]
        if self.targeted:
            assert targets is not None, "Target labels not found!"
            # a targeted attack should move toward the target class, so the
            # gradient-ascent step below needs the negated loss
            loss = -self.loss_fn(pred_adv, targets)
        else:
            loss = self.loss_fn(pred_adv, y)
grad = torch.autograd.grad(loss, x_adv)[0]
grad_norms = torch.norm(grad.view(batchsize, -1), p=2, dim=1) + self.eps_for_division
grad = grad / grad_norms.view(batchsize, 1, 1, 1)
x_adv = x_adv.detach() + self.step_size * grad
delta = x_adv - x
delta_norms = torch.norm(delta.view(batchsize, -1), p=2, dim=1)
factor = self.eps / delta_norms
factor = torch.min(factor, torch.ones_like(delta_norms))
delta = delta * factor.view(-1, 1, 1, 1)
x_adv = torch.clamp(x + delta, min=0, max=1).detach()
return x_adv
def generate(self, model, x, y=None, targets=None, device=torch.device("cpu"),**kwargs):
model.to(device)
model.eval()
x_adv = []
for i in range(0, x.size(0), self.batch_size):
x_batch = x[i: i + self.batch_size].to(device)
if y is None:
y_batch = model(x_batch,**kwargs)
                if isinstance(y_batch, (list, tuple)):
y_batch = y_batch[-1]
y_batch = y_batch.max(dim=-1)[1].to(device)
else:
y_batch = y[i: i + self.batch_size].to(device)
for j in range(self.max_iter):
if j == 0:
x_adv_batch = self.attack(model, x_batch, y_batch, targets=targets,**kwargs)
else:
x_adv_batch = self.attack(model, x_batch, y_batch, x_adv_batch, targets=targets,**kwargs)
x_adv.append(x_adv_batch)
return torch.cat(x_adv, dim=0).to(device)
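if __name__ == "__main__":
    # Smoke-test sketch with a throwaway linear classifier (the toy model,
    # shapes, and hyper-parameters below are illustrative, not from MCGR).
    toy_model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 10))
    x = torch.rand(4, 3, 32, 32)
    y = torch.randint(0, 10, (4,))
    attacker = PGD(eps=0.5, step_size=0.1, max_iter=5, batch_size=4)
    x_adv = attacker.generate(toy_model, x, y)
    # perturbations should stay inside the L2 ball of radius eps
    print((x_adv - x).view(4, -1).norm(p=2, dim=1))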
| 3,000 | 39.013333 | 109 | py |
MCGR | MCGR-main/models/preresnet.py | """
PreResNet model definition
ported from https://github.com/bearpaw/pytorch-classification/blob/master/models/cifar/preresnet.py
"""
import math
import torch.nn as nn
import curves
__all__ = ['PreResNet110', 'PreResNet164']
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def conv3x3curve(in_planes, out_planes, fix_points, stride=1):
return curves.Conv2d(in_planes, out_planes, kernel_size=3, fix_points=fix_points, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(inplanes)
self.relu = nn.ReLU(inplace=True)
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.bn1(x)
out = self.relu(out)
out = self.conv1(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
return out
class BasicBlockCurve(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, fix_points, stride=1, downsample=None):
super(BasicBlockCurve, self).__init__()
self.bn1 = curves.BatchNorm2d(inplanes, fix_points=fix_points)
self.relu = nn.ReLU(inplace=True)
self.conv1 = conv3x3curve(inplanes, planes, stride=stride, fix_points=fix_points)
self.bn2 = curves.BatchNorm2d(planes, fix_points=fix_points)
self.conv2 = conv3x3curve(planes, planes, fix_points=fix_points)
self.downsample = downsample
self.stride = stride
def forward(self, x, coeffs_t):
residual = x
out = self.bn1(x, coeffs_t)
out = self.relu(out)
out = self.conv1(out, coeffs_t)
out = self.bn2(out, coeffs_t)
out = self.relu(out)
out = self.conv2(out, coeffs_t)
if self.downsample is not None:
residual = self.downsample(x, coeffs_t)
out += residual
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.bn1 = nn.BatchNorm2d(inplanes)
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.bn1(x)
out = self.relu(out)
out = self.conv1(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn3(out)
out = self.relu(out)
out = self.conv3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
return out
class BottleneckCurve(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, fix_points, stride=1, downsample=None):
super(BottleneckCurve, self).__init__()
self.bn1 = curves.BatchNorm2d(inplanes, fix_points=fix_points)
self.conv1 = curves.Conv2d(inplanes, planes, kernel_size=1, bias=False,
fix_points=fix_points)
self.bn2 = curves.BatchNorm2d(planes, fix_points=fix_points)
self.conv2 = curves.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False, fix_points=fix_points)
self.bn3 = curves.BatchNorm2d(planes, fix_points=fix_points)
self.conv3 = curves.Conv2d(planes, planes * 4, kernel_size=1, bias=False,
fix_points=fix_points)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x, coeffs_t):
residual = x
out = self.bn1(x, coeffs_t)
out = self.relu(out)
out = self.conv1(out, coeffs_t)
out = self.bn2(out, coeffs_t)
out = self.relu(out)
out = self.conv2(out, coeffs_t)
out = self.bn3(out, coeffs_t)
out = self.relu(out)
out = self.conv3(out, coeffs_t)
if self.downsample is not None:
residual = self.downsample(x, coeffs_t)
out += residual
return out
class PreResNetBase(nn.Module):
def __init__(self, num_classes, depth=110):
super(PreResNetBase, self).__init__()
if depth >= 44:
assert (depth - 2) % 9 == 0, 'depth should be 9n+2'
n = (depth - 2) // 9
block = Bottleneck
else:
assert (depth - 2) % 6 == 0, 'depth should be 6n+2'
n = (depth - 2) // 6
block = BasicBlock
self.inplanes = 16
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1,
bias=False)
self.layer1 = self._make_layer(block, 16, n)
self.layer2 = self._make_layer(block, 32, n, stride=2)
self.layer3 = self._make_layer(block, 64, n, stride=2)
self.bn = nn.BatchNorm2d(64 * block.expansion)
self.relu = nn.ReLU(inplace=True)
self.avgpool = nn.AvgPool2d(8)
self.fc = nn.Linear(64 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
)
layers = list()
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.layer1(x) # 32x32
x = self.layer2(x) # 16x16
x = self.layer3(x) # 8x8
x = self.bn(x)
x = self.relu(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
class PreResNetCurve(nn.Module):
def __init__(self, num_classes, fix_points, depth=110):
super(PreResNetCurve, self).__init__()
if depth >= 44:
assert (depth - 2) % 9 == 0, 'depth should be 9n+2'
n = (depth - 2) // 9
block = BottleneckCurve
else:
assert (depth - 2) % 6 == 0, 'depth should be 6n+2'
n = (depth - 2) // 6
block = BasicBlockCurve
self.inplanes = 16
self.conv1 = curves.Conv2d(3, 16, kernel_size=3, padding=1,
bias=False, fix_points=fix_points)
self.layer1 = self._make_layer(block, 16, n, fix_points=fix_points)
self.layer2 = self._make_layer(block, 32, n, stride=2, fix_points=fix_points)
self.layer3 = self._make_layer(block, 64, n, stride=2, fix_points=fix_points)
self.bn = curves.BatchNorm2d(64 * block.expansion, fix_points=fix_points)
self.relu = nn.ReLU(inplace=True)
self.avgpool = nn.AvgPool2d(8)
self.fc = curves.Linear(64 * block.expansion, num_classes, fix_points=fix_points)
for m in self.modules():
if isinstance(m, curves.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
for i in range(m.num_bends):
getattr(m, 'weight_%d' % i).data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, curves.BatchNorm2d):
for i in range(m.num_bends):
getattr(m, 'weight_%d' % i).data.fill_(1)
getattr(m, 'bias_%d' % i).data.zero_()
def _make_layer(self, block, planes, blocks, fix_points, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = curves.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1,
stride=stride, bias=False, fix_points=fix_points)
layers = list()
layers.append(block(self.inplanes, planes, fix_points=fix_points, stride=stride,
downsample=downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, fix_points=fix_points))
return nn.ModuleList(layers)
def forward(self, x, coeffs_t):
x = self.conv1(x, coeffs_t)
for block in self.layer1: # 32x32
x = block(x, coeffs_t)
for block in self.layer2: # 16x16
x = block(x, coeffs_t)
for block in self.layer3: # 8x8
x = block(x, coeffs_t)
x = self.bn(x, coeffs_t)
x = self.relu(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x, coeffs_t)
return x
class PreResNet110:
base = PreResNetBase
curve = PreResNetCurve
kwargs = {'depth': 110}
class PreResNet164:
base = PreResNetBase
curve = PreResNetCurve
kwargs = {'depth': 164}
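if __name__ == "__main__":
    import torch
    # Sanity-check sketch: one forward pass of the base network on a
    # CIFAR-sized batch (num_classes=10 is an illustrative choice).
    net = PreResNet110.base(num_classes=10, **PreResNet110.kwargs)
    out = net(torch.rand(2, 3, 32, 32))
    print(out.shape)  # expected: torch.Size([2, 10])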
| 10,221 | 32.080906 | 103 | py |
MCGR | MCGR-main/models/vgg.py | """
VGG model definition
ported from https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py
"""
import math
import torch.nn as nn
import curves
__all__ = ['VGG16', 'VGG16BN', 'VGG19', 'VGG19BN']
config = {
16: [[64, 64], [128, 128], [256, 256, 256], [512, 512, 512], [512, 512, 512]],
19: [[64, 64], [128, 128], [256, 256, 256, 256], [512, 512, 512, 512], [512, 512, 512, 512]],
}
def make_layers(config, batch_norm=False, fix_points=None):
layer_blocks = nn.ModuleList()
activation_blocks = nn.ModuleList()
poolings = nn.ModuleList()
kwargs = dict()
conv = nn.Conv2d
bn = nn.BatchNorm2d
if fix_points is not None:
kwargs['fix_points'] = fix_points
conv = curves.Conv2d
bn = curves.BatchNorm2d
in_channels = 3
for sizes in config:
layer_blocks.append(nn.ModuleList())
activation_blocks.append(nn.ModuleList())
for channels in sizes:
layer_blocks[-1].append(conv(in_channels, channels, kernel_size=3, padding=1, **kwargs))
if batch_norm:
layer_blocks[-1].append(bn(channels, **kwargs))
activation_blocks[-1].append(nn.ReLU(inplace=True))
in_channels = channels
poolings.append(nn.MaxPool2d(kernel_size=2, stride=2))
return layer_blocks, activation_blocks, poolings
class VGGBase(nn.Module):
def __init__(self, num_classes, depth=16, batch_norm=False):
super(VGGBase, self).__init__()
layer_blocks, activation_blocks, poolings = make_layers(config[depth], batch_norm)
self.layer_blocks = layer_blocks
self.activation_blocks = activation_blocks
self.poolings = poolings
self.classifier = nn.Sequential(
nn.Dropout(),
nn.Linear(512, 512),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(512, 512),
nn.ReLU(inplace=True),
nn.Linear(512, num_classes),
)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
m.bias.data.zero_()
def forward(self, x):
for layers, activations, pooling in zip(self.layer_blocks, self.activation_blocks,
self.poolings):
for layer, activation in zip(layers, activations):
x = layer(x)
x = activation(x)
x = pooling(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
class VGGCurve(nn.Module):
def __init__(self, num_classes, fix_points, depth=16, batch_norm=False):
super(VGGCurve, self).__init__()
layer_blocks, activation_blocks, poolings = make_layers(config[depth],
batch_norm,
fix_points=fix_points)
self.layer_blocks = layer_blocks
self.activation_blocks = activation_blocks
self.poolings = poolings
self.dropout1 = nn.Dropout()
self.fc1 = curves.Linear(512, 512, fix_points=fix_points)
self.relu1 = nn.ReLU(inplace=True)
self.dropout2 = nn.Dropout()
self.fc2 = curves.Linear(512, 512, fix_points=fix_points)
self.relu2 = nn.ReLU(inplace=True)
self.fc3 = curves.Linear(512, num_classes, fix_points=fix_points)
# Initialize weights
for m in self.modules():
if isinstance(m, curves.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
for i in range(m.num_bends):
getattr(m, 'weight_%d' % i).data.normal_(0, math.sqrt(2. / n))
getattr(m, 'bias_%d' % i).data.zero_()
def forward(self, x, coeffs_t):
for layers, activations, pooling in zip(self.layer_blocks, self.activation_blocks,
self.poolings):
for layer, activation in zip(layers, activations):
x = layer(x, coeffs_t)
x = activation(x)
x = pooling(x)
x = x.view(x.size(0), -1)
x = self.dropout1(x)
x = self.fc1(x, coeffs_t)
x = self.relu1(x)
x = self.dropout2(x)
x = self.fc2(x, coeffs_t)
x = self.relu2(x)
x = self.fc3(x, coeffs_t)
return x
class VGG16:
base = VGGBase
curve = VGGCurve
kwargs = {
'depth': 16,
'batch_norm': False
}
class VGG16BN:
base = VGGBase
curve = VGGCurve
kwargs = {
'depth': 16,
'batch_norm': True
}
class VGG19:
base = VGGBase
curve = VGGCurve
kwargs = {
'depth': 19,
'batch_norm': False
}
class VGG19BN:
base = VGGBase
curve = VGGCurve
kwargs = {
'depth': 19,
'batch_norm': True
}
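if __name__ == "__main__":
    import torch
    # Curve-variant sketch: every curve layer blends its num_bends weight
    # copies with the per-forward Bezier coefficients coeffs_t. fix_points
    # freezes the two endpoints of an assumed 3-bend curve; the coefficient
    # values below are an arbitrary illustrative choice.
    net = VGG16.curve(num_classes=10, fix_points=[True, False, True], **VGG16.kwargs)
    coeffs_t = torch.tensor([0.25, 0.50, 0.25])
    out = net(torch.rand(2, 3, 32, 32), coeffs_t)
    print(out.shape)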
| 5,029 | 29.670732 | 100 | py |
MCGR | MCGR-main/models/wide_resnet.py | """
WideResNet model definition
ported from https://github.com/meliketoy/wide-resnet.pytorch/blob/master/networks/wide_resnet.py
"""
import torch.nn as nn
import torch.nn.functional as F
import curves
__all__ = ['WideResNet28x10']
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=True)
def conv3x3curve(in_planes, out_planes, fix_points, stride=1):
return curves.Conv2d(in_planes, out_planes, kernel_size=3, fix_points=fix_points, stride=stride,
padding=1, bias=True)
class WideBasic(nn.Module):
def __init__(self, in_planes, planes, dropout_rate, stride=1):
super(WideBasic, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, bias=True)
self.dropout = nn.Dropout(p=dropout_rate)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=True)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=True),
)
def forward(self, x):
out = self.dropout(self.conv1(F.relu(self.bn1(x))))
out = self.conv2(F.relu(self.bn2(out)))
out += self.shortcut(x)
return out
class WideBasicCurve(nn.Module):
def __init__(self, in_planes, planes, dropout_rate, fix_points, stride=1):
super(WideBasicCurve, self).__init__()
self.bn1 = curves.BatchNorm2d(in_planes, fix_points=fix_points)
self.conv1 = curves.Conv2d(in_planes, planes, kernel_size=3, padding=1, bias=True,
fix_points=fix_points)
self.dropout = nn.Dropout(p=dropout_rate)
self.bn2 = curves.BatchNorm2d(planes, fix_points=fix_points)
self.conv2 = curves.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1,
bias=True, fix_points=fix_points)
self.shortcut = None
if stride != 1 or in_planes != planes:
self.shortcut = curves.Conv2d(in_planes, planes, kernel_size=1, stride=stride,
bias=True, fix_points=fix_points)
def forward(self, x, coeffs_t):
out = self.dropout(self.conv1(F.relu(self.bn1(x, coeffs_t)), coeffs_t))
out = self.conv2(F.relu(self.bn2(out, coeffs_t)), coeffs_t)
residual = x
if self.shortcut is not None:
residual = self.shortcut(x, coeffs_t)
out += residual
return out
class WideResNetBase(nn.Module):
def __init__(self, num_classes, depth=28, widen_factor=10, dropout_rate=0.):
super(WideResNetBase, self).__init__()
self.in_planes = 16
assert ((depth - 4) % 6 == 0), 'Wide-resnet depth should be 6n+4'
        n = (depth - 4) // 6
k = widen_factor
nstages = [16, 16 * k, 32 * k, 64 * k]
self.conv1 = conv3x3(3, nstages[0])
self.layer1 = self._wide_layer(WideBasic, nstages[1], n, dropout_rate, stride=1)
self.layer2 = self._wide_layer(WideBasic, nstages[2], n, dropout_rate, stride=2)
self.layer3 = self._wide_layer(WideBasic, nstages[3], n, dropout_rate, stride=2)
self.bn1 = nn.BatchNorm2d(nstages[3], momentum=0.9)
self.linear = nn.Linear(nstages[3], num_classes)
def _wide_layer(self, block, planes, num_blocks, dropout_rate, stride):
strides = [stride] + [1] * int(num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, dropout_rate, stride))
self.in_planes = planes
return nn.Sequential(*layers)
def forward(self, x):
out = self.conv1(x)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.relu(self.bn1(out))
out = F.avg_pool2d(out, 8)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
class WideResNetCurve(nn.Module):
def __init__(self, num_classes, fix_points, depth=28, widen_factor=10, dropout_rate=0.):
super(WideResNetCurve, self).__init__()
self.in_planes = 16
assert ((depth - 4) % 6 == 0), 'Wide-resnet depth should be 6n+4'
        n = (depth - 4) // 6
k = widen_factor
nstages = [16, 16 * k, 32 * k, 64 * k]
self.conv1 = conv3x3curve(3, nstages[0], fix_points=fix_points)
self.layer1 = self._wide_layer(WideBasicCurve, nstages[1], n, dropout_rate, stride=1,
fix_points=fix_points)
self.layer2 = self._wide_layer(WideBasicCurve, nstages[2], n, dropout_rate, stride=2,
fix_points=fix_points)
self.layer3 = self._wide_layer(WideBasicCurve, nstages[3], n, dropout_rate, stride=2,
fix_points=fix_points)
self.bn1 = curves.BatchNorm2d(nstages[3], momentum=0.9, fix_points=fix_points)
self.linear = curves.Linear(nstages[3], num_classes, fix_points=fix_points)
def _wide_layer(self, block, planes, num_blocks, dropout_rate, stride, fix_points):
strides = [stride] + [1] * int(num_blocks - 1)
layers = []
for stride in strides:
layers.append(
block(self.in_planes, planes, dropout_rate, fix_points=fix_points, stride=stride)
)
self.in_planes = planes
return nn.ModuleList(layers)
def forward(self, x, coeffs_t):
out = self.conv1(x, coeffs_t)
for block in self.layer1:
out = block(out, coeffs_t)
for block in self.layer2:
out = block(out, coeffs_t)
for block in self.layer3:
out = block(out, coeffs_t)
out = F.relu(self.bn1(out, coeffs_t))
out = F.avg_pool2d(out, 8)
out = out.view(out.size(0), -1)
out = self.linear(out, coeffs_t)
return out
class WideResNet28x10:
base = WideResNetBase
curve = WideResNetCurve
kwargs = {'depth': 28, 'widen_factor': 10}
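# Depth bookkeeping sketch: for WideResNet28x10, depth = 28 gives
# n = (28 - 4) // 6 = 4 WideBasic blocks per stage, and widen_factor k = 10
# scales the stage widths to nstages = [16, 160, 320, 640].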
| 6,272 | 36.562874 | 100 | py |
MCGR | MCGR-main/models/convfc.py | import math
import torch.nn as nn
import curves
__all__ = [
'ConvFC',
]
class ConvFCBase(nn.Module):
def __init__(self, num_classes):
super(ConvFCBase, self).__init__()
self.conv_part = nn.Sequential(
nn.Conv2d(3, 32, kernel_size=5, padding=2),
nn.ReLU(True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(32, 64, kernel_size=5, padding=2),
nn.ReLU(True),
nn.MaxPool2d(3, 2),
nn.Conv2d(64, 128, kernel_size=5, padding=2),
nn.ReLU(True),
nn.MaxPool2d(3, 2),
)
self.fc_part = nn.Sequential(
nn.Linear(1152, 1000),
nn.ReLU(True),
nn.Linear(1000, 1000),
nn.ReLU(True),
nn.Linear(1000, num_classes)
)
# Initialize weights
for m in self.conv_part.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
m.bias.data.zero_()
def forward(self, x):
x = self.conv_part(x)
x = x.view(x.size(0), -1)
x = self.fc_part(x)
return x
class ConvFCCurve(nn.Module):
def __init__(self, num_classes, fix_points):
super(ConvFCCurve, self).__init__()
self.conv1 = curves.Conv2d(3, 32, kernel_size=5, padding=2, fix_points=fix_points)
self.relu1 = nn.ReLU(True)
self.max_pool1 = nn.MaxPool2d(kernel_size=3, stride=2)
self.conv2 = curves.Conv2d(32, 64, kernel_size=5, padding=2, fix_points=fix_points)
self.relu2 = nn.ReLU(True)
self.max_pool2 = nn.MaxPool2d(3, 2)
self.conv3 = curves.Conv2d(64, 128, kernel_size=5, padding=2, fix_points=fix_points)
self.relu3 = nn.ReLU(True)
self.max_pool3 = nn.MaxPool2d(3, 2)
self.fc4 = curves.Linear(1152, 1000, fix_points=fix_points)
self.relu4 = nn.ReLU(True)
self.fc5 = curves.Linear(1000, 1000, fix_points=fix_points)
self.relu5 = nn.ReLU(True)
self.fc6 = curves.Linear(1000, num_classes, fix_points=fix_points)
# Initialize weights
for m in self.modules():
if isinstance(m, curves.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
for i in range(m.num_bends):
getattr(m, 'weight_%d' % i).data.normal_(0, math.sqrt(2. / n))
getattr(m, 'bias_%d' % i).data.zero_()
def forward(self, x, coeffs_t):
x = self.conv1(x, coeffs_t)
x = self.relu1(x)
x = self.max_pool1(x)
x = self.conv2(x, coeffs_t)
x = self.relu2(x)
x = self.max_pool2(x)
x = self.conv3(x, coeffs_t)
x = self.relu3(x)
x = self.max_pool3(x)
x = x.view(x.size(0), -1)
x = self.fc4(x, coeffs_t)
x = self.relu4(x)
x = self.fc5(x, coeffs_t)
x = self.relu5(x)
x = self.fc6(x, coeffs_t)
return x
class ConvFC:
base = ConvFCBase
curve = ConvFCCurve
kwargs = {}
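# Shape sketch: for 32x32 inputs, each 3x3/stride-2 max-pool shrinks the map
# 32 -> 15 -> 7 -> 3, so the flattened conv output has 128 * 3 * 3 = 1152
# features -- matching the nn.Linear(1152, 1000) at the head of fc_part.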
| 3,153 | 27.93578 | 92 | py |
Conformer-RLpatching | Conformer-RLpatching-main/RLpatching/main_agent_traing.py | from Re_buffer import *
import os
import critic_q
import argparse
from critic_q import Critic
import numpy as np
import torch
import torch.optim as optim
import torch.nn.functional as F
from numpy import *
from actor_pre_training.actor_pre_training import Agent_Supervised_Actor as Model_p
from critic_pre_training.critic_pre_training import Agent_Supervised_Critic as Model_r
from actor_pre_training.actor_pre_training import supervised_train_actor
from critic_pre_training.critic_pre_training import supervised_train_critic
from grid_agent import GridAgent
from cmath import inf
from utilize.form_action import *
from Environment.base_env import Environment
from utilize.settings import settings
import csv
import torch.utils.data as Data
import random
from dispatching_necessity_evaluation import *
import sys
sys.path.append("/root/lixinhang/Conformer")
import Conformer
from Conformer import Conformer
start_idx_set = 42312
select=True
R=0.9
L=0.9
N=40
def get_parsers():
parser = argparse.ArgumentParser()
parser.add_argument('--tau', default=0.005, type=float) # target smoothing coefficient
#parser.add_argument('--target_update_interval', default=1, type=int)
parser.add_argument('--test_iteration', default=10, type=int)
parser.add_argument('--learning_rate', default=1e-4, type=float)
parser.add_argument('--gamma', default=0.99, type=float) # discounted factor
parser.add_argument('--capacity', default=1000000, type=int) # replay buffer size
parser.add_argument('--batch_size', default=512, type=int) # mini batch size
parser.add_argument('--seed', default=False, type=bool)
parser.add_argument('--random_seed', default=9527, type=int)
# optional parameters
parser.add_argument('--sample_frequency', default=2000, type=int)
#parser.add_argument('--render', default=False, type=bool) # show UI or not
parser.add_argument('--log_interval', default=50, type=int) #
#parser.add_argument('--load', default=False, type=bool) # load model
parser.add_argument('--render_interval', default=100, type=int) # after render_interval, the env.render() will work
#parser.add_argument('--exploration_noise', default=0.1, type=float)
parser.add_argument('--max_episode', default=100000, type=int) # num of games
parser.add_argument('--print_log', default=5, type=int)
parser.add_argument('--update_iteration', default=100, type=int)
parser.add_argument('--pre_training', action="store_true", default=False, help="pre training")
parser.add_argument('--exp_name', type=str, default="0323_CR_R8_L8_N40")
parser.add_argument('--format', type=int, default=0, help='the format of datas')
return parser.parse_args()
def sep(_obs):
_list = _obs.a_ex + _obs.p_ex + _obs.q_ex + _obs.v_ex + _obs.rho + _obs.grid_loss + _obs.nextstep_load_p + \
_obs.load_q + _obs.nextstep_renewable_gen_p_max + _obs.gen_q
return _list
def sep_all(_obs, repm, format):
idx = start_idx_set + _obs.timestep
row_y = []
if format==0:
next_ten_step_renewable_max=repm.pre(t=idx)
for i in range(len(next_ten_step_renewable_max)):
row_y += list(next_ten_step_renewable_max[i])
elif format==1:
for i in range(10):
row_y += [0 for _ in range(18)]
elif format==2:
row_y += _obs.nextstep_renewable_gen_p_max
listIndexs = list(range(idx+3, idx+11+1))
with open('data/max_renewable_gen_p.csv', 'rt') as csvfile:
reader = csv.reader(csvfile)
reader_rows = [row for row in reader]
for index in listIndexs:
row_y_temp = []
row = reader_rows[index]
for num in row:
row_y_temp.append(float(num))
row_y += row_y_temp
elif format == 3:
row_y = []
next_ten_step_renewable_max=repm.pre(t=idx)
row_y += list(next_ten_step_renewable_max[0])
for i in range(9):
row_y += [0 for _ in range(18)]
_list = _obs.a_ex + _obs.p_ex + _obs.q_ex + _obs.v_ex + _obs.p_or + _obs.q_or + _obs.v_or + _obs.rho + \
_obs.grid_loss + _obs.nextstep_load_p + _obs.load_q + row_y + _obs.gen_q
return _list
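# Lookahead layout note for sep_all (sketch of the `format` flag above):
#   0 -- ten forecast steps of renewable-max from the Conformer predictor
#   1 -- ten zero-padded 18-dim slots (no forecast)
#   2 -- the observation's next-step maximum plus nine ground-truth rows read
#        from data/max_renewable_gen_p.csv
#   3 -- one Conformer forecast step followed by nine zero slots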
def change_adj_p(obs, actions_p, num_gen):
adjust_gen_p_action_space = obs.action_space['adjust_gen_p']
min_adj_p = adjust_gen_p_action_space.low
max_adj_p = adjust_gen_p_action_space.high
for i in range(num_gen):
if min_adj_p[i] == -inf:
actions_p[i] = 0
continue
if min_adj_p[i] == inf:
actions_p[i] = 0
continue
if actions_p[i] < min_adj_p[i]:
actions_p[i] = min_adj_p[i]
elif actions_p[i] > max_adj_p[i]:
actions_p[i] = max_adj_p[i]
return actions_p
def wrap_action(adjust_gen_p, adjust_gen_v):
act = {
'adjust_gen_p': adjust_gen_p,
'adjust_gen_v': adjust_gen_v
}
return act
def change_adj_v(obs, actions_v, num_gen):
adjust_gen_v_action_space = obs.action_space['adjust_gen_v']
min_adj_v = adjust_gen_v_action_space.low
max_adj_v = adjust_gen_v_action_space.high
for i in range(num_gen):
if min_adj_v[i] == -inf or min_adj_v[i] == inf:
actions_v[i] = 0
continue
if actions_v[i] < min_adj_v[i]:
actions_v[i] = min_adj_v[i]
elif actions_v[i] > max_adj_v[i]:
actions_v[i] = max_adj_v[i]
return actions_v
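# Clamping note for the two helpers above: requested adjustments are clipped
# into the environment's [low, high] action-space bounds per generator, and
# entries whose lower bound is +/-inf are treated as unactionable and zeroed.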
def str_list_to_float_list(str_list):
    # Convert a list of numeric strings to floats in place and return it.
    for n in range(len(str_list)):
        str_list[n] = float(str_list[n])
    return str_list
class Agent():
def __init__(self, pre_training=True):
self.num_agent = 54
self.rand=False
self.device = torch.device('cuda')
self.actor_p = Model_p().to(self.device)
self.actor_p_target=Model_p().to(self.device)
self.actor_optimizer = optim.Adam(self.actor_p.parameters(), lr=1e-7)
self.StepLR_actor = torch.optim.lr_scheduler.StepLR(self.actor_optimizer, step_size=100, gamma=0.8)
self.critic_p = Model_r().to(self.device)
self.critic_p_target = Model_r().to(self.device)
self.critic_optimizer = optim.Adam(self.critic_p.parameters(), lr=1e-5)
self.StepLR_critic = torch.optim.lr_scheduler.StepLR(self.critic_optimizer, step_size=100, gamma=0.8)
self.replay_buffer = Replay_buffer(args)
self.num_critic_update_iteration = 0
self.num_actor_update_iteration = 0
if pre_training:
self.is_pre = self.pre_training()
else:
self.is_pre = False
def initial_Conformer(self, start_index):
self.conformer=Conformer(start_index=start_index)
def pre_training(self):
print("Pre training...")
actor_result = supervised_train_actor(self.actor_p, format=args.format, exp_name=args.exp_name)
# critic_result = supervised_train_critic(self.critic_p, format=args.format, exp_name=args.exp_name)
return actor_result
def load(self):
if self.is_pre:
print("Load from pre training model...")
self.actor_p_target.load_state_dict(self.actor_p.state_dict())
self.critic_p_target.load_state_dict(self.critic_p.state_dict())
self.is_pre = False
else:
if not os.path.exists(os.path.join('best_model', args.exp_name, 'actor_best_10.pth')):
print("Load the actor from the history model...")
model_path_p = os.path.join('best_model', args.exp_name, "train_model_p_all_10.pth")
self.actor_p.load_state_dict(torch.load(model_path_p, map_location='cuda'))
self.actor_p_target.load_state_dict(self.actor_p.state_dict())
else:
print("Load the actor from best model...")
# model_path_p = os.path.join('best_model_10/actor_best_10.pth')
model_path_p = os.path.join('best_model', args.exp_name, 'actor_best_10.pth')
self.actor_p.load_state_dict(torch.load(model_path_p, map_location='cuda'))
self.actor_p_target.load_state_dict(self.actor_p.state_dict())
if not os.path.exists(os.path.join('best_model', args.exp_name, 'critic_best_10.pth')):
print("Load the critic from the history model...")
#self.critic_p_target.load_state_dict(self.critic_p.state_dict())
model_path_r = os.path.join('best_model', args.exp_name, "train_model_r_all_10.pth")
# model_path_r = os.path.join('best_model_10/critic_best_10.pth')
self.critic_p.load_state_dict(torch.load(model_path_r, map_location='cuda'))
self.critic_p_target.load_state_dict(self.critic_p.state_dict())
else:
print("Load the critic from best model...")
# self.actor_optimizer = optim.Adam(self.actor_p.parameters(), lr=1e-6)
# StepLR_ = torch.optim.lr_scheduler.StepLR(self.actor_optimizer, step_size=100, gamma=0.45)
# model_path_r = os.path.join('best_model_10/critic_best_10.pth')
model_path_r = os.path.join('best_model', args.exp_name, 'critic_best_10.pth')
self.critic_p.load_state_dict(torch.load(model_path_r, map_location='cuda'))
self.critic_p_target.load_state_dict(self.critic_p.state_dict())
# self.critic_optimizer = optim.Adam(self.critic_p.parameters(), lr=1e-6)
# StepLR_ = torch.optim.lr_scheduler.StepLR(self.critic_optimizer, step_size=100, gamma=0.45)
self.actor_p = self.actor_p.to(self.device)
self.actor_p_target = self.actor_p_target.to(self.device)
self.critic_p = self.critic_p.to(self.device)
self.critic_p_target = self.critic_p_target.to(self.device)
def act(self, obs, state_all_repm=None):
action_p, action_v = self.test(obs, state_all_repm)
ret_action = wrap_action(action_p, action_v)
return ret_action
def test(self, obs, state_all_repm=None):
state_all = state_all_repm
self.actor_p.eval()
state_all = state_all.to(self.device)
output_p = self.actor_p(state_all)
output_p = output_p.cpu().detach().numpy().tolist()
output_p = output_p[0]
action_p_orig = []
action_v_orig=[]
for i in range(self.num_agent):
action_p_orig.append(output_p[i] - obs.gen_p[i])
# action_v_orig.append(output_v[i] - obs.gen_v[i])
action_v_orig.append(0)
action_v = change_adj_v(obs, action_v_orig, self.num_agent)
if self.rand:
# random_list = random.sample(range(0,self.num_agent),15)
for i in range(54):
action_p_orig[i] = action_p_orig[i] + random.uniform(-3,3)
# action_p_orig[random_list[i]] = -1 * action_p_orig[random_list[i]]
# action_p_orig[i] = action_p_orig[i] + random.uniform(-5,5)
if select:
print("selecting...")
action_p_orig=dispatching_necessity_evaluation(obs,output_p,action_p_orig,fiR=R,fiL=L,ndis=N)
action_p = change_adj_p(obs,action_p_orig,self.num_agent)
return action_p, action_v
def train(self,time_a,time_c):
all_actor_loss=0
all_critic_loss = 0
self.actor_p_target.eval()
self.critic_p_target.eval()
# Sample replay buffer
x, y, u, r, d = self.replay_buffer.sample(args.batch_size)
state = torch.FloatTensor(x).to(self.device)
action = torch.FloatTensor(u).to(self.device)
next_state = torch.FloatTensor(y).to(self.device)
done = torch.FloatTensor(1-d).to(self.device)
reward = torch.FloatTensor(r).to(self.device)
# Compute the target Q value
target_Q = self.critic_p_target(next_state, self.actor_p_target(next_state))
target_Q = reward + (done * args.gamma * target_Q).detach()
current_Q = self.critic_p(state, action)
# Compute critic loss
critic_loss = F.mse_loss(current_Q, target_Q)
all_critic_loss =all_critic_loss+critic_loss.item()
# Optimize the critic
self.critic_optimizer.zero_grad()
critic_loss.backward()
self.critic_optimizer.step()
print("current_lr: %s" % (self.critic_optimizer.state_dict()['param_groups'][0]['lr']))
# Compute actor loss
actor_loss = -self.critic_p(state, self.actor_p(state)).mean()
all_actor_loss = all_actor_loss + actor_loss.item()
# Optimize the actor
self.actor_optimizer.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
# Update the frozen target models
if time_c % 5 ==0:
for param, target_param in zip(self.critic_p.parameters(), self.critic_p_target.parameters()):
target_param.data.copy_(args.tau * param.data + (1 - args.tau) * target_param.data)
if time_a % 2 ==0:
for param, target_param in zip(self.actor_p.parameters(), self.actor_p_target.parameters()):
target_param.data.copy_(args.tau * param.data + (1 - args.tau) * target_param.data)
self.num_actor_update_iteration += 1
self.num_critic_update_iteration += 1
return all_critic_loss, all_actor_loss
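    # Target-network note: the copies above are soft (Polyak) updates,
    # theta_target <- tau * theta + (1 - tau) * theta_target, gated on the
    # external save counters time_c / time_a rather than on every call.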
def save_a(self):
print("Save the actor network...")
        save_path = os.path.join('best_model', args.exp_name)  # match the directory load() reads from
        torch.save(self.actor_p.state_dict(), os.path.join(save_path, 'actor_best_10.pth'))
self.StepLR_actor.step()
def save_c(self):
print("Save the critic network...")
        # keep save/load paths consistent: load() reads from best_model/<exp_name>
        save_path = os.path.join('best_model', args.exp_name)
        torch.save(self.critic_p.state_dict(), os.path.join(save_path, 'critic_best_10.pth'))
self.StepLR_critic.step()
def test_result(self):
self.actor_p.eval()
total_reward = 0
step = 0
env = Environment(settings, "EPRIReward")
obs, start_idx = env.reset(start_sample_idx=42312)
self.initial_Conformer(start_index=42312)
        state_p = torch.Tensor([sep_all(obs, self.conformer, args.format)])
test_total_reward_t_lists = {"EPRIReward":[], "line_over_flow_reward":[], "renewable_consumption_reward":[] ,
"running_cost_reward":[] ,"balanced_gen_reward":[] ,"gen_reactive_power_reward":[] ,"sub_voltage_reward":[],"running_cost":[]}
while True:
action = self.act(obs, state_p)
adj_action_p=action['adjust_gen_p']
action_p=[]
for gen_i in range(len(adj_action_p)):
action_p.append(adj_action_p[gen_i]+obs.gen_p[gen_i])
print("test****************",step)
#print(action_p)
next_obs, reward_lists, done, info = env.step(action)
# print("List:")
# print(reward_lists)
if reward_lists == 0:
reward = 0
else:
reward = reward_lists["EPRIReward"]
total_reward += reward
if reward != 0:
test_total_reward_t_lists["EPRIReward"].append(reward_lists["EPRIReward"])
test_total_reward_t_lists["line_over_flow_reward"].append(reward_lists["line_over_flow_reward"])
test_total_reward_t_lists["renewable_consumption_reward"].append(reward_lists["renewable_consumption_reward"])
test_total_reward_t_lists["running_cost_reward"].append(reward_lists["running_cost_reward"])
test_total_reward_t_lists["balanced_gen_reward"].append(reward_lists["balanced_gen_reward"])
test_total_reward_t_lists["gen_reactive_power_reward"].append(reward_lists["gen_reactive_power_reward"])
test_total_reward_t_lists["sub_voltage_reward"].append(reward_lists["sub_voltage_reward"])
test_total_reward_t_lists["running_cost"] .append( reward_lists["running_cost"])
print("step_reward:",reward)
# r_pre=total_reward
if abs(reward-0)<0.00001:
reward=-2
            state_p_next = torch.Tensor([sep_all(next_obs, self.conformer, args.format)])
#if args.render and i >= args.render_interval: env.render()
if reward>0.5 or len(self.replay_buffer.storage) < 500:
                self.replay_buffer.push((state_p, state_p_next, action_p, reward, float(done)))  # np.float was removed in NumPy 1.24
state_p = state_p_next
obs = next_obs
if done:
break
step += 1
return total_reward, step, test_total_reward_t_lists
if __name__ == '__main__':
args = get_parsers()
save_Test=False
if not os.path.exists(os.path.join('best_model/', args.exp_name)):
os.makedirs(os.path.join('best_model/', args.exp_name))
print("make the new dirs...")
    pre_tra = args.pre_training  # respect the --pre_training flag defined above
agent=Agent(pre_training=pre_tra)
total_step = 0
    r_best = 0
    r_pre = 0  # best test reward seen so far; initialized so logging can read it before the first improvement
c_loss_best = 100
time_a = 0
time_c = 0
if pre_tra:
with open(os.path.join('best_model', args.exp_name, "result_online_best_reward.csv"),"w",newline="") as csvfile:
writer = csv.writer(csvfile)
writer.writerow(["episode","train_times","update_times","steps","EPRIReward","line_over_flow_reward","renewable_consumption_rewards","running_cost_reward","balanced_gen_reward","gen_reactive_power_reward","sub_voltage_reward","total_cost","A loss","C loss"])
with open(os.path.join('best_model', args.exp_name, "result_online_loss.csv"),"w",newline="") as csvfile:
writer = csv.writer(csvfile)
writer.writerow(["episode","train_times","update_times","steps","total reward","A loss","C loss"])
with open(os.path.join('best_model', args.exp_name, "result_best_epoch_reward.csv"),"w",newline="") as csvfile:
writer = csv.writer(csvfile)
writer.writerow(["step","EPRIReward","line_over_flow_reward","renewable_consumption_rewards","running_cost_reward","balanced_gen_reward","gen_reactive_power_reward","sub_voltage_reward","cost"])
with open(os.path.join('best_model', args.exp_name, "test_reward.csv"),"w",newline="") as csvfile:
writer = csv.writer(csvfile)
writer.writerow(["episode","update_times","steps","EPRIReward","line_over_flow_reward","renewable_consumption_rewards","running_cost_reward","balanced_gen_reward","gen_reactive_power_reward","sub_voltage_reward","cost"])
with open(os.path.join('best_model', args.exp_name, "result_loss.csv"),"w",newline="") as csvfile:
writer = csv.writer(csvfile)
writer.writerow(["episode","Average a loss","Average c loss"])
# with open(os.path.join('best_model_10', args.exp_name, "result_episode_reward.csv"),"w",newline="") as csvfile:
# writer = csv.writer(csvfile)
# writer.writerow(["episode","EPRIReward","line_over_flow_reward","renewable_consumption_rewards","running_cost_reward","balanced_gen_reward","gen_reactive_power_reward","sub_voltage_reward"])
for i in range(args.max_episode):
total_reward = 0
step = 0
env = Environment(settings, "EPRIReward")
obs, start_idx = env.reset(start_sample_idx=42312)
agent.initial_Conformer(start_index=42312)
agent.load()
state_p = torch.Tensor([sep_all(obs, agent.conformer, args.format)])
test_total_reward_t_lists = {"EPRIReward":[], "line_over_flow_reward":[], "renewable_consumption_reward":[] ,
"running_cost_reward":[] ,"balanced_gen_reward":[] ,"gen_reactive_power_reward":[] ,"sub_voltage_reward":[],"running_cost":[]}
while True:
action = agent.act(obs, state_p)
adj_action_p=action['adjust_gen_p']
action_p=[]
for gen_i in range(len(adj_action_p)):
action_p.append(adj_action_p[gen_i]+obs.gen_p[gen_i])
print("****************",step)
next_obs, reward_lists, done, info = env.step(action)
if reward_lists == 0:
reward = 0
else:
reward = reward_lists["EPRIReward"]
total_reward += reward
if reward != 0:
test_total_reward_t_lists["EPRIReward"].append(reward_lists["EPRIReward"])
test_total_reward_t_lists["line_over_flow_reward"] .append( reward_lists["line_over_flow_reward"])
test_total_reward_t_lists["renewable_consumption_reward"] .append( reward_lists["renewable_consumption_reward"])
test_total_reward_t_lists["running_cost_reward"] .append( reward_lists["running_cost_reward"])
test_total_reward_t_lists["balanced_gen_reward"] .append( reward_lists["balanced_gen_reward"])
test_total_reward_t_lists["gen_reactive_power_reward"] .append( reward_lists["gen_reactive_power_reward"])
test_total_reward_t_lists["sub_voltage_reward"] .append( reward_lists["sub_voltage_reward"])
test_total_reward_t_lists["running_cost"] .append( reward_lists["running_cost"])
print("step_reward:",reward)
if abs(reward-0)<0.00001:
reward=-2
state_p_next = torch.Tensor([sep_all(next_obs, agent.conformer, args.format)])
if reward>-5 or len(agent.replay_buffer.storage) < 1000:
                agent.replay_buffer.push((state_p, state_p_next, action_p, reward, float(done)))
state_p = state_p_next
obs = next_obs
if done:
break
step += 1
print("total_reward:",total_reward)
total_step += step + 1
if r_best>=50:
agent.rand=True
if save_Test or total_reward>r_best:
with open(os.path.join('best_model', args.exp_name, "test_reward.csv"),"a+",newline="") as csvfile:
writer = csv.writer(csvfile)
writer.writerow([ i,time_a,step,
sum(test_total_reward_t_lists["EPRIReward"]),
sum(test_total_reward_t_lists["line_over_flow_reward"]),
sum(test_total_reward_t_lists["renewable_consumption_reward"]),
sum(test_total_reward_t_lists["running_cost_reward"]),
sum(test_total_reward_t_lists["balanced_gen_reward"]),
sum(test_total_reward_t_lists["gen_reactive_power_reward"]),
sum(test_total_reward_t_lists["sub_voltage_reward"]),
sum(test_total_reward_t_lists["running_cost"])])
save_Test=False
if total_reward>r_best:
r_best=total_reward
with open(os.path.join('best_model', args.exp_name, "result_online_best_reward.csv"),"a+",newline="") as csvfile:
writer = csv.writer(csvfile)
writer.writerow([ i, 0, time_a,step,
sum(test_total_reward_t_lists["EPRIReward"]),
sum(test_total_reward_t_lists["line_over_flow_reward"]),
sum(test_total_reward_t_lists["renewable_consumption_reward"]),
sum(test_total_reward_t_lists["running_cost_reward"]),
sum(test_total_reward_t_lists["balanced_gen_reward"]),
sum(test_total_reward_t_lists["gen_reactive_power_reward"]),
sum(test_total_reward_t_lists["sub_voltage_reward"]),
sum(test_total_reward_t_lists["running_cost"]),
0, 0])
with open(os.path.join('best_model', args.exp_name, "result_best_epoch_reward.csv"), "w",
newline="") as csvfile:
writer = csv.writer(csvfile)
writer.writerow(["Step", "EPRIReward", "line_over_flow_reward", "renewable_consumption_rewards",
"running_cost_reward", "balanced_gen_reward", "gen_reactive_power_reward",
"sub_voltage_reward","cost"])
for step_num in range(len(test_total_reward_t_lists["EPRIReward"])):
writer.writerow([step_num, test_total_reward_t_lists["EPRIReward"][step_num],
test_total_reward_t_lists["line_over_flow_reward"][step_num],
test_total_reward_t_lists["renewable_consumption_reward"][step_num],
test_total_reward_t_lists["running_cost_reward"][step_num],
test_total_reward_t_lists["balanced_gen_reward"][step_num],
test_total_reward_t_lists["gen_reactive_power_reward"][step_num],
test_total_reward_t_lists["sub_voltage_reward"][step_num],
test_total_reward_t_lists["running_cost"][step_num]])
if len(agent.replay_buffer.storage)> args.batch_size:
total_c_loss = 0
total_a_loss = 0
iterations = 0
for train_times in range(args.update_iteration):
print("****************",train_times,"****************")
c_loss,a_loss=agent.train(time_a,time_c)
total_c_loss += c_loss
total_a_loss += a_loss
iterations += 1
total_reward_test, step_test, reward_lists = agent.test_result()
if total_reward_test > r_best:
agent.save_a()
r_pre = total_reward_test
time_a = time_a + 1
save_Test = True
with open(os.path.join('best_model', args.exp_name, "result_online_best_reward.csv"),"a+",newline="") as csvfile:
writer = csv.writer(csvfile)
writer.writerow([i, train_times, time_a, step_test,
sum(reward_lists["EPRIReward"]),
sum(reward_lists["line_over_flow_reward"]),
sum(reward_lists["renewable_consumption_reward"]),
sum(reward_lists["running_cost_reward"]),
sum(reward_lists["balanced_gen_reward"]),
sum(reward_lists["gen_reactive_power_reward"]),
sum(reward_lists["sub_voltage_reward"]),
sum(reward_lists["running_cost"]),
a_loss, c_loss])
if total_reward_test > 0:
with open(os.path.join('best_model', args.exp_name, "result_best_epoch_reward.csv"), "w", newline="") as csvfile:
writer = csv.writer(csvfile)
writer.writerow(["Step", "EPRIReward","line_over_flow_reward","renewable_consumption_rewards","running_cost_reward","balanced_gen_reward","gen_reactive_power_reward","sub_voltage_reward","cost"])
for step_num in range(len(reward_lists["EPRIReward"])):
writer.writerow([step_num,
reward_lists["EPRIReward"][step_num],
reward_lists["line_over_flow_reward"][step_num],
reward_lists["renewable_consumption_reward"][step_num],
reward_lists["running_cost_reward"][step_num],
reward_lists["balanced_gen_reward"][step_num],
reward_lists["gen_reactive_power_reward"][step_num],
reward_lists["sub_voltage_reward"][step_num],
reward_lists["running_cost"][step_num]])
if c_loss < c_loss_best:
agent.save_c()
c_loss_best = c_loss
time_c = time_c + 1
with open(os.path.join('best_model', args.exp_name, "result_online_loss.csv"), "a+", newline="") as csvfile:
writer = csv.writer(csvfile)
writer.writerow([i, train_times, time_c,step_test, total_reward_test, a_loss, c_loss])
print("EXP_NAME: {} Total T:{} Episode: \t{} steps: \t{} Total Reward: \t{:0.2f} A_loss: \t{:0.2f} C_loss: \t{:0.2f} R_PRE: \t{:0.2f} Test_reward: \t{:0.2f}".format(args.exp_name, total_step, i, step_test,
total_reward_test,a_loss,c_loss,r_pre,total_reward_test))
if step_test<50 and r_pre > 200:
break
with open(os.path.join('best_model', args.exp_name, "result_loss.csv"), "a+", newline="") as csvfile:
writer = csv.writer(csvfile)
writer.writerow([i, total_a_loss/iterations, total_c_loss/iterations])
else:
print("EXP_NAME: {} Total T:{} Episode: \t{} steps: \t{} Total Reward: \t{:0.2f} R_PRE: \t{:0.2f}".format(args.exp_name, total_step, i, step,
total_reward,r_pre))
| 30,268 | 46.742902 | 270 | py |
Conformer-RLpatching | Conformer-RLpatching-main/RLpatching/critic_pre_training/critic_pre_training.py | import csv
import random
import torch.utils.data as Data
import numpy
import numpy as np
import torch
import torch.nn.functional as F
import torch.nn as nn
from collections import OrderedDict
from torch import optim
from torch.autograd import Variable
import os
import sys
sys.path.append(os.path.expanduser('~/lixinhang/RLpatching/'))  # '~' is not expanded automatically
BATCH_SIZE=256
LR = 0.01
epoch_num = 100
class Agent_Supervised_Critic(torch.nn.Module):
def __init__(self):
super(Agent_Supervised_Critic,self).__init__()
self.fc_net = nn.Sequential(
OrderedDict([
('dense1', nn.Linear(1951,1024)),
('norm1', nn.BatchNorm1d(1024)),
('relu1', nn.ELU()),
('dense2', nn.Linear(1024,512)),
('norm2', nn.BatchNorm1d(512)),
('relu2', nn.ELU()),
("dense3", nn.Linear(512, 256)),
("norm3", nn.BatchNorm1d(256)),
("relu3", nn.ELU()),
("dense4", nn.Linear(256, 128)),
("norm4", nn.BatchNorm1d(128)),
("relu4", nn.ELU()),
("dense5", nn.Linear(128,1))
])
)
def forward(self, x,a):
if len(x.size())==3:
x = torch.squeeze(x,1)
# print("X shape is", x.size())
# print("A shape is", a.size())
return self.fc_net(torch.cat([x, a], 1))
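# Shape sketch: the critic concatenates a 1897-dim state with a 54-dim action
# (1897 + 54 = 1951, the width of dense1) and regresses a scalar Q-value, so
# Agent_Supervised_Critic()(torch.rand(8, 1897), torch.rand(8, 54)) -> (8, 1).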
def str_list_to_float_list(str_list):
    # Convert a list of numeric strings to floats in place and return it.
    for n in range(len(str_list)):
        str_list[n] = float(str_list[n])
    return str_list
def read_split_csv(path, train_size, total=106820):
    # Shared reader for the per-feature CSV logs: skip the header row, then
    # split rows 1..train_size into the train partition and the remainder
    # (up to `total`) into the test partition.
    train_rows, test_rows = [], []
    with open(path, "r", newline="") as csvfile:
        for i, row in enumerate(csv.reader(csvfile)):
            if 0 < i <= train_size:
                train_rows.append(str_list_to_float_list(row))
            elif train_size < i <= total:
                test_rows.append(str_list_to_float_list(row))
    return train_rows, test_rows
def dataset(train_data, format=2):
    train_data_size = 106820 * train_data
    train_data1 = []
    test_data1 = []
    train_data2 = []
    test_data2 = []
    train_target = []
    test_target = []
    train_data_a_ex, test_data_a_ex = read_split_csv("data/a_ex.csv", train_data_size)
    train_data_p_ex, test_data_p_ex = read_split_csv("data/p_ex.csv", train_data_size)
    train_data_v_ex, test_data_v_ex = read_split_csv("data/v_ex.csv", train_data_size)
    train_data_q_ex, test_data_q_ex = read_split_csv("data/q_ex.csv", train_data_size)
    train_data_p_or, test_data_p_or = read_split_csv("data/p_or.csv", train_data_size)
    train_data_v_or, test_data_v_or = read_split_csv("data/v_or.csv", train_data_size)
    train_data_q_or, test_data_q_or = read_split_csv("data/q_or.csv", train_data_size)
    train_data_rho, test_data_rho = read_split_csv("data/rho.csv", train_data_size)
    train_data_grid_loss, test_data_grid_loss = read_split_csv("data/grid_loss.csv", train_data_size)
    train_data_load_p, test_data_load_p = read_split_csv("data/load_p.csv", train_data_size)
    train_data_load_q, test_data_load_q = read_split_csv("data/load_q.csv", train_data_size)
    train_data_max_p, test_data_max_p = read_split_csv("data/max_renewable_gen_p.csv", train_data_size)
    train_data_gen_q, test_data_gen_q = read_split_csv("data/gen_q.csv", train_data_size)
    train_data_gen_p, test_data_gen_p = read_split_csv("data/gen_p.csv", train_data_size)
    # print("%%%%", np.array(test_data_load_p).shape)
    # print("%%%%", np.array(test_data_load_q).shape)
    # print("%%%%", np.array(test_data_max_p).shape)
    # print("%%%%", np.array(test_data_gen_q).shape)
if format == 1:
row_y = []
for i in range(10):
row_y += [0 for _ in range(18)]
for i in range(0,len(train_data_gen_q)-10):
temp = train_data_a_ex[i] +train_data_p_ex[i] +train_data_q_ex[i] +train_data_v_ex[i] +train_data_p_or[i]+train_data_q_or[i]+train_data_v_or[i]+train_data_rho[i] +train_data_grid_loss[i]+train_data_load_p[i] + train_data_load_q[i] \
+ row_y + train_data_gen_q[i]
train_data1.append(temp)
train_data2.append(train_data_gen_p[i])
# print("***",np.array(train_data).shape)
for i in range(0,len(test_data_gen_q)-10):
temp = test_data_a_ex[i] +test_data_p_ex[i] +test_data_q_ex[i] +test_data_v_ex[i] +test_data_p_or[i]+test_data_q_or[i]+test_data_v_or[i]+test_data_rho[i] +test_data_grid_loss[i]+test_data_load_p[i] + test_data_load_q[i]\
+ row_y + test_data_gen_q[i]
test_data1.append(temp)
test_data2.append(test_data_gen_p[i])
elif format==0 or format==2:
for i in range(0,len(train_data_gen_q)-10):
temp = train_data_a_ex[i] +train_data_p_ex[i] +train_data_q_ex[i] +train_data_v_ex[i] +train_data_p_or[i]+train_data_q_or[i]+train_data_v_or[i]+train_data_rho[i] +train_data_grid_loss[i]+train_data_load_p[i] + train_data_load_q[i] \
+ train_data_max_p[i] + train_data_max_p[i+1] + train_data_max_p[i+2] + train_data_max_p[i+3] + train_data_max_p[i+4] + train_data_max_p[i+5] + train_data_max_p[i+6] + train_data_max_p[i+7] + train_data_max_p[i+8] + train_data_max_p[i+9] + train_data_gen_q[i]
train_data1.append(temp)
train_data2.append(train_data_gen_p[i])
# print("***",np.array(train_data).shape)
for i in range(0,len(test_data_gen_q)-10):
temp = test_data_a_ex[i] +test_data_p_ex[i] +test_data_q_ex[i] +test_data_v_ex[i] +test_data_p_or[i]+test_data_q_or[i]+test_data_v_or[i]+test_data_rho[i] +test_data_grid_loss[i]+test_data_load_p[i] + test_data_load_q[i]\
            + test_data_max_p[i] + test_data_max_p[i+1] + test_data_max_p[i+2] + test_data_max_p[i+3] + test_data_max_p[i+4] + test_data_max_p[i+5] + test_data_max_p[i+6] + test_data_max_p[i+7] + test_data_max_p[i+8] + test_data_max_p[i+9] + test_data_gen_q[i]
test_data1.append(temp)
test_data2.append(test_data_gen_p[i])
# print("***",np.array(test_data).shape)
elif format==3:
        for i in range(0,len(train_data_gen_q)-10):
            row_y = []
            row_y += train_data_max_p[i]
            for _ in range(9):  # pad the remaining nine lookahead slots with zeros
                row_y += [0 for _ in range(18)]
            temp = train_data_a_ex[i] +train_data_p_ex[i] +train_data_q_ex[i] +train_data_v_ex[i] +train_data_p_or[i]+train_data_q_or[i]+train_data_v_or[i]+train_data_rho[i] +train_data_grid_loss[i]+train_data_load_p[i] + train_data_load_q[i] \
            + row_y + train_data_gen_q[i]
            train_data1.append(temp)
            train_data2.append(train_data_gen_p[i])
        # print("***",np.array(train_data).shape)
        for i in range(0,len(test_data_gen_q)-10):
            row_y = []
            row_y += test_data_max_p[i]  # mirror the train branch: first forecast slot, then zero padding
            for _ in range(9):
                row_y += [0 for _ in range(18)]
            temp = test_data_a_ex[i] +test_data_p_ex[i] +test_data_q_ex[i] +test_data_v_ex[i] +test_data_p_or[i]+test_data_q_or[i]+test_data_v_or[i]+test_data_rho[i] +test_data_grid_loss[i]+test_data_load_p[i] + test_data_load_q[i]\
            + row_y + test_data_gen_q[i]
            test_data1.append(temp)
            test_data2.append(test_data_gen_p[i])
with open("data/reward.csv", "r", newline="") as csvfile:
reader = csv.reader(csvfile)
for i, rows in enumerate(reader):
if i <= train_data_size-10 and i > 0:
row = rows
row = str_list_to_float_list(row)
train_target.append(row)
elif i>train_data_size and i<=106810:
row = rows
row = str_list_to_float_list(row)
test_target.append(row)
# print('train_target:', np.array(train_target).shape)
# print('test_target:', np.array(test_target).shape)
return train_data1,train_data2,train_target,test_data1,test_data2,test_target
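# dataset() returns, per partition: the assembled state rows, the executed
# gen_p vectors (the supervised "actions"), and the per-step rewards logged
# in data/reward.csv.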
def fit(epoch,model,data_loader,optimizer,phase='training',volatile=False):
if phase == "training": # 判断当前是训练还是验证
model.train()
if phase == "validation":
model.eval()
volatile=True
all_loss=0.0
for batch_idx,(data1_,data2_,target_) in enumerate(data_loader):
# print('data[i]',data[i])
data1,data2, target = data1_.cuda(),data2_.cuda(), target_.cuda() # 使用cuda加速
#target.argmax(1)
#data, target = Variable(data, volatile), Variable(target)
if phase == 'training':
            optimizer.zero_grad()  # reset accumulated gradients
        output = model(data1,data2)  # forward pass
        loss = torch.nn.functional.mse_loss(output, target)  # mean-squared-error loss
        all_loss += torch.nn.functional.mse_loss(output, target, reduction='sum').item()
if phase == 'training':
loss.backward()
optimizer.step()
print("Training critic, epoch:",epoch,"----",phase,"loss:",all_loss/len(data_loader.dataset))
return all_loss/len(data_loader.dataset)
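# NOTE (explanatory sketch): with reduction='sum' inside the loop, the value
# returned by fit() is the per-sample summed squared error averaged over the
# dataset, i.e. (1/N) * sum_n ||model(x_n) - y_n||^2, so it sits on a larger
# scale than the per-element MSE used for the backward pass.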
def supervised_train_critic(model=None, phase='training', format=2, exp_name="pre_training_critic"):
    # resume from a local checkpoint when no model is passed in;
    # otherwise just move the given model to the GPU
    if model is None:
        if os.path.exists('train_model_c_10.pth'):
            model = torch.load('train_model_c_10.pth')
            torch.save(model.state_dict(), "train_model_r_all_10.pth")
        else:
            model = Agent_Supervised_Critic()
            model.cuda(0)
    else:
        model.to("cuda")
print("**********Reading the dataset************")
train_data1,train_data2,train_target,test_data1,test_data2,test_target=dataset(1, format=format)
train_dataset = Data.TensorDataset(torch.Tensor(train_data1), torch.Tensor(train_data2),torch.Tensor(train_target))
    # wrap the dataset in a DataLoader
    train_loader = Data.DataLoader(
        dataset=train_dataset,  # torch TensorDataset format
        batch_size=BATCH_SIZE,  # mini batch size
        shuffle=True,  # shuffle the data (recommended for training)
        num_workers=3,  # number of worker processes for loading
)
optimizer = torch.optim.Adam(model.parameters(), lr=LR)
StepLR = torch.optim.lr_scheduler.StepLR(optimizer, step_size=200, gamma=0.9)
train_losses, train_accuracy = [], []
val_losses, val_accuracy = [], []
with open(os.path.join('best_model', exp_name, "pre_training_critic_loss.csv"),"w",newline="") as csvfile:
writer = csv.writer(csvfile)
writer.writerow(["epoch","loss","accuracy"])
# with open(os.path.join('best_model_10', exp_name, "test_result_c_10.csv"),"w",newline="") as csvfile:
# writer = csv.writer(csvfile)
# writer.writerow(["epoch","loss","accuracy"])
for epoch in range(0, epoch_num):
epoch_loss = fit(epoch, model, train_loader,optimizer, phase='training')
StepLR.step()
if epoch %10==0:
torch.save(model, os.path.join('best_model', exp_name, "train_model_c_10.pth"))
torch.save(model.state_dict(), os.path.join('best_model', exp_name, "train_model_r_all_10.pth"))
with open(os.path.join('best_model', exp_name, "pre_training_critic_loss.csv"),"a+",newline="") as csvfile:
writer = csv.writer(csvfile)
writer.writerow([epoch,epoch_loss])
train_losses.append(epoch_loss)
print("current_lr: %s" % (optimizer.state_dict()['param_groups'][0]['lr']))
if epoch > 100 or epoch_loss <= 0.005:
print("Complicate the training of critic!")
return True
if __name__ == '__main__':
result = supervised_train_critic()
| 17,230 | 39.735225 | 285 | py |
Conformer-RLpatching | Conformer-RLpatching-main/RLpatching/actor_pre_training/actor_pre_training.py | import csv
import random
import torch.utils.data as Data
import numpy
import numpy as np
import torch
import torch.nn.functional as F
import torch.nn as nn
from collections import OrderedDict
from torch import optim
from torch.autograd import Variable
import os
import sys
sys.path.append(os.path.expanduser('~/lixinhang/RLpatching/'))  # '~' is not expanded automatically
BATCH_SIZE=256
LR = 0.001
epoch_num = 100000000000000
class Agent_Supervised_Actor(torch.nn.Module):
def __init__(self):
super(Agent_Supervised_Actor,self).__init__()
self.fc_net = nn.Sequential(
OrderedDict([
('dense1', nn.Linear(1897,1024)),
('norm1', nn.BatchNorm1d(1024)),
('relu1', nn.ELU()),
('dense2', nn.Linear(1024,512)),
('norm2', nn.BatchNorm1d(512)),
('relu2', nn.ELU()),
("dense3", nn.Linear(512, 256)),
("norm3", nn.BatchNorm1d(256)),
("relu3", nn.ELU()),
("dense4", nn.Linear(256, 128)),
("norm4", nn.BatchNorm1d(128)),
("relu4", nn.ELU()),
("dense5", nn.Linear(128,54))
])
)
def forward(self,x):
if len(x.size())==3:
x = torch.squeeze(x,1)
return self.fc_net(x)
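# NOTE (illustrative sketch): judging by the layer sizes above, the actor maps
# a 1897-dimensional grid observation to 54 outputs (one set-point per
# generator). A minimal smoke test, assuming a CUDA device is available:
#     actor = Agent_Supervised_Actor().cuda()
#     actor.eval()  # put BatchNorm in inference mode
#     y = actor(torch.randn(2, 1897).cuda())  # y.shape == (2, 54)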
def str_list_to_float_list(str_list):
n = 0
while n < len(str_list):
str_list[n] = float(str_list[n])
n += 1
return(str_list)
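# NOTE: the in-place loop above is equivalent to
#     str_list[:] = [float(s) for s in str_list]
# it mutates the list and also returns it.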
def dataset(train_data, format=2):
train_data_size = 106820 * train_data
train_data = []
test_data=[]
train_target = []
test_target=[]
train_data_a_ex =[]
train_data_p_ex =[]
train_data_v_ex =[]
train_data_q_ex =[]
train_data_p_or=[]
train_data_v_or = []
train_data_q_or = []
train_data_rho =[]
train_data_grid_loss =[]
train_data_load_p =[]
train_data_load_q =[]
train_data_max_p =[]
train_data_gen_q =[]
test_data_a_ex =[]
test_data_p_ex =[]
test_data_v_ex =[]
test_data_q_ex =[]
test_data_p_or = []
test_data_v_or = []
test_data_q_or = []
test_data_rho =[]
test_data_grid_loss =[]
test_data_load_p =[]
test_data_load_q =[]
test_data_max_p =[]
test_data_gen_q =[]
with open("data/a_ex.csv", "r", newline="") as csvfile1:
reader1 = csv.reader(csvfile1)
for i, rows in enumerate(reader1):
if i > 0 and i <= train_data_size:
row = rows
row = str_list_to_float_list(row)
train_data_a_ex.append(row)
elif i>train_data_size and i<=106820:
row = rows
row = str_list_to_float_list(row)
test_data_a_ex.append(row)
with open("data/p_ex.csv", "r", newline="") as csvfile1:
reader1 = csv.reader(csvfile1)
for i, rows in enumerate(reader1):
if i > 0 and i <= train_data_size:
row = rows
row = str_list_to_float_list(row)
train_data_p_ex.append(row)
elif i > train_data_size and i <= 106820:
row = rows
row = str_list_to_float_list(row)
test_data_p_ex.append(row)
with open("data/v_ex.csv", "r", newline="") as csvfile1:
reader1 = csv.reader(csvfile1)
for i, rows in enumerate(reader1):
if i > 0 and i <= train_data_size:
row = rows
row = str_list_to_float_list(row)
train_data_v_ex.append(row)
elif i > train_data_size and i <= 106820:
row = rows
row = str_list_to_float_list(row)
test_data_v_ex.append(row)
with open("data/q_ex.csv", "r", newline="") as csvfile1:
reader1 = csv.reader(csvfile1)
for i, rows in enumerate(reader1):
if i > 0 and i <= train_data_size:
row = rows
row = str_list_to_float_list(row)
train_data_q_ex.append(row)
elif i > train_data_size and i <= 106820:
row = rows
row = str_list_to_float_list(row)
test_data_q_ex.append(row)
with open("data/p_or.csv", "r", newline="") as csvfile1:
reader1 = csv.reader(csvfile1)
for i, rows in enumerate(reader1):
if i > 0 and i <= train_data_size:
row = rows
row = str_list_to_float_list(row)
train_data_p_or.append(row)
elif i > train_data_size and i <= 106820:
row = rows
row = str_list_to_float_list(row)
test_data_p_or.append(row)
with open("data/v_or.csv", "r", newline="") as csvfile1:
reader1 = csv.reader(csvfile1)
for i, rows in enumerate(reader1):
if i > 0 and i <= train_data_size:
row = rows
row = str_list_to_float_list(row)
train_data_v_or.append(row)
elif i > train_data_size and i <= 106820:
row = rows
row = str_list_to_float_list(row)
test_data_v_or.append(row)
with open("data/q_or.csv", "r", newline="") as csvfile1:
reader1 = csv.reader(csvfile1)
for i, rows in enumerate(reader1):
if i > 0 and i <= train_data_size:
row = rows
row = str_list_to_float_list(row)
train_data_q_or.append(row)
elif i > train_data_size and i <= 106820:
row = rows
row = str_list_to_float_list(row)
test_data_q_or.append(row)
with open("data/rho.csv", "r", newline="") as csvfile1:
reader1 = csv.reader(csvfile1)
for i, rows in enumerate(reader1):
if i > 0 and i <= train_data_size:
row = rows
row = str_list_to_float_list(row)
train_data_rho.append(row)
elif i>train_data_size and i<=106820:
row = rows
row = str_list_to_float_list(row)
test_data_rho.append(row)
with open("data/grid_loss.csv", "r", newline="") as csvfile1:
reader1 = csv.reader(csvfile1)
for i, rows in enumerate(reader1):
if i > 0 and i <= train_data_size:
row = rows
row = str_list_to_float_list(row)
train_data_grid_loss.append(row)
elif i>train_data_size and i<=106820:
row = rows
row = str_list_to_float_list(row)
test_data_grid_loss.append(row)
with open("data/load_p.csv", "r", newline="") as csvfile1:
reader1 = csv.reader(csvfile1)
for i, rows in enumerate(reader1):
if i > 0 and i <= train_data_size:
row = rows
row = str_list_to_float_list(row)
train_data_load_p.append(row)
elif i>train_data_size and i<=106820:
row = rows
row = str_list_to_float_list(row)
test_data_load_p.append(row)
with open("data/load_q.csv", "r", newline="") as csvfile2:
reader2 = csv.reader(csvfile2)
for i, rows in enumerate(reader2):
if i > 0 and i <= train_data_size:
row = rows
row = str_list_to_float_list(row)
train_data_load_q.append(row)
elif i>train_data_size and i<=106820:
row = rows
row = str_list_to_float_list(row)
test_data_load_q.append(row)
with open("data/max_renewable_gen_p.csv", "r", newline="") as csvfile3:
reader3 = csv.reader(csvfile3)
for i, rows in enumerate(reader3):
if i > 0 and i <= train_data_size:
row = rows
row = str_list_to_float_list(row)
train_data_max_p.append(row)
elif i>train_data_size and i<=106820:
row = rows
row = str_list_to_float_list(row)
test_data_max_p.append(row)
with open("data/gen_q.csv", "r", newline="") as csvfile4:
reader4 = csv.reader(csvfile4)
for i, rows in enumerate(reader4):
if i > 0 and i <= train_data_size:
row = rows
row = str_list_to_float_list(row)
train_data_gen_q.append(row)
elif i>train_data_size and i<=106820:
row = rows
row = str_list_to_float_list(row)
test_data_gen_q.append(row)
# print("%%%%",np.array(test_data_load_p).shape)
# print("%%%%",np.array(test_data_load_q).shape)
# print("%%%%",np.array(test_data_max_p).shape
# print("%%%%",np.array(test_data_gen_q).shape)
if format == 1:
row_y = []
for i in range(10):
row_y += [0 for _ in range(18)]
for i in range(0,len(train_data_gen_q)-10):
temp = train_data_a_ex[i] +train_data_p_ex[i] +train_data_q_ex[i] +train_data_v_ex[i] +train_data_p_or[i]+train_data_q_or[i]+train_data_v_or[i]+train_data_rho[i] +train_data_grid_loss[i]+train_data_load_p[i] + train_data_load_q[i]\
+ row_y + train_data_gen_q[i]
train_data.append(temp)
# print("***",np.array(train_data).shape)
for i in range(0,len(test_data_gen_q)-10):
temp = test_data_a_ex[i] +test_data_p_ex[i] +test_data_q_ex[i] +test_data_v_ex[i] +test_data_p_or[i]+test_data_q_or[i]+test_data_v_or[i]+test_data_rho[i] +test_data_grid_loss[i]+test_data_load_p[i] + test_data_load_q[i]\
+ row_y + test_data_gen_q[i]
test_data.append(temp)
# print("***",np.array(test_data).shape)
elif format==0 or format==2:
for i in range(0,len(train_data_gen_q)-10):
temp = train_data_a_ex[i] +train_data_p_ex[i] +train_data_q_ex[i] +train_data_v_ex[i] +train_data_p_or[i]+train_data_q_or[i]+train_data_v_or[i]+train_data_rho[i] +train_data_grid_loss[i]+train_data_load_p[i] + train_data_load_q[i]\
+ train_data_max_p[i] + train_data_max_p[i+1] + train_data_max_p[i+2] + train_data_max_p[i+3] + train_data_max_p[i+4] + train_data_max_p[i+5] + train_data_max_p[i+6] + train_data_max_p[i+7] + train_data_max_p[i+8] + train_data_max_p[i+9] + train_data_gen_q[i]
train_data.append(temp)
# print("***",np.array(train_data).shape)
for i in range(0,len(test_data_gen_q)-10):
temp = test_data_a_ex[i] +test_data_p_ex[i] +test_data_q_ex[i] +test_data_v_ex[i] +test_data_p_or[i]+test_data_q_or[i]+test_data_v_or[i]+test_data_rho[i] +test_data_grid_loss[i]+test_data_load_p[i] + test_data_load_q[i]\
+ test_data_max_p[i] + test_data_max_p[i+1] + test_data_max_p[i+2] + test_data_max_p[i+3] + test_data_max_p[i+4] + test_data_max_p[i+5] + test_data_max_p[i+6] + test_data_max_p[i+7] + test_data_max_p[i+8] + test_data_max_p[i+9] + test_data_gen_q[i]
test_data.append(temp)
# print("***",np.array(test_data).shape)
elif format==3:
for i in range(0,len(train_data_gen_q)-10):
row_y = []
row_y += train_data_max_p[i]
            for _ in range(9):  # use _ so the outer loop index i is not shadowed
row_y += [0 for _ in range(18)]
temp = train_data_a_ex[i] +train_data_p_ex[i] +train_data_q_ex[i] +train_data_v_ex[i] +train_data_p_or[i]+train_data_q_or[i]+train_data_v_or[i]+train_data_rho[i] +train_data_grid_loss[i]+train_data_load_p[i] + train_data_load_q[i]\
                + row_y + train_data_gen_q[i]
train_data.append(temp)
# print("***",np.array(train_data).shape)
for i in range(0,len(test_data_gen_q)-10):
row_y = []
row_y += test_data_max_p[i]
            for _ in range(9):  # use _ so the outer loop index i is not shadowed
row_y += [0 for _ in range(18)]
temp = test_data_a_ex[i] +test_data_p_ex[i] +test_data_q_ex[i] +test_data_v_ex[i] +test_data_p_or[i]+test_data_q_or[i]+test_data_v_or[i]+test_data_rho[i] +test_data_grid_loss[i]+test_data_load_p[i] + test_data_load_q[i]\
                + row_y + test_data_gen_q[i]
test_data.append(temp)
# print("***",np.array(test_data).shape)
with open("data/gen_p.csv", "r", newline="") as csvfile:
reader = csv.reader(csvfile)
for i, rows in enumerate(reader):
if i <= train_data_size-10 and i > 0:
row = rows
row = str_list_to_float_list(row)
train_target.append(row)
elif i>train_data_size and i<=106810:
row = rows
row = str_list_to_float_list(row)
test_target.append(row)
# print('train_target:', np.array(train_target).shape)
# print('test_target:', np.array(test_target).shape)
return train_data,train_target,test_data,test_target
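# NOTE (explanatory sketch): every feature row built above concatenates the
# eleven observation blocks (a_ex, p_ex, q_ex, v_ex, p_or, q_or, v_or, rho,
# grid_loss, load_p, load_q) with ten 18-wide renewable max-power slots
# (true forecasts, zeros, or one real step plus zeros, depending on `format`)
# and gen_q; the total width is assumed to match the actor's 1897-dim input.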
def fit(epoch,model,data_loader,optimizer,phase='training',volatile=False):
if phase == "training": # 判断当前是训练还是验证
model.train()
if phase == "validation":
model.eval()
volatile=True
all_loss=0.0
for batch_idx,(data_,target_) in enumerate(data_loader):
# print('data[i]',data[i])
        data, target = data_.cuda(), target_.cuda()  # move the batch to the GPU
#target.argmax(1)
#data, target = Variable(data, volatile), Variable(target)
if phase == 'training':
            optimizer.zero_grad()  # reset accumulated gradients
        output = model(data)  # forward pass
        loss = torch.nn.functional.mse_loss(output, target)  # mean-squared-error loss
        all_loss += torch.nn.functional.mse_loss(output, target, reduction='sum').item()
if phase == 'training':
loss.backward()
optimizer.step()
print("Training actor, epoch:",epoch,"----",phase,"loss:",all_loss/len(data_loader.dataset))
return all_loss/len(data_loader.dataset)
def supervised_train_actor(model=None, phase='training', format=2, exp_name="pre_training_actor"):
    # resume from a local checkpoint when no model is passed in;
    # otherwise just move the given model to the GPU
    if model is None:
        if os.path.exists('train_model_a_10.pth'):
            model = torch.load('train_model_a_10.pth')
            torch.save(model.state_dict(), "train_model_p_all_10.pth")
        else:
            model = Agent_Supervised_Actor()
            model.cuda(0)
    else:
        model.to("cuda")
print("**********Reading the dataset************")
train_data,train_target,test_data,test_target=dataset(1, format=format)
train_dataset = Data.TensorDataset(torch.Tensor(train_data), torch.Tensor(train_target))
    # wrap the dataset in a DataLoader
    train_loader = Data.DataLoader(
        dataset=train_dataset,  # torch TensorDataset format
        batch_size=BATCH_SIZE,  # mini batch size
        shuffle=True,  # shuffle the data (recommended for training)
        num_workers=2,  # number of worker processes for loading
)
optimizer = torch.optim.Adam(model.parameters(), lr=LR)
StepLR = torch.optim.lr_scheduler.StepLR(optimizer, step_size=200, gamma=0.9)
train_losses, train_accuracy = [], []
val_losses, val_accuracy = [], []
with open(os.path.join('best_model', exp_name, "pre_training_actor_loss.csv"),"w",newline="") as csvfile:
writer = csv.writer(csvfile)
writer.writerow(["epoch","loss"])
for epoch in range(0, epoch_num):
epoch_loss = fit(epoch, model, train_loader, optimizer, phase='training')
StepLR.step()
if epoch %10==0:
torch.save(model, os.path.join('best_model', exp_name, "train_model_a_10.pth"))
torch.save(model.state_dict(), os.path.join('best_model', exp_name, "train_model_p_all_10.pth"))
with open(os.path.join('best_model', exp_name, "pre_training_actor_loss.csv"),"a+",newline="") as csvfile:
writer = csv.writer(csvfile)
writer.writerow([epoch,epoch_loss])
train_losses.append(epoch_loss)
print("current_lr: %s" % (optimizer.state_dict()['param_groups'][0]['lr']))
if epoch > 1000 or epoch_loss <= 20:
print("Complicate the training of actor!")
return True
if __name__ == '__main__':
result = supervised_train_actor()
| 15,929 | 38.626866 | 275 | py |
Conformer-RLpatching | Conformer-RLpatching-main/Conformer/Conformer.py | import argparse
import math
import numpy as np
import torch
import exp.exp_informer as EI
from utils.metrics import metric,MSE,RMSE
parser = argparse.ArgumentParser(description='[Informer] Long Sequences Forecasting')
parser.add_argument('--model', type=str, required=False, default='informer',help='model of experiment, options: [informer, informerstack, informerlight(TBD)]')
parser.add_argument('--data', type=str, required=False, default='renewable_gen_p_max', help='data')
parser.add_argument('--root_path', type=str, default='/root/lixinhang/Conformer/data/renewable_gen_p_max/', help='root path of the data file')
parser.add_argument('--data_path', type=str, default='renewable_gen_p_max.csv', help='data file')
parser.add_argument('--features', type=str, default='M', help='forecasting task, options:[M, S, MS]; M:multivariate predict multivariate, S:univariate predict univariate, MS:multivariate predict univariate')
parser.add_argument('--target', type=str, default='OT', help='target feature in S or MS task')
parser.add_argument('--freq', type=str, default='h', help='freq for time features encoding, options:[s:secondly, t:minutely, h:hourly, d:daily, b:business days, w:weekly, m:monthly], you can also use more detailed freq like 15min or 3h')
parser.add_argument('--checkpoints', type=str, default='/root/lixinhang/Conformer/checkpoints/', help='location of model checkpoints')
parser.add_argument('--seq_len', type=int, default=56, help='input sequence length of Informer encoder')
parser.add_argument('--label_len', type=int, default=24, help='start token length of Informer decoder')
parser.add_argument('--pred_len', type=int, default=15, help='prediction sequence length')
# Informer decoder input: concat[start token series(label_len), zero padding series(pred_len)]
parser.add_argument('--enc_in', type=int, default=18, help='encoder input size')
parser.add_argument('--dec_in', type=int, default=18, help='decoder input size')
parser.add_argument('--c_out', type=int, default=18, help='output size')
parser.add_argument('--d_model', type=int, default=2048, help='dimension of model')
parser.add_argument('--n_heads', type=int, default=128, help='num of heads')
parser.add_argument('--e_layers', type=int, default=2, help='num of encoder layers')
parser.add_argument('--d_layers', type=int, default=1, help='num of decoder layers')
parser.add_argument('--s_layers', type=str, default='15,14,13,12,11,10,9,8,7,6,5,4,3,2,1', help='num of stack encoder layers')
parser.add_argument('--d_ff', type=int, default=4096, help='dimension of fcn')
parser.add_argument('--factor', type=int, default=5, help='probsparse attn factor')
parser.add_argument('--padding', type=int, default=0, help='padding type')
parser.add_argument('--distil', action='store_false', help='whether to use distilling in encoder, using this argument means not using distilling', default=True)
parser.add_argument('--dropout', type=float, default=0.05, help='dropout')
parser.add_argument('--attn', type=str, default='prob', help='attention used in encoder, options:[prob, full]')
parser.add_argument('--embed', type=str, default='timeF', help='time features encoding, options:[timeF, fixed, learned]')
parser.add_argument('--activation', type=str, default='gelu',help='activation')
parser.add_argument('--output_attention', action='store_true', help='whether to output attention in encoder')
parser.add_argument('--do_predict', action='store_true', help='whether to predict unseen future data')
parser.add_argument('--mix', action='store_false', help='use mix attention in generative decoder', default=True)
parser.add_argument('--cols', type=str, nargs='+', help='certain cols from the data files as the input features')
parser.add_argument('--num_workers', type=int, default=0, help='data loader num workers')
parser.add_argument('--itr', type=int, default=2, help='experiments times')
parser.add_argument('--train_epochs', type=int, default=500, help='train epochs')
parser.add_argument('--batch_size', type=int, default=128, help='batch size of train input data')
parser.add_argument('--patience', type=int, default=3, help='early stopping patience')
parser.add_argument('--learning_rate', type=float, default=0.00001, help='optimizer learning rate')
parser.add_argument('--des', type=str, default='test',help='exp description')
parser.add_argument('--loss', type=str, default='mse',help='loss function')
parser.add_argument('--lradj', type=str, default='type1',help='adjust learning rate')
parser.add_argument('--use_amp', action='store_true', help='use automatic mixed precision training', default=False)
parser.add_argument('--inverse', action='store_true', help='inverse output data', default=False)
parser.add_argument('--use_gpu', type=bool, default=True, help='use gpu')
parser.add_argument('--gpu', type=int, default=0, help='gpu')
parser.add_argument('--use_multi_gpu', action='store_true', help='use multiple gpus', default=False)
parser.add_argument('--devices', type=str, default='0,1,2,3',help='device ids of multiple gpus')
args = parser.parse_args()
args.use_gpu = True if torch.cuda.is_available() and args.use_gpu else False
#args.use_gpu =False
if args.use_gpu and args.use_multi_gpu:
args.devices = args.devices.replace(' ','')
device_ids = args.devices.split(',')
args.device_ids = [int(id_) for id_ in device_ids]
args.gpu = args.device_ids[0]
data_parser = {
'renewable_gen_p_max':{'data':'renewable_gen_p_max.csv','T':'OT','M':[18,18,18],'S':[1,1,1],'MS':[18,18,1]}
}
if args.data in data_parser.keys():
data_info = data_parser[args.data]
args.data_path = data_info['data']
args.target = data_info['T']
args.enc_in, args.dec_in, args.c_out = data_info[args.features]
args.s_layers = [int(s_l) for s_l in args.s_layers.replace(' ','').split(',')]
args.detail_freq = args.freq
args.freq = args.freq[-1:]
setting = '{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_at{}_fc{}_eb{}_dt{}_mx{}_{}_{}'.format(args.model, args.data, args.features,
args.seq_len, args.label_len, args.pred_len,
args.d_model, args.n_heads, args.e_layers, args.d_layers, args.d_ff, args.attn, args.factor,
args.embed, args.distil, args.mix, args.des, 0)
class Conformer():
def __init__(self,start_index):
self.storage_pool_predict=[]
self.storage_pool_true = []
self.exp=EI.Exp_Informer(args)
self.start=start_index
self.out_length = 10
def pre(self,t):
t_current = t - self.start
preds, trues = self.exp.predict(setting, t,load=True)
self.store_pred(preds)
self.store_true(trues)
true_tensteps=[]
for i in range(self.out_length):
true_tensteps.append(trues[i])
if t_current < 5:
pred_final = []
for i in range(self.out_length):
pred_final.append(preds[i])
return pred_final
else:
pred_former = []
true_former = []
for i in range(5):
idx = len(self.storage_pool_predict)-2-i
pred_former.append(self.storage_pool_predict[idx])
true_former.append(self.storage_pool_true[idx])
con_weights = self.cal_confidence(pred_former, true_former)
pred_final = self.cal_pred_final(pred_former, con_weights,true_tensteps)
print("rmse", RMSE(np.array(true_tensteps), np.array(pred_final)))
return pred_final
def cal_confidence(self,pred_former,true_former):
m_list = []
n_list = []
for i in range(len(pred_former)):
m = 0
n = 0
for j in range(i+1):
mae, mse, rmse, mape, mspe = metric(pred_former[i][j], true_former[i][j])
if rmse <= 5:
m = m+1
else:
n = n+1
m_list.append(m)
n_list.append(n)
cr_list = []
for i in range(len(pred_former)):
cr = m_list[i]/(m_list[i]+n_list[i])
cr_list.append(cr)
cr_sum = 0
for i in range(len(cr_list)):
cr_sum = cr_sum + cr_list[i]
cred_weights = []
for i in range(len(pred_former)):
cw = cr_list[i]/cr_sum
cred_weights.append(cw)
return cred_weights
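    # NOTE (explanatory sketch): for each of the five stored forecasts the loop
    # above counts hits m_i (step RMSE <= 5) and misses n_i over the steps that
    # have since been observed, then normalizes the hit rates into weights:
    #     cr_i = m_i / (m_i + n_i),   w_i = cr_i / sum_j cr_j .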
def cal_pred_final(self, pred_former, cred_weights,true_tensteps):
pred_select = []
for i in range(len(pred_former)):
pred_select_each = []
for j in range(self.out_length):
pred_select_each.append(pred_former[i][i+j+1])
pred_select.append(pred_select_each)
pred_select_weight = np.zeros((5,self.out_length,18))
for i in range(len(pred_select)):
for j in range(len(pred_select[i])):
for k in range(len(pred_select[i][j])):
pred_select_weight[i][j][k] = pred_select[i][j][k]*cred_weights[i]
pred_final=np.zeros((self.out_length,18))
for i in range(len(pred_select)):
pred_final=pred_final+np.array(pred_select_weight[i])
return pred_final
def store_pred(self, preds):
self.storage_pool_predict.append(preds)
def store_true(self, trues):
self.storage_pool_true.append(trues)
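# NOTE (usage sketch; start_index=96145 is an assumption taken from the
# train/test boundary hard-coded in utils/tools.py, and the checkpoint for
# `setting` must already exist):
#     conformer = Conformer(start_index=96145)
#     pred = conformer.pre(t=96150)  # patched 10-step, 18-unit forecast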
| 9,348 | 46.94359 | 237 | py |
Conformer-RLpatching | Conformer-RLpatching-main/Conformer/models/embed.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
class PositionalEmbedding(nn.Module):
def __init__(self, d_model, max_len=5000):
super(PositionalEmbedding, self).__init__()
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, d_model).float()
        pe.requires_grad = False
position = torch.arange(0, max_len).float().unsqueeze(1)
div_term = (torch.arange(0, d_model, 2).float() * -(math.log(10000.0) / d_model)).exp()
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
return self.pe[:, :x.size(1)]
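# NOTE: the buffer above is the standard sinusoidal positional encoding
#     PE(pos, 2i)   = sin(pos / 10000^(2i/d_model))
#     PE(pos, 2i+1) = cos(pos / 10000^(2i/d_model))
# computed once in log space and sliced to the input length in forward().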
class TokenEmbedding(nn.Module):
def __init__(self, c_in, d_model):
super(TokenEmbedding, self).__init__()
padding = 1 if torch.__version__>='1.5.0' else 2
self.tokenConv = nn.Conv1d(in_channels=c_in, out_channels=d_model,
kernel_size=3, padding=padding, padding_mode='circular')
for m in self.modules():
if isinstance(m, nn.Conv1d):
nn.init.kaiming_normal_(m.weight,mode='fan_in',nonlinearity='leaky_relu')
def forward(self, x):
x = self.tokenConv(x.permute(0, 2, 1)).transpose(1,2)
return x
class FixedEmbedding(nn.Module):
def __init__(self, c_in, d_model):
super(FixedEmbedding, self).__init__()
w = torch.zeros(c_in, d_model).float()
        w.requires_grad = False
position = torch.arange(0, c_in).float().unsqueeze(1)
div_term = (torch.arange(0, d_model, 2).float() * -(math.log(10000.0) / d_model)).exp()
w[:, 0::2] = torch.sin(position * div_term)
w[:, 1::2] = torch.cos(position * div_term)
self.emb = nn.Embedding(c_in, d_model)
self.emb.weight = nn.Parameter(w, requires_grad=False)
def forward(self, x):
return self.emb(x).detach()
class TemporalEmbedding(nn.Module):
def __init__(self, d_model, embed_type='fixed', freq='h'):
super(TemporalEmbedding, self).__init__()
minute_size = 4; hour_size = 24
weekday_size = 7; day_size = 32; month_size = 13
Embed = FixedEmbedding if embed_type=='fixed' else nn.Embedding
if freq=='t':
self.minute_embed = Embed(minute_size, d_model)
self.hour_embed = Embed(hour_size, d_model)
self.weekday_embed = Embed(weekday_size, d_model)
self.day_embed = Embed(day_size, d_model)
self.month_embed = Embed(month_size, d_model)
def forward(self, x):
x = x.long()
minute_x = self.minute_embed(x[:,:,4]) if hasattr(self, 'minute_embed') else 0.
hour_x = self.hour_embed(x[:,:,3])
weekday_x = self.weekday_embed(x[:,:,2])
day_x = self.day_embed(x[:,:,1])
month_x = self.month_embed(x[:,:,0])
return hour_x + weekday_x + day_x + month_x + minute_x
class TimeFeatureEmbedding(nn.Module):
def __init__(self, d_model, embed_type='timeF', freq='h'):
super(TimeFeatureEmbedding, self).__init__()
freq_map = {'h':4, 't':5, 's':6, 'm':1, 'a':1, 'w':2, 'd':3, 'b':3}
d_inp = freq_map[freq]
self.embed = nn.Linear(d_inp, d_model)
def forward(self, x):
return self.embed(x)
class DataEmbedding(nn.Module):
def __init__(self, c_in, d_model, embed_type='fixed', freq='h', dropout=0.1):
super(DataEmbedding, self).__init__()
self.value_embedding = TokenEmbedding(c_in=c_in, d_model=d_model)
self.position_embedding = PositionalEmbedding(d_model=d_model)
self.temporal_embedding = TemporalEmbedding(d_model=d_model, embed_type=embed_type, freq=freq) if embed_type!='timeF' else TimeFeatureEmbedding(d_model=d_model, embed_type=embed_type, freq=freq)
self.dropout = nn.Dropout(p=dropout)
def forward(self, x, x_mark):
x = self.value_embedding(x) + self.position_embedding(x) + self.temporal_embedding(x_mark)
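        # x is now [batch, seq_len, d_model]: the sum of the Conv1d token
        # embedding, the sinusoidal positional embedding, and the calendar
        # (temporal) embedding.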
return self.dropout(x) | 4,135 | 36.944954 | 202 | py |
Conformer-RLpatching | Conformer-RLpatching-main/Conformer/models/model.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from utils.masking import TriangularCausalMask, ProbMask
from models.encoder import Encoder, EncoderLayer, ConvLayer, EncoderStack
from models.decoder import Decoder, DecoderLayer
from models.attn import FullAttention, ProbAttention, AttentionLayer
from models.embed import DataEmbedding
class Informer(nn.Module):
def __init__(self, enc_in, dec_in, c_out, seq_len, label_len, out_len,
factor=5, d_model=512, n_heads=8, e_layers=3, d_layers=2, d_ff=512,
dropout=0.0, attn='prob', embed='fixed', freq='h', activation='gelu',
output_attention = False, distil=True, mix=True,
device=torch.device('cuda:0')):
super(Informer, self).__init__()
self.pred_len = out_len
self.attn = attn
self.output_attention = output_attention
# Encoding
self.enc_embedding = DataEmbedding(enc_in, d_model, embed, freq, dropout)
self.dec_embedding = DataEmbedding(dec_in, d_model, embed, freq, dropout)
# Attention
Attn = ProbAttention if attn=='prob' else FullAttention
# Encoder
self.encoder = Encoder(
[
EncoderLayer(
AttentionLayer(Attn(False, factor, attention_dropout=dropout, output_attention=output_attention),
d_model, n_heads, mix=False),
d_model,
d_ff,
dropout=dropout,
activation=activation
) for l in range(e_layers)
],
[
ConvLayer(
d_model
) for l in range(e_layers-1)
] if distil else None,
norm_layer=torch.nn.LayerNorm(d_model)
)
# Decoder
self.decoder = Decoder(
[
DecoderLayer(
AttentionLayer(Attn(True, factor, attention_dropout=dropout, output_attention=False),
d_model, n_heads, mix=mix),
AttentionLayer(FullAttention(False, factor, attention_dropout=dropout, output_attention=False),
d_model, n_heads, mix=False),
d_model,
d_ff,
dropout=dropout,
activation=activation,
)
for l in range(d_layers)
],
norm_layer=torch.nn.LayerNorm(d_model)
)
# self.end_conv1 = nn.Conv1d(in_channels=label_len+out_len, out_channels=out_len, kernel_size=1, bias=True)
# self.end_conv2 = nn.Conv1d(in_channels=d_model, out_channels=c_out, kernel_size=1, bias=True)
self.projection = nn.Linear(d_model, c_out, bias=True)
def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec,
enc_self_mask=None, dec_self_mask=None, dec_enc_mask=None):
enc_out = self.enc_embedding(x_enc, x_mark_enc)
enc_out, attns = self.encoder(enc_out, attn_mask=enc_self_mask)
dec_out = self.dec_embedding(x_dec, x_mark_dec)
dec_out = self.decoder(dec_out, enc_out, x_mask=dec_self_mask, cross_mask=dec_enc_mask)
dec_out = self.projection(dec_out)
# dec_out = self.end_conv1(dec_out)
# dec_out = self.end_conv2(dec_out.transpose(2,1)).transpose(1,2)
if self.output_attention:
return dec_out[:,-self.pred_len:,:], attns
else:
return dec_out[:,-self.pred_len:,:] # [B, L, D]
class InformerStack(nn.Module):
def __init__(self, enc_in, dec_in, c_out, seq_len, label_len, out_len,
factor=5, d_model=512, n_heads=8, e_layers=[3,2,1], d_layers=2, d_ff=512,
dropout=0.0, attn='prob', embed='fixed', freq='h', activation='gelu',
output_attention = False, distil=True, mix=True,
device=torch.device('cuda:0')):
super(InformerStack, self).__init__()
self.pred_len = out_len
self.attn = attn
self.output_attention = output_attention
# Encoding
self.enc_embedding = DataEmbedding(enc_in, d_model, embed, freq, dropout)
self.dec_embedding = DataEmbedding(dec_in, d_model, embed, freq, dropout)
# Attention
Attn = ProbAttention if attn=='prob' else FullAttention
# Encoder
inp_lens = list(range(len(e_layers))) # [0,1,2,...] you can customize here
encoders = [
Encoder(
[
EncoderLayer(
AttentionLayer(Attn(False, factor, attention_dropout=dropout, output_attention=output_attention),
d_model, n_heads, mix=False),
d_model,
d_ff,
dropout=dropout,
activation=activation
) for l in range(el)
],
[
ConvLayer(
d_model
) for l in range(el-1)
] if distil else None,
norm_layer=torch.nn.LayerNorm(d_model)
) for el in e_layers]
self.encoder = EncoderStack(encoders, inp_lens)
# Decoder
self.decoder = Decoder(
[
DecoderLayer(
AttentionLayer(Attn(True, factor, attention_dropout=dropout, output_attention=False),
d_model, n_heads, mix=mix),
AttentionLayer(FullAttention(False, factor, attention_dropout=dropout, output_attention=False),
d_model, n_heads, mix=False),
d_model,
d_ff,
dropout=dropout,
activation=activation,
)
for l in range(d_layers)
],
norm_layer=torch.nn.LayerNorm(d_model)
)
# self.end_conv1 = nn.Conv1d(in_channels=label_len+out_len, out_channels=out_len, kernel_size=1, bias=True)
# self.end_conv2 = nn.Conv1d(in_channels=d_model, out_channels=c_out, kernel_size=1, bias=True)
self.projection = nn.Linear(d_model, c_out, bias=True)
def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec,
enc_self_mask=None, dec_self_mask=None, dec_enc_mask=None):
enc_out = self.enc_embedding(x_enc, x_mark_enc)
enc_out, attns = self.encoder(enc_out, attn_mask=enc_self_mask)
dec_out = self.dec_embedding(x_dec, x_mark_dec)
dec_out = self.decoder(dec_out, enc_out, x_mask=dec_self_mask, cross_mask=dec_enc_mask)
dec_out = self.projection(dec_out)
# dec_out = self.end_conv1(dec_out)
# dec_out = self.end_conv2(dec_out.transpose(2,1)).transpose(1,2)
if self.output_attention:
return dec_out[:,-self.pred_len:,:], attns
else:
return dec_out[:,-self.pred_len:,:] # [B, L, D]
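# NOTE (instantiation sketch using the constructor signature above; the sizes
# mirror the defaults in Conformer.py and are illustrative only):
#     model = Informer(enc_in=18, dec_in=18, c_out=18,
#                      seq_len=56, label_len=24, out_len=15)
# the decoder input is expected to be the last label_len observed steps
# followed by pred_len placeholder steps.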
| 7,066 | 43.446541 | 122 | py |
Conformer-RLpatching | Conformer-RLpatching-main/Conformer/models/encoder.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class ConvLayer(nn.Module):
def __init__(self, c_in):
super(ConvLayer, self).__init__()
padding = 1 if torch.__version__>='1.5.0' else 2
self.downConv = nn.Conv1d(in_channels=c_in,
out_channels=c_in,
kernel_size=3,
padding=padding,
padding_mode='circular')
self.norm = nn.BatchNorm1d(c_in)
self.activation = nn.ELU()
self.maxPool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)
def forward(self, x):
x = self.downConv(x.permute(0, 2, 1))
x = self.norm(x)
x = self.activation(x)
x = self.maxPool(x)
x = x.transpose(1,2)
return x
class EncoderLayer(nn.Module):
def __init__(self, attention, d_model, d_ff=None, dropout=0.1, activation="relu"):
super(EncoderLayer, self).__init__()
d_ff = d_ff or 4*d_model
self.attention = attention
self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1)
self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout = nn.Dropout(dropout)
self.activation = F.relu if activation == "relu" else F.gelu
def forward(self, x, attn_mask=None):
# x [B, L, D]
# x = x + self.dropout(self.attention(
# x, x, x,
# attn_mask = attn_mask
# ))
new_x, attn = self.attention(
x, x, x,
attn_mask = attn_mask
)
x = x + self.dropout(new_x)
y = x = self.norm1(x)
y = self.dropout(self.activation(self.conv1(y.transpose(-1,1))))
y = self.dropout(self.conv2(y).transpose(-1,1))
return self.norm2(x+y), attn
class Encoder(nn.Module):
def __init__(self, attn_layers, conv_layers=None, norm_layer=None):
super(Encoder, self).__init__()
self.attn_layers = nn.ModuleList(attn_layers)
self.conv_layers = nn.ModuleList(conv_layers) if conv_layers is not None else None
self.norm = norm_layer
def forward(self, x, attn_mask=None):
# x [B, L, D]
attns = []
if self.conv_layers is not None:
for attn_layer, conv_layer in zip(self.attn_layers, self.conv_layers):
x, attn = attn_layer(x, attn_mask=attn_mask)
x = conv_layer(x)
attns.append(attn)
x, attn = self.attn_layers[-1](x, attn_mask=attn_mask)
attns.append(attn)
else:
for attn_layer in self.attn_layers:
x, attn = attn_layer(x, attn_mask=attn_mask)
attns.append(attn)
if self.norm is not None:
x = self.norm(x)
return x, attns
class EncoderStack(nn.Module):
def __init__(self, encoders, inp_lens):
super(EncoderStack, self).__init__()
self.encoders = nn.ModuleList(encoders)
self.inp_lens = inp_lens
def forward(self, x, attn_mask=None):
# x [B, L, D]
x_stack = []; attns = []
for i_len, encoder in zip(self.inp_lens, self.encoders):
inp_len = x.shape[1]//(2**i_len)
x_s, attn = encoder(x[:, -inp_len:, :])
x_stack.append(x_s); attns.append(attn)
x_stack = torch.cat(x_stack, -2)
return x_stack, attns
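# NOTE: EncoderStack runs each sub-encoder on a progressively halved suffix of
# the input (inp_len = L // 2**i) and concatenates the resulting pyramid of
# feature maps along the length dimension; this is the variant selected by
# --model informerstack.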
| 3,555 | 34.919192 | 90 | py |
Conformer-RLpatching | Conformer-RLpatching-main/Conformer/models/decoder.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class DecoderLayer(nn.Module):
def __init__(self, self_attention, cross_attention, d_model, d_ff=None,
dropout=0.1, activation="relu"):
super(DecoderLayer, self).__init__()
d_ff = d_ff or 4*d_model
self.self_attention = self_attention
self.cross_attention = cross_attention
self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1)
self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.norm3 = nn.LayerNorm(d_model)
self.dropout = nn.Dropout(dropout)
self.activation = F.relu if activation == "relu" else F.gelu
def forward(self, x, cross, x_mask=None, cross_mask=None):
x = x + self.dropout(self.self_attention(
x, x, x,
attn_mask=x_mask
)[0])
x = self.norm1(x)
x = x + self.dropout(self.cross_attention(
x, cross, cross,
attn_mask=cross_mask
)[0])
y = x = self.norm2(x)
y = self.dropout(self.activation(self.conv1(y.transpose(-1,1))))
y = self.dropout(self.conv2(y).transpose(-1,1))
return self.norm3(x+y)
class Decoder(nn.Module):
def __init__(self, layers, norm_layer=None):
super(Decoder, self).__init__()
self.layers = nn.ModuleList(layers)
self.norm = norm_layer
def forward(self, x, cross, x_mask=None, cross_mask=None):
for layer in self.layers:
x = layer(x, cross, x_mask=x_mask, cross_mask=cross_mask)
if self.norm is not None:
x = self.norm(x)
return x | 1,772 | 33.764706 | 85 | py |
Conformer-RLpatching | Conformer-RLpatching-main/Conformer/models/attn.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from math import sqrt
from utils.masking import TriangularCausalMask, ProbMask
class FullAttention(nn.Module):
def __init__(self, mask_flag=True, factor=5, scale=None, attention_dropout=0.1, output_attention=False):
super(FullAttention, self).__init__()
self.scale = scale
self.mask_flag = mask_flag
self.output_attention = output_attention
self.dropout = nn.Dropout(attention_dropout)
def forward(self, queries, keys, values, attn_mask):
B, L, H, E = queries.shape
_, S, _, D = values.shape
scale = self.scale or 1./sqrt(E)
scores = torch.einsum("blhe,bshe->bhls", queries, keys)
if self.mask_flag:
if attn_mask is None:
attn_mask = TriangularCausalMask(B, L, device=queries.device)
scores.masked_fill_(attn_mask.mask, -np.inf)
A = self.dropout(torch.softmax(scale * scores, dim=-1))
V = torch.einsum("bhls,bshd->blhd", A, values)
if self.output_attention:
return (V.contiguous(), A)
else:
return (V.contiguous(), None)
class ProbAttention(nn.Module):
def __init__(self, mask_flag=True, factor=5, scale=None, attention_dropout=0.1, output_attention=False):
super(ProbAttention, self).__init__()
self.factor = factor
self.scale = scale
self.mask_flag = mask_flag
self.output_attention = output_attention
self.dropout = nn.Dropout(attention_dropout)
def _prob_QK(self, Q, K, sample_k, n_top): # n_top: c*ln(L_q)
# Q [B, H, L, D]
B, H, L_K, E = K.shape
_, _, L_Q, _ = Q.shape
# calculate the sampled Q_K
K_expand = K.unsqueeze(-3).expand(B, H, L_Q, L_K, E)
index_sample = torch.randint(L_K, (L_Q, sample_k)) # real U = U_part(factor*ln(L_k))*L_q
K_sample = K_expand[:, :, torch.arange(L_Q).unsqueeze(1), index_sample, :]
Q_K_sample = torch.matmul(Q.unsqueeze(-2), K_sample.transpose(-2, -1)).squeeze(-2)
        # find the Top_k queries with the sparsity measurement
M = Q_K_sample.max(-1)[0] - torch.div(Q_K_sample.sum(-1), L_K)
M_top = M.topk(n_top, sorted=False)[1]
# use the reduced Q to calculate Q_K
Q_reduce = Q[torch.arange(B)[:, None, None],
torch.arange(H)[None, :, None],
M_top, :] # factor*ln(L_q)
Q_K = torch.matmul(Q_reduce, K.transpose(-2, -1)) # factor*ln(L_q)*L_k
return Q_K, M_top
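    # NOTE (explanatory sketch): M above approximates the query sparsity
    # measurement from the Informer paper,
    #     M(q_i, K) = max_j(q_i . k_j) - (1/L_K) * sum_j (q_i . k_j),
    # evaluated on sample_k randomly sampled keys; only the n_top
    # highest-scoring queries receive full attention, which is what brings the
    # cost down from O(L^2) toward O(L log L).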
def _get_initial_context(self, V, L_Q):
B, H, L_V, D = V.shape
if not self.mask_flag:
# V_sum = V.sum(dim=-2)
V_sum = V.mean(dim=-2)
contex = V_sum.unsqueeze(-2).expand(B, H, L_Q, V_sum.shape[-1]).clone()
else: # use mask
assert(L_Q == L_V) # requires that L_Q == L_V, i.e. for self-attention only
contex = V.cumsum(dim=-2)
return contex
def _update_context(self, context_in, V, scores, index, L_Q, attn_mask):
B, H, L_V, D = V.shape
if self.mask_flag:
attn_mask = ProbMask(B, H, L_Q, index, scores, device=V.device)
scores.masked_fill_(attn_mask.mask, -np.inf)
attn = torch.softmax(scores, dim=-1) # nn.Softmax(dim=-1)(scores)
context_in[torch.arange(B)[:, None, None],
torch.arange(H)[None, :, None],
index, :] = torch.matmul(attn, V).type_as(context_in)
if self.output_attention:
attns = (torch.ones([B, H, L_V, L_V])/L_V).type_as(attn).to(attn.device)
attns[torch.arange(B)[:, None, None], torch.arange(H)[None, :, None], index, :] = attn
return (context_in, attns)
else:
return (context_in, None)
def forward(self, queries, keys, values, attn_mask):
B, L_Q, H, D = queries.shape
_, L_K, _, _ = keys.shape
queries = queries.transpose(2,1)
keys = keys.transpose(2,1)
values = values.transpose(2,1)
U_part = self.factor * np.ceil(np.log(L_K)).astype('int').item() # c*ln(L_k)
u = self.factor * np.ceil(np.log(L_Q)).astype('int').item() # c*ln(L_q)
U_part = U_part if U_part<L_K else L_K
u = u if u<L_Q else L_Q
scores_top, index = self._prob_QK(queries, keys, sample_k=U_part, n_top=u)
# add scale factor
scale = self.scale or 1./sqrt(D)
if scale is not None:
scores_top = scores_top * scale
# get the context
context = self._get_initial_context(values, L_Q)
# update the context with selected top_k queries
context, attn = self._update_context(context, values, scores_top, index, L_Q, attn_mask)
return context.transpose(2,1).contiguous(), attn
class AttentionLayer(nn.Module):
def __init__(self, attention, d_model, n_heads,
d_keys=None, d_values=None, mix=False):
super(AttentionLayer, self).__init__()
d_keys = d_keys or (d_model//n_heads)
d_values = d_values or (d_model//n_heads)
self.inner_attention = attention
self.query_projection = nn.Linear(d_model, d_keys * n_heads)
self.key_projection = nn.Linear(d_model, d_keys * n_heads)
self.value_projection = nn.Linear(d_model, d_values * n_heads)
self.out_projection = nn.Linear(d_values * n_heads, d_model)
self.n_heads = n_heads
self.mix = mix
def forward(self, queries, keys, values, attn_mask):
B, L, _ = queries.shape
_, S, _ = keys.shape
H = self.n_heads
queries = self.query_projection(queries).view(B, L, H, -1)
keys = self.key_projection(keys).view(B, S, H, -1)
values = self.value_projection(values).view(B, S, H, -1)
out, attn = self.inner_attention(
queries,
keys,
values,
attn_mask
)
if self.mix:
out = out.transpose(2,1).contiguous()
out = out.view(B, L, -1)
return self.out_projection(out), attn
| 6,179 | 36.682927 | 108 | py |
Conformer-RLpatching | Conformer-RLpatching-main/Conformer/utils/tools.py | import numpy as np
import torch
import pandas as pd
import os
def adjust_learning_rate(optimizer, epoch, args):
# lr = args.learning_rate * (0.2 ** (epoch // 2))
if args.lradj=='type1':
lr_adjust = {epoch: args.learning_rate * (0.5 ** ((epoch-1) // 1))}
elif args.lradj=='type2':
lr_adjust = {
2: 5e-5, 4: 1e-5, 6: 5e-6, 8: 1e-6,
10: 5e-7, 15: 1e-7, 20: 5e-8
}
if epoch in lr_adjust.keys():
lr = lr_adjust[epoch]
for param_group in optimizer.param_groups:
param_group['lr'] = lr
print('Updating learning rate to {}'.format(lr))
class EarlyStopping:
def __init__(self, patience=50, verbose=False, delta=0):
self.patience = patience
self.verbose = verbose
self.counter = 0
self.best_score = None
self.early_stop = False
        self.val_loss_min = np.inf
self.delta = delta
def __call__(self, val_loss, model, path):
score = -val_loss
if self.best_score is None:
self.best_score = score
self.save_checkpoint(val_loss, model, path)
elif score < self.best_score + self.delta:
self.counter += 1
print(f'EarlyStopping counter: {self.counter} out of {self.patience}')
if self.counter >= self.patience:
self.early_stop = True
else:
self.best_score = score
self.save_checkpoint(val_loss, model, path)
self.counter = 0
def save_checkpoint(self, val_loss, model, path):
if self.verbose:
print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model ...')
torch.save(model.state_dict(), path+'/'+'checkpoint.pth')
self.val_loss_min = val_loss
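# NOTE (usage sketch with hypothetical names):
#     early_stopping = EarlyStopping(patience=3, verbose=True)
#     for epoch in range(train_epochs):
#         vali_loss = ...  # run validation
#         early_stopping(vali_loss, model, checkpoint_dir)
#         if early_stopping.early_stop:
#             break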
class dotdict(dict):
"""dot.notation access to dictionary attributes"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
class StandardScaler():
    def __init__(self, root_path=None, data_path=None):
        # default to identity statistics; they are either estimated from the
        # csv below (when a path is given) or set later through fit(), which
        # keeps the no-argument calls in data/data_loader.py working
        self.mean = 0.
        self.std = 1.
        if root_path is not None and data_path is not None:
            df_raw = pd.read_csv(os.path.join(root_path,
                                              data_path))
            cols_data = df_raw.columns[1:]
            df_data = df_raw[cols_data]
            # statistics come from the training split only (first 96145 rows)
            self.mean = df_data[0:96145].values.mean(0)
            self.std = df_data[0:96145].values.std(0)
    def fit(self, data):
        self.mean = data.mean(0)
        self.std = data.std(0)
def transform(self, data):
mean = torch.from_numpy(self.mean).type_as(data).to(data.device) if torch.is_tensor(data) else self.mean
std = torch.from_numpy(self.std).type_as(data).to(data.device) if torch.is_tensor(data) else self.std
return (data - mean) / std
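    # transform() standardizes with the statistics estimated in __init__ (or
    # set via fit): z = (x - mean) / std; inverse_transform() below undoes it:
    # x = z * std + mean.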
def inverse_transform(self, data):
mean = torch.from_numpy(self.mean).type_as(data).to(data.device) if torch.is_tensor(data) else self.mean
std = torch.from_numpy(self.std).type_as(data).to(data.device) if torch.is_tensor(data) else self.std
return (data * std) + mean | 3,260 | 36.056818 | 112 | py |
Conformer-RLpatching | Conformer-RLpatching-main/Conformer/utils/masking.py | import torch
class TriangularCausalMask():
def __init__(self, B, L, device="cpu"):
mask_shape = [B, 1, L, L]
with torch.no_grad():
self._mask = torch.triu(torch.ones(mask_shape, dtype=torch.bool), diagonal=1).to(device)
@property
def mask(self):
return self._mask
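# NOTE: the mask above is True strictly above the diagonal, so those positions
# are filled with -inf before the softmax and each query attends only to itself
# and earlier time steps (causal self-attention).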
class ProbMask():
def __init__(self, B, H, L, index, scores, device="cpu"):
_mask = torch.ones(L, scores.shape[-1], dtype=torch.bool).to(device).triu(1)
_mask_ex = _mask[None, None, :].expand(B, H, L, scores.shape[-1])
indicator = _mask_ex[torch.arange(B)[:, None, None],
torch.arange(H)[None, :, None],
index, :].to(device)
self._mask = indicator.view(scores.shape).to(device)
@property
def mask(self):
return self._mask | 851 | 34.5 | 100 | py |
Conformer-RLpatching | Conformer-RLpatching-main/Conformer/data/data_loader.py | import os
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset, DataLoader
# from sklearn.preprocessing import StandardScaler
from utils.tools import StandardScaler
from utils.timefeatures import time_features
import warnings
warnings.filterwarnings('ignore')
class Dataset_ETT_hour(Dataset):
def __init__(self, root_path, flag='train', size=None,
features='S', data_path='ETTh1.csv',
target='OT', scale=True, inverse=False, timeenc=0, freq='h', cols=None,index=None):
# size [seq_len, label_len, pred_len]
# info
if size == None:
self.seq_len = 24*4*4
self.label_len = 24*4
self.pred_len = 24*4
else:
self.seq_len = size[0]
self.label_len = size[1]
self.pred_len = size[2]
# init
assert flag in ['train', 'test', 'val']
type_map = {'train':0, 'test':1, 'val':2}
self.set_type = type_map[flag]
self.features = features
self.target = target
self.scale = scale
self.inverse = inverse
self.timeenc = timeenc
self.freq = freq
self.root_path = root_path
self.data_path = data_path
self.__read_data__()
def __read_data__(self):
self.scaler = StandardScaler(self.root_path,self.data_path)
df_raw = pd.read_csv(os.path.join(self.root_path,
self.data_path))
border1s = [0, 96075, 12*30*24+4*30*24 - self.seq_len]
border2s = [96145, 99145, 12*30*24+8*30*24]
border1 = border1s[self.set_type]
border2 = border2s[self.set_type]
if self.features=='M' or self.features=='MS':
cols_data = df_raw.columns[1:]
# print("*****",cols_data)
df_data = df_raw[cols_data]
elif self.features=='S':
df_data = df_raw[[self.target]]
if self.scale:
#train_data = df_data[border1s[0]:border2s[0]]
# self.scaler.fit(train_data.values)
data = self.scaler.transform(df_data.values)
else:
data = df_data.values
df_stamp = df_raw[['date']][border1:border2]
df_stamp['date'] = pd.to_datetime(df_stamp.date)
data_stamp = time_features(df_stamp, timeenc=self.timeenc, freq=self.freq)
        self.data_x = data[border1:border2]  # encoder/decoder inputs (standardized when scale=True)
        if self.inverse:
            self.data_y = df_data.values[border1:border2]  # targets kept in the original scale
        else:
            self.data_y = data[border1:border2]  # targets in the standardized scale
self.data_stamp = data_stamp
def __getitem__(self, index):
s_begin = index
s_end = s_begin + self.seq_len
r_begin = s_end - self.label_len
r_end = r_begin + self.label_len + self.pred_len
seq_x = self.data_x[s_begin:s_end]
if self.inverse:
seq_y = np.concatenate([self.data_x[r_begin:r_begin+self.label_len], self.data_y[r_begin+self.label_len:r_end]], 0)
else:
seq_y = self.data_y[r_begin:r_end]
seq_x_mark = self.data_stamp[s_begin:s_end]
seq_y_mark = self.data_stamp[r_begin:r_end]
return seq_x, seq_y, seq_x_mark, seq_y_mark
def __len__(self):
return len(self.data_x) - self.seq_len- self.pred_len + 1
def inverse_transform(self, data):
return self.scaler.inverse_transform(data)
class Dataset_ETT_minute(Dataset):
def __init__(self, root_path, flag='train', size=None,
features='S', data_path='ETTm1.csv',
                 target='OT', scale=True, inverse=False, timeenc=0, freq='t', cols=None, index=None):
# size [seq_len, label_len, pred_len]
# info
if size == None:
self.seq_len = 24*4*4
self.label_len = 24*4
self.pred_len = 24*4
else:
self.seq_len = size[0]
self.label_len = size[1]
self.pred_len = size[2]
# init
assert flag in ['train', 'test', 'val']
type_map = {'train':0, 'val':1, 'test':2}
self.set_type = type_map[flag]
self.features = features
self.target = target
self.scale = scale
self.inverse = inverse
self.timeenc = timeenc
self.freq = freq
self.root_path = root_path
self.data_path = data_path
self.__read_data__()
def __read_data__(self):
self.scaler = StandardScaler()
df_raw = pd.read_csv(os.path.join(self.root_path,
self.data_path))
border1s = [0, 12*30*24*4 - self.seq_len, 12*30*24*4+4*30*24*4 - self.seq_len]
border2s = [12*30*24*4, 12*30*24*4+4*30*24*4, 12*30*24*4+8*30*24*4]
border1 = border1s[self.set_type]
border2 = border2s[self.set_type]
if self.features=='M' or self.features=='MS':
cols_data = df_raw.columns[1:]
df_data = df_raw[cols_data]
elif self.features=='S':
df_data = df_raw[[self.target]]
if self.scale:
train_data = df_data[border1s[0]:border2s[0]]
self.scaler.fit(train_data.values)
data = self.scaler.transform(df_data.values)
else:
data = df_data.values
df_stamp = df_raw[['date']][border1:border2]
df_stamp['date'] = pd.to_datetime(df_stamp.date)
data_stamp = time_features(df_stamp, timeenc=self.timeenc, freq=self.freq)
self.data_x = data[border1:border2]
if self.inverse:
self.data_y = df_data.values[border1:border2]
else:
self.data_y = data[border1:border2]
self.data_stamp = data_stamp
def __getitem__(self, index):
s_begin = index
s_end = s_begin + self.seq_len
r_begin = s_end - self.label_len
r_end = r_begin + self.label_len + self.pred_len
seq_x = self.data_x[s_begin:s_end]
if self.inverse:
seq_y = np.concatenate([self.data_x[r_begin:r_begin+self.label_len], self.data_y[r_begin+self.label_len:r_end]], 0)
else:
seq_y = self.data_y[r_begin:r_end]
seq_x_mark = self.data_stamp[s_begin:s_end]
seq_y_mark = self.data_stamp[r_begin:r_end]
return seq_x, seq_y, seq_x_mark, seq_y_mark
def __len__(self):
return len(self.data_x) - self.seq_len - self.pred_len + 1
def inverse_transform(self, data):
return self.scaler.inverse_transform(data)
class Dataset_Custom(Dataset):
def __init__(self, root_path, flag='train', size=None,
features='S', data_path='ETTh1.csv',
                 target='OT', scale=True, inverse=False, timeenc=0, freq='h', cols=None, index=None):
# size [seq_len, label_len, pred_len]
# info
if size == None:
self.seq_len = 24*4*4
self.label_len = 24*4
self.pred_len = 24*4
else:
self.seq_len = size[0]
self.label_len = size[1]
self.pred_len = size[2]
# init
assert flag in ['train', 'test', 'val']
type_map = {'train':0, 'val':1, 'test':2}
self.set_type = type_map[flag]
self.features = features
self.target = target
self.scale = scale
self.inverse = inverse
self.timeenc = timeenc
self.freq = freq
self.cols=cols
self.root_path = root_path
self.data_path = data_path
self.__read_data__()
def __read_data__(self):
self.scaler = StandardScaler()
df_raw = pd.read_csv(os.path.join(self.root_path,
self.data_path))
'''
df_raw.columns: ['date', ...(other features), target feature]
'''
# cols = list(df_raw.columns);
if self.cols:
cols=self.cols.copy()
cols.remove(self.target)
else:
cols = list(df_raw.columns); cols.remove(self.target); cols.remove('date')
df_raw = df_raw[['date']+cols+[self.target]]
num_train = int(len(df_raw)*0.7)
num_test = int(len(df_raw)*0.2)
num_vali = len(df_raw) - num_train - num_test
border1s = [0, num_train-self.seq_len, len(df_raw)-num_test-self.seq_len]
border2s = [num_train, num_train+num_vali, len(df_raw)]
border1 = border1s[self.set_type]
border2 = border2s[self.set_type]
if self.features=='M' or self.features=='MS':
cols_data = df_raw.columns[1:]
df_data = df_raw[cols_data]
elif self.features=='S':
df_data = df_raw[[self.target]]
if self.scale:
train_data = df_data[border1s[0]:border2s[0]]
self.scaler.fit(train_data.values)
data = self.scaler.transform(df_data.values)
else:
data = df_data.values
df_stamp = df_raw[['date']][border1:border2]
df_stamp['date'] = pd.to_datetime(df_stamp.date)
data_stamp = time_features(df_stamp, timeenc=self.timeenc, freq=self.freq)
self.data_x = data[border1:border2]
if self.inverse:
self.data_y = df_data.values[border1:border2]
else:
self.data_y = data[border1:border2]
self.data_stamp = data_stamp
def __getitem__(self, index):
s_begin = index
s_end = s_begin + self.seq_len
r_begin = s_end - self.label_len
r_end = r_begin + self.label_len + self.pred_len
seq_x = self.data_x[s_begin:s_end]
if self.inverse:
seq_y = np.concatenate([self.data_x[r_begin:r_begin+self.label_len], self.data_y[r_begin+self.label_len:r_end]], 0)
else:
seq_y = self.data_y[r_begin:r_end]
seq_x_mark = self.data_stamp[s_begin:s_end]
seq_y_mark = self.data_stamp[r_begin:r_end]
return seq_x, seq_y, seq_x_mark, seq_y_mark
def __len__(self):
return len(self.data_x) - self.seq_len- self.pred_len + 1
def inverse_transform(self, data):
return self.scaler.inverse_transform(data)
class Dataset_Pred(Dataset):
def __init__(self, root_path, flag='pred', size=None,
features='S', data_path='ETTh1.csv',
target='OT', scale=True, inverse=False, timeenc=0, freq='15min', cols=None,index=None):
# size [seq_len, label_len, pred_len]
# info
if size == None:
self.seq_len = 24*4*4
self.label_len = 24*4
self.pred_len = 24*4
else:
self.seq_len = size[0]
self.label_len = size[1]
self.pred_len = size[2]
# init
assert flag in ['pred']
self.features = features
self.target = target
self.scale = scale
self.inverse = inverse
self.timeenc = timeenc
self.freq = freq
self.cols=cols
self.root_path = root_path
self.data_path = data_path
self.index=index
self.__read_data__()
def __read_data__(self):
self.scaler = StandardScaler(self.root_path,self.data_path)
df_raw = pd.read_csv(os.path.join(self.root_path,
self.data_path))
'''
df_raw.columns: ['date', ...(other features), target feature]
'''
# if self.cols:
# cols=self.cols.copy()
# cols.remove(self.target)
# else:
# cols = list(df_raw.columns); cols.remove(self.target); cols.remove('date')
# df_raw = df_raw[['date']+cols+[self.target]]
border1 = self.index - self.seq_len+1
border2 = self.index + self.pred_len+1
# print("border1",border1)
# print("border2", border2)
if self.features == 'M' or self.features == 'MS':
cols_data = df_raw.columns[1:]
# print("*****",cols_data)
df_data = df_raw[cols_data]
elif self.features == 'S':
df_data = df_raw[[self.target]]
if self.scale:
# train_data = df_data[border1s[0]:border2s[0]]
# self.scaler.fit(train_data.values)
data = self.scaler.transform(df_data.values)
else:
data = df_data.values
df_stamp = df_raw[['date']][border1:border2]
df_stamp['date'] = pd.to_datetime(df_stamp.date)
data_stamp = time_features(df_stamp, timeenc=self.timeenc, freq=self.freq)
        self.data_x = data[border1:border2]  # encoder/decoder inputs (standardized when scale=True)
        if self.inverse:
            self.data_y = df_data.values[border1:border2]  # targets kept in the original scale
        else:
            self.data_y = data[border1:border2]  # targets in the standardized scale
self.data_stamp = data_stamp
def __getitem__(self, index):
s_begin = index
s_end = s_begin + self.seq_len
r_begin = s_end - self.label_len
r_end = r_begin + self.label_len + self.pred_len
seq_x = self.data_x[s_begin:s_end]
if self.inverse:
seq_y = np.concatenate([self.data_x[r_begin:r_begin+self.label_len], self.data_y[r_begin+self.label_len:r_end]], 0)
else:
seq_y = self.data_y[r_begin:r_end]
seq_x_mark = self.data_stamp[s_begin:s_end]
seq_y_mark = self.data_stamp[r_begin:r_end]
return seq_x, seq_y, seq_x_mark, seq_y_mark
def __len__(self):
return len(self.data_x) - self.seq_len- self.pred_len + 1
def inverse_transform(self, data):
return self.scaler.inverse_transform(data)
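# NOTE (explanatory sketch of the window layout shared by the datasets above):
#     seq_x = data[s_begin : s_begin + seq_len]            # encoder input
#     seq_y = data[s_end - label_len : s_end + pred_len]   # start token + horizon
# i.e. the decoder target overlaps the last label_len encoder steps and then
# extends pred_len steps into the future.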
| 13,665 | 34.496104 | 127 | py |
Conformer-RLpatching | Conformer-RLpatching-main/Conformer/exp/exp_informer.py | from data.data_loader import Dataset_ETT_hour, Dataset_ETT_minute, Dataset_Custom, Dataset_Pred
from exp.exp_basic import Exp_Basic
from models.model import Informer, InformerStack
from utils.metrics import MSE
from utils.tools import EarlyStopping, adjust_learning_rate
from utils.metrics import metric
from utils.tools import StandardScaler
import numpy as np
import torch
import torch.nn as nn
from torch import optim
from torch.utils.data import DataLoader
import os
import time
import warnings
warnings.filterwarnings('ignore')
import sys
sys.path.append("/root/lixinhang/Informer_predict")
class Exp_Informer(Exp_Basic):
def __init__(self, args):
super(Exp_Informer, self).__init__(args)
self.standardscaler = StandardScaler(root_path=args.root_path,data_path=args.data_path)
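        # a separate scaler fit on the raw CSV, used by vali()/predict() to
        # map model outputs back to the original value range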
def _build_model(self):
model_dict = {
'informer':Informer,
'informerstack':InformerStack,
}
if self.args.model=='informer' or self.args.model=='informerstack':
e_layers = self.args.e_layers if self.args.model=='informer' else self.args.s_layers
model = model_dict[self.args.model](
self.args.enc_in,
self.args.dec_in,
self.args.c_out,
self.args.seq_len,
self.args.label_len,
self.args.pred_len,
self.args.factor,
self.args.d_model,
self.args.n_heads,
e_layers, # self.args.e_layers,
self.args.d_layers,
self.args.d_ff,
self.args.dropout,
self.args.attn,
self.args.embed,
self.args.freq,
self.args.activation,
self.args.output_attention,
self.args.distil,
self.args.mix,
self.device
).float()
if self.args.use_multi_gpu and self.args.use_gpu:
model = nn.DataParallel(model, device_ids=self.args.device_ids)
return model
def _get_data(self, flag,index=None):
args = self.args
data_dict = {
'ETTh1':Dataset_ETT_hour,
'ETTh2':Dataset_ETT_hour,
'ETTm1':Dataset_ETT_minute,
'ETTm2':Dataset_ETT_minute,
'WTH':Dataset_Custom,
'ECL':Dataset_Custom,
'Solar':Dataset_Custom,
'custom':Dataset_Custom,
}
Data = data_dict[self.args.data]
timeenc = 0 if args.embed!='timeF' else 1
if flag == 'test':
shuffle_flag = False; drop_last = True; batch_size = 1; freq=args.freq
elif flag=='pred':
shuffle_flag = False; drop_last = False; batch_size = 1; freq=args.detail_freq
Data = Dataset_Pred
else:
shuffle_flag = True; drop_last = True; batch_size = args.batch_size; freq=args.freq
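        # 'pred' serves one window through Dataset_Pred at batch_size=1 with
        # the finer detail_freq; 'test' keeps order, 'train' shuffles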
data_set = Data(
root_path=args.root_path,
data_path=args.data_path,
flag=flag,
size=[args.seq_len, args.label_len, args.pred_len],
features=args.features,
target=args.target,
inverse=args.inverse,
timeenc=timeenc,
freq=freq,
cols=args.cols,
index=index
)
#print(flag, len(data_set))
data_loader = DataLoader(
data_set,
batch_size=batch_size,
shuffle=shuffle_flag,
num_workers=args.num_workers,
drop_last=drop_last)
return data_set, data_loader
def _select_optimizer(self):
model_optim = optim.Adam(self.model.parameters(), lr=self.args.learning_rate)
return model_optim
def _select_criterion(self):
criterion = nn.MSELoss()
return criterion
def vali(self, vali_data, vali_loader, criterion):
self.model.eval()
total_loss = []
for i, (batch_x,batch_y,batch_x_mark,batch_y_mark) in enumerate(vali_loader):
pred, true = self._process_one_batch(
vali_data, batch_x, batch_y, batch_x_mark, batch_y_mark)
# print("***********************************")
pred=self.standardscaler.inverse_transform(torch.squeeze(pred))
true=self.standardscaler.inverse_transform(torch.squeeze(true))
loss = criterion(pred.detach().cpu(), true.detach().cpu())
# print("***********************************")
# print(loss)
# print("***********************************")
total_loss.append(loss)
total_loss = np.average(total_loss)
self.model.train()
return total_loss
def train(self, setting):
train_data, train_loader = self._get_data(flag = 'train')
# vali_data, vali_loader = self._get_data(flag = 'val')
test_data, test_loader = self._get_data(flag = 'test')
path = os.path.join(self.args.checkpoints, setting)
        if not os.path.exists(path):
os.makedirs(path)
else:
self.best_model_path = path + '/' + 'checkpoint.pth'
self.model.load_state_dict(torch.load(self.best_model_path))
time_now = time.time()
train_steps = len(train_loader)
early_stopping = EarlyStopping(patience=self.args.patience, verbose=True)
model_optim = self._select_optimizer()
criterion = self._select_criterion()
if self.args.use_amp:
scaler = torch.cuda.amp.GradScaler()
for epoch in range(self.args.train_epochs):
iter_count = 0
train_loss = []
self.model.train()
epoch_time = time.time()
            for i, (batch_x,batch_y,batch_x_mark,batch_y_mark) in enumerate(train_loader):  # ~96075 samples, batch size 256
iter_count += 1
model_optim.zero_grad()
pred, true = self._process_one_batch(
train_data, batch_x, batch_y, batch_x_mark, batch_y_mark)
loss = criterion(pred, true)
train_loss.append(loss.item())
if (i+1) % 50==0:
print("\titers: {0}, epoch: {1} | loss: {2:.7f}".format(i + 1, epoch + 1, loss.item()))
speed = (time.time()-time_now)/iter_count
left_time = speed*((self.args.train_epochs - epoch)*train_steps - i)
print('\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(speed, left_time))
iter_count = 0
time_now = time.time()
if (i + 1) % 100 == 0:
test_mse = self.vali(test_data, test_loader, criterion)
early_stopping(test_mse, self.model, path)
if early_stopping.early_stop:
print("Early stopping")
break
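                    # NOTE: this break only exits the batch loop; training
                    # still proceeds to the next epoch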
if self.args.use_amp:
scaler.scale(loss).backward()
scaler.step(model_optim)
scaler.update()
else:
loss.backward()
model_optim.step()
print("Epoch: {} cost time: {}".format(epoch+1, time.time()-epoch_time))
print("****************************************")
train_loss = np.average(train_loss)
            # TODO: save train_loss to csv
# vali_loss = self.vali(vali_data, vali_loader, criterion)
# test_loss = self.vali(test_data, test_loader, criterion)
# print("Epoch: {0}, Steps: {1} | Train Loss: {2:.7f} Vali Loss: {3:.7f} Test Loss: {4:.7f}".format(
# epoch + 1, train_steps, train_loss, vali_loss, test_loss))
print("Epoch: {0}, Steps: {1} | Train Loss: {2:.7f}".format(
epoch + 1, train_steps, train_loss))
# early_stopping(vali_loss, self.model, path)
adjust_learning_rate(model_optim, epoch+1, self.args)
self.best_model_path = path + '/' + 'checkpoint.pth'
self.model.load_state_dict(torch.load(self.best_model_path))
test_mse = self.vali(test_data, test_loader, criterion)
        # TODO: save test_mse to csv
return self.model
def test(self, setting):
test_data, test_loader = self._get_data(flag='test')
self.model.eval()
preds = []
trues = []
for i, (batch_x,batch_y,batch_x_mark,batch_y_mark) in enumerate(test_loader):
pred, true = self._process_one_batch(
test_data, batch_x, batch_y, batch_x_mark, batch_y_mark)
preds.append(pred.detach().cpu().numpy())
trues.append(true.detach().cpu().numpy())
preds = np.array(preds)
trues = np.array(trues)
print('test shape:', preds.shape, trues.shape)
preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1])
trues = trues.reshape(-1, trues.shape[-2], trues.shape[-1])
print('test shape:', preds.shape, trues.shape)
# result save
folder_path = './results/' + setting +'/'
if not os.path.exists(folder_path):
os.makedirs(folder_path)
mae, mse, rmse, mape, mspe = metric(preds, trues)
print('mse:{}, mae:{}'.format(mse, mae))
np.save(folder_path+'metrics.npy', np.array([mae, mse, rmse, mape, mspe]))
np.save(folder_path+'pred.npy', preds)
np.save(folder_path+'true.npy', trues)
return
def predict(self, setting, index,load=True):
pred_data, pred_loader = self._get_data(flag='pred',index=index)
if load:
#print("#####################################")
path = os.path.join(self.args.checkpoints, setting)
best_model_path = path+'/'+'checkpoint.pth'
self.model.load_state_dict(torch.load(best_model_path))
self.model.eval()
preds = []
trues = []
for i, (batch_x,batch_y,batch_x_mark,batch_y_mark) in enumerate(pred_loader):
# print("*************************************")
# print(batch_x.shape)
pred, true = self._process_one_batch(
pred_data, batch_x, batch_y, batch_x_mark, batch_y_mark)
preds.append(pred.detach().cpu().numpy())
trues.append(true.detach().cpu().numpy())
preds = np.array(preds)
trues = np.array(trues)
#print(preds.shape)
preds = preds.reshape( preds.shape[-2], preds.shape[-1])
trues = trues.reshape(trues.shape[-2], trues.shape[-1])
preds=self.standardscaler.inverse_transform(preds)
trues = self.standardscaler.inverse_transform(trues)
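        # preds/trues are now (pred_len, n_targets) arrays in original units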
# print(preds.shape)
# print("trues0",trues[0])
# print(trues.shape)
# print("mse_15steps",MSE(preds,trues))
# result save
# folder_path = './results/' + setting +'/'
# if not os.path.exists(folder_path):
# os.makedirs(folder_path)
#
# np.save(folder_path+'real_prediction.npy', preds)
return preds,trues
def _process_one_batch(self, dataset_object, batch_x, batch_y, batch_x_mark, batch_y_mark):
batch_x = batch_x.float().to(self.device)
batch_y = batch_y.float()
batch_x_mark = batch_x_mark.float().to(self.device)
batch_y_mark = batch_y_mark.float().to(self.device)
# decoder input
if self.args.padding==0:
dec_inp = torch.zeros([batch_y.shape[0], self.args.pred_len, batch_y.shape[-1]]).float()
elif self.args.padding==1:
dec_inp = torch.ones([batch_y.shape[0], self.args.pred_len, batch_y.shape[-1]]).float()
dec_inp = torch.cat([batch_y[:,:self.args.label_len,:], dec_inp], dim=1).float().to(self.device)
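        # decoder input = label_len ground-truth steps followed by pred_len
        # rows of zero/one padding that the model is asked to fill in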
# encoder - decoder
if self.args.use_amp:
with torch.cuda.amp.autocast():
if self.args.output_attention:
outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]
else:
outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)
else:
if self.args.output_attention:
outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]
else:
outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)
if self.args.inverse:
outputs = dataset_object.inverse_transform(outputs)
f_dim = -1 if self.args.features=='MS' else 0
# print("ARGS:",self.args.pred_len)
# print("SHAPE:",batch_y.shape)
batch_y = batch_y[:,-self.args.pred_len:,f_dim:].to(self.device)
return outputs, batch_y
# def load(self,setting):
# path = os.path.join(self.args.checkpoints, setting)
# if os.path.exists(path):
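    # Usage sketch (hypothetical experiment tag; `args` comes from the
    # launcher script):
    #   exp = Exp_Informer(args)
    #   exp.train('informer_ETTh1_test')
    #   preds, trues = exp.predict('informer_ETTh1_test', index=500)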
| 13,055 | 37.627219 | 112 | py |
Conformer-RLpatching | Conformer-RLpatching-main/Conformer/exp/exp_basic.py | import os
import torch
import numpy as np
class Exp_Basic(object):
def __init__(self, args):
self.args = args
self.device = self._acquire_device()
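        # the device is acquired first so CUDA_VISIBLE_DEVICES is set before
        # model construction creates a CUDA context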
self.model = self._build_model().to(self.device)
def _build_model(self):
raise NotImplementedError
def _acquire_device(self):
if self.args.use_gpu:
os.environ["CUDA_VISIBLE_DEVICES"] = str(self.args.gpu) if not self.args.use_multi_gpu else self.args.devices
device = torch.device('cuda:{}'.format(self.args.gpu))
print('Use GPU: cuda:{}'.format(self.args.gpu))
else:
device = torch.device('cpu')
print('Use CPU')
return device
def _get_data(self):
pass
def vali(self):
pass
def train(self):
pass
def test(self):
pass
| 875 | 23.333333 | 121 | py |
DiffMIC | DiffMIC-main/main.py | import argparse
import traceback
import shutil
import logging
import yaml
import sys
import os
import time
import torch
import numpy as np
import random
torch.set_printoptions(sci_mode=False)
parser = argparse.ArgumentParser(description=globals()["__doc__"])
parser.add_argument(
"--config", type=str, required=True, help="Path to the config file"
)
parser.add_argument('--device', type=int, default=0, help='GPU device id')
parser.add_argument('--thread', type=int, default=4, help='number of threads')
parser.add_argument("--seed", type=int, default=1234, help="Random seed")
parser.add_argument("--test_sample_seed", type=int, default=-1, help="Random seed during test time sampling")
parser.add_argument(
"--exp", type=str, default="exp", help="Path for saving running related data."
)
parser.add_argument(
"--doc",
type=str,
required=True,
help="A string for documentation purpose. "
"Will be the name of the log folder.",
)
parser.add_argument(
"--dataroot", type=str, default=None,
help="This argument will overwrite the dataroot in the config if it is not None."
)
parser.add_argument(
"--comment", type=str, default="", help="A string for experiment comment"
)
parser.add_argument(
"--verbose",
type=str,
default="info",
help="Verbose level: info | debug | warning | critical",
)
parser.add_argument("--test", action="store_true", help="Whether to test the model")
parser.add_argument("--tune_T",
action="store_true",
help="Whether to tune the scaling temperature parameter for calibration with training set.")
parser.add_argument("--sanity_check",
action="store_true",
help="Whether to quickly check test function implementation by running on only a few subsets.")
parser.add_argument(
"--sample",
action="store_true",
help="Whether to produce samples from the model",
)
parser.add_argument(
"--train_guidance_only",
action="store_true",
help="Whether to only pre-train the guidance classifier f_phi",
)
parser.add_argument(
"--noise_prior",
action="store_true",
help="Whether to apply a noise prior distribution at timestep T",
)
parser.add_argument(
"--no_cat_f_phi",
action="store_true",
help="Whether to not concatenate f_phi as part of eps_theta input",
)
parser.add_argument(
"--add_ce_loss",
action="store_true",
help="Whether to add cross entropy loss",
)
parser.add_argument(
"--eval_best",
action="store_true",
help="Evaluate best model during training, instead of the ckpt stored at the last epoch",
)
parser.add_argument("--fid", action="store_true")
parser.add_argument("--interpolation", action="store_true")
parser.add_argument(
"--resume_training", action="store_true", help="Whether to resume training"
)
parser.add_argument(
"-i",
"--image_folder",
type=str,
default="images",
help="The folder name of samples",
)
parser.add_argument(
"--n_splits", type=int, default=10, help="total number of runs with different seeds for a specific task"
)
parser.add_argument(
"--split", type=int, default=0, help="split ID"
)
parser.add_argument(
"--ni",
action="store_true",
help="No interaction. Suitable for Slurm Job launcher",
)
parser.add_argument(
"--sample_type",
type=str,
default="generalized",
help="sampling approach (generalized or ddpm_noisy)",
)
parser.add_argument(
"--skip_type",
type=str,
default="uniform",
help="skip according to (uniform or quadratic)",
)
parser.add_argument(
"--timesteps", type=int, default=None, help="number of steps involved"
)
parser.add_argument(
"--eta",
type=float,
default=0.0,
help="eta used to control the variances of sigma",
)
parser.add_argument("--sequence", action="store_true")
# loss option
# parser.add_argument(
# "--simple", action="store_true", default=False, help="Whether use simple loss for L0"
# )
parser.add_argument(
"--loss", type=str, default='ddpm', help="loss function"
)
parser.add_argument(
"--num_sample", type=int, default=1, help="number of samples used in forward and reverse"
)
args = parser.parse_args()
def parse_config():
args.log_path = os.path.join(args.exp, "logs", args.doc)
# parse config file
with open(os.path.join(args.config), "r") as f:
if args.sample or args.test:
config = yaml.unsafe_load(f)
new_config = config
else:
config = yaml.safe_load(f)
new_config = dict2namespace(config)
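    # fresh runs load plain dicts (safe_load) and convert them to a Namespace;
    # test/sample reload the YAML dumped at train time, which contains
    # Namespace objects and therefore needs unsafe_load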
tb_path = os.path.join(args.exp, "tensorboard", args.doc)
if not os.path.exists(tb_path):
os.makedirs(tb_path)
if not args.ni:
import torch.utils.tensorboard as tb
# overwrite if dataroot is not None
if not args.dataroot is None:
new_config.data.dataroot = args.dataroot
if not args.test and not args.sample:
args.im_path = os.path.join(args.exp, new_config.training.image_folder, args.doc)
new_config.diffusion.noise_prior = True if args.noise_prior else False
new_config.model.cat_y_pred = False if args.no_cat_f_phi else True
if not args.resume_training:
if not args.timesteps is None:
new_config.diffusion.timesteps = args.timesteps
if args.num_sample > 1:
new_config.diffusion.num_sample = args.num_sample
if os.path.exists(args.log_path):
overwrite = False
if args.ni:
overwrite = True
else:
response = input("Folder already exists. Overwrite? (Y/N)")
if response.upper() == "Y":
overwrite = True
if overwrite:
shutil.rmtree(args.log_path)
shutil.rmtree(tb_path)
shutil.rmtree(args.im_path)
os.makedirs(args.log_path)
os.makedirs(args.im_path)
if os.path.exists(tb_path):
shutil.rmtree(tb_path)
else:
print("Folder exists. Program halted.")
sys.exit(0)
else:
os.makedirs(args.log_path)
if not os.path.exists(args.im_path):
os.makedirs(args.im_path)
with open(os.path.join(args.log_path, "config.yml"), "w") as f:
yaml.dump(new_config, f, default_flow_style=False)
if not args.ni:
new_config.tb_logger = tb.SummaryWriter(log_dir=tb_path)
else:
new_config.tb_logger = None
# setup logger
level = getattr(logging, args.verbose.upper(), None)
if not isinstance(level, int):
raise ValueError("level {} not supported".format(args.verbose))
handler1 = logging.StreamHandler()
handler2 = logging.FileHandler(os.path.join(args.log_path, "stdout.txt"))
formatter = logging.Formatter(
"%(levelname)s - %(filename)s - %(asctime)s - %(message)s"
)
handler1.setFormatter(formatter)
handler2.setFormatter(formatter)
logger = logging.getLogger()
logger.addHandler(handler1)
logger.addHandler(handler2)
logger.setLevel(level)
else:
if args.sample:
args.im_path = os.path.join(args.exp, new_config.sampling.image_folder, args.doc)
else:
args.im_path = os.path.join(args.exp, new_config.testing.image_folder, args.doc)
level = getattr(logging, args.verbose.upper(), None)
if not isinstance(level, int):
raise ValueError("level {} not supported".format(args.verbose))
handler1 = logging.StreamHandler()
# saving test metrics to a .txt file
handler2 = logging.FileHandler(os.path.join(args.log_path, "testmetrics.txt"))
formatter = logging.Formatter(
"%(levelname)s - %(filename)s - %(asctime)s - %(message)s"
)
handler1.setFormatter(formatter)
handler2.setFormatter(formatter)
logger = logging.getLogger()
logger.addHandler(handler1)
logger.addHandler(handler2)
logger.setLevel(level)
if args.sample or args.test:
os.makedirs(args.im_path, exist_ok=True)
# add device
device_name = f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu'
device = torch.device(device_name)
logging.info("Using device: {}".format(device))
new_config.device = device
# set random seed
#print(args.seed)
torch.manual_seed(args.seed)
random.seed(args.seed)
np.random.seed(args.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(args.seed)
torch.backends.cudnn.benchmark = True
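    # cudnn.benchmark trades exact determinism for speed; for strictly
    # reproducible runs, set torch.backends.cudnn.deterministic = True as well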
return new_config, logger
def dict2namespace(config):
namespace = argparse.Namespace()
for key, value in config.items():
if isinstance(value, dict):
new_value = dict2namespace(value)
else:
new_value = value
setattr(namespace, key, new_value)
return namespace
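# e.g. dict2namespace({'model': {'lr': 1e-3}}).model.lr -> 0.001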
def main():
config, logger = parse_config()
logging.info("Writing log file to {}".format(args.log_path))
logging.info("Exp instance id = {}".format(os.getpid()))
logging.info("Exp comment = {}".format(args.comment))
if args.loss == 'diffmic_conditional':
from diffusion_trainer import Diffusion
else:
raise NotImplementedError("Invalid loss option")
try:
runner = Diffusion(args, config, device=config.device)
start_time = time.time()
procedure = None
if args.sample:
runner.sample()
procedure = "Sampling"
elif args.test:
runner.test()
procedure = "Testing"
else:
#set_random_seed(config.data.seed)
runner.train()
procedure = "Training"
end_time = time.time()
logging.info("\n{} procedure finished. It took {:.4f} minutes.\n\n\n".format(
procedure, (end_time - start_time) / 60))
# remove logging handlers
handlers = logger.handlers[:]
for handler in handlers:
logger.removeHandler(handler)
handler.close()
# # return test metric lists
# if args.test:
# return y_majority_vote_accuracy_all_steps_list, config
except Exception:
logging.error(traceback.format_exc())
return 0
if __name__ == "__main__":
args.doc = args.doc + "/split_" + str(args.split)
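    # each split writes to its own subfolder under exp/logs and exp/tensorboard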
if args.test:
args.config = args.config + args.doc + "/config.yml"
sys.exit(main())
| 10,762 | 31.914373 | 115 | py |